Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_dev.c | 9
-rw-r--r--  net/Kconfig | 7
-rw-r--r--  net/atm/br2684.c | 27
-rw-r--r--  net/atm/clip.c | 36
-rw-r--r--  net/atm/common.c | 34
-rw-r--r--  net/atm/common.h | 1
-rw-r--r--  net/atm/pppoatm.c | 4
-rw-r--r--  net/batman-adv/bat_sysfs.c | 4
-rw-r--r--  net/batman-adv/bitarray.c | 2
-rw-r--r--  net/batman-adv/gateway_client.c | 153
-rw-r--r--  net/batman-adv/gateway_client.h | 5
-rw-r--r--  net/batman-adv/gateway_common.c | 4
-rw-r--r--  net/batman-adv/hash.c | 4
-rw-r--r--  net/batman-adv/hash.h | 13
-rw-r--r--  net/batman-adv/main.h | 2
-rw-r--r--  net/batman-adv/originator.c | 13
-rw-r--r--  net/batman-adv/originator.h | 2
-rw-r--r--  net/batman-adv/routing.c | 22
-rw-r--r--  net/batman-adv/soft-interface.c | 43
-rw-r--r--  net/batman-adv/translation-table.c | 100
-rw-r--r--  net/batman-adv/vis.c | 17
-rw-r--r--  net/bluetooth/bnep/core.c | 13
-rw-r--r--  net/bluetooth/cmtp/core.c | 13
-rw-r--r--  net/bluetooth/hci_conn.c | 69
-rw-r--r--  net/bluetooth/hci_core.c | 399
-rw-r--r--  net/bluetooth/hci_event.c | 221
-rw-r--r--  net/bluetooth/hci_sysfs.c | 40
-rw-r--r--  net/bluetooth/hidp/core.c | 157
-rw-r--r--  net/bluetooth/l2cap_core.c | 947
-rw-r--r--  net/bluetooth/l2cap_sock.c | 90
-rw-r--r--  net/bluetooth/mgmt.c | 500
-rw-r--r--  net/bluetooth/rfcomm/core.c | 65
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 2
-rw-r--r--  net/bluetooth/rfcomm/tty.c | 45
-rw-r--r--  net/bluetooth/smp.c | 3
-rw-r--r--  net/bridge/br_device.c | 7
-rw-r--r--  net/bridge/br_fdb.c | 38
-rw-r--r--  net/bridge/br_if.c | 5
-rw-r--r--  net/bridge/br_multicast.c | 10
-rw-r--r--  net/bridge/br_private.h | 3
-rw-r--r--  net/core/Makefile | 1
-rw-r--r--  net/core/dev.c | 138
-rw-r--r--  net/core/ethtool.c | 556
-rw-r--r--  net/core/neighbour.c | 162
-rw-r--r--  net/core/net-sysfs.c | 56
-rw-r--r--  net/core/netpoll.c | 6
-rw-r--r--  net/core/netprio_cgroup.c | 344
-rw-r--r--  net/core/pktgen.c | 15
-rw-r--r--  net/core/skbuff.c | 71
-rw-r--r--  net/core/sock.c | 42
-rw-r--r--  net/core/sysctl_net_core.c | 9
-rw-r--r--  net/dccp/ipv6.c | 42
-rw-r--r--  net/dccp/minisocks.c | 6
-rw-r--r--  net/decnet/dn_neigh.c | 2
-rw-r--r--  net/econet/af_econet.c | 7
-rw-r--r--  net/ieee802154/6lowpan.c | 374
-rw-r--r--  net/ieee802154/6lowpan.h | 23
-rw-r--r--  net/ieee802154/dgram.c | 7
-rw-r--r--  net/ieee802154/raw.c | 7
-rw-r--r--  net/ipv4/af_inet.c | 11
-rw-r--r--  net/ipv4/arp.c | 8
-rw-r--r--  net/ipv4/igmp.c | 13
-rw-r--r--  net/ipv4/inet_connection_sock.c | 17
-rw-r--r--  net/ipv4/inet_diag.c | 19
-rw-r--r--  net/ipv4/ip_gre.c | 4
-rw-r--r--  net/ipv4/ip_sockglue.c | 35
-rw-r--r--  net/ipv4/ipconfig.c | 15
-rw-r--r--  net/ipv4/ipip.c | 2
-rw-r--r--  net/ipv4/proc.c | 9
-rw-r--r--  net/ipv4/raw.c | 10
-rw-r--r--  net/ipv4/tcp.c | 3
-rw-r--r--  net/ipv4/tcp_minisocks.c | 6
-rw-r--r--  net/ipv4/udp.c | 6
-rw-r--r--  net/ipv6/addrconf.c | 8
-rw-r--r--  net/ipv6/af_inet6.c | 27
-rw-r--r--  net/ipv6/ah6.c | 12
-rw-r--r--  net/ipv6/anycast.c | 4
-rw-r--r--  net/ipv6/datagram.c | 36
-rw-r--r--  net/ipv6/exthdrs.c | 18
-rw-r--r--  net/ipv6/fib6_rules.c | 2
-rw-r--r--  net/ipv6/icmp.c | 18
-rw-r--r--  net/ipv6/inet6_connection_sock.c | 12
-rw-r--r--  net/ipv6/ip6_fib.c | 115
-rw-r--r--  net/ipv6/ip6_flowlabel.c | 2
-rw-r--r--  net/ipv6/ip6_output.c | 26
-rw-r--r--  net/ipv6/ip6_tunnel.c | 14
-rw-r--r--  net/ipv6/ip6mr.c | 12
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 8
-rw-r--r--  net/ipv6/mcast.c | 18
-rw-r--r--  net/ipv6/mip6.c | 4
-rw-r--r--  net/ipv6/ndisc.c | 21
-rw-r--r--  net/ipv6/netfilter/ip6t_REJECT.c | 8
-rw-r--r--  net/ipv6/proc.c | 15
-rw-r--r--  net/ipv6/raw.c | 19
-rw-r--r--  net/ipv6/reassembly.c | 4
-rw-r--r--  net/ipv6/route.c | 55
-rw-r--r--  net/ipv6/sit.c | 6
-rw-r--r--  net/ipv6/syncookies.c | 8
-rw-r--r--  net/ipv6/tcp_ipv6.c | 46
-rw-r--r--  net/ipv6/udp.c | 14
-rw-r--r--  net/ipv6/xfrm6_mode_beet.c | 8
-rw-r--r--  net/ipv6/xfrm6_mode_tunnel.c | 4
-rw-r--r--  net/ipv6/xfrm6_output.c | 4
-rw-r--r--  net/ipv6/xfrm6_policy.c | 4
-rw-r--r--  net/ipv6/xfrm6_state.c | 4
-rw-r--r--  net/irda/irttp.c | 4
-rw-r--r--  net/key/af_key.c | 2
-rw-r--r--  net/mac80211/agg-rx.c | 3
-rw-r--r--  net/mac80211/agg-tx.c | 10
-rw-r--r--  net/mac80211/cfg.c | 165
-rw-r--r--  net/mac80211/debugfs.c | 2
-rw-r--r--  net/mac80211/driver-ops.h | 68
-rw-r--r--  net/mac80211/ht.c | 3
-rw-r--r--  net/mac80211/ibss.c | 3
-rw-r--r--  net/mac80211/ieee80211_i.h | 29
-rw-r--r--  net/mac80211/iface.c | 44
-rw-r--r--  net/mac80211/key.c | 9
-rw-r--r--  net/mac80211/main.c | 39
-rw-r--r--  net/mac80211/mesh.c | 70
-rw-r--r--  net/mac80211/mesh.h | 7
-rw-r--r--  net/mac80211/mesh_hwmp.c | 52
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 3
-rw-r--r--  net/mac80211/mesh_plink.c | 50
-rw-r--r--  net/mac80211/mlme.c | 90
-rw-r--r--  net/mac80211/pm.c | 2
-rw-r--r--  net/mac80211/rc80211_minstrel.c | 7
-rw-r--r--  net/mac80211/rx.c | 73
-rw-r--r--  net/mac80211/scan.c | 199
-rw-r--r--  net/mac80211/sta_info.h | 8
-rw-r--r--  net/mac80211/status.c | 87
-rw-r--r--  net/mac80211/tx.c | 110
-rw-r--r--  net/mac80211/util.c | 126
-rw-r--r--  net/mac80211/wme.c | 10
-rw-r--r--  net/mac80211/work.c | 46
-rw-r--r--  net/mac80211/wpa.c | 9
-rw-r--r--  net/netfilter/core.c | 13
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ip.c | 2
-rw-r--r--  net/netfilter/ipset/ip_set_hash_net.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_sync.c | 6
-rw-r--r--  net/netfilter/ipvs/ip_vs_xmit.c | 10
-rw-r--r--  net/netfilter/nf_conntrack_h323_main.c | 4
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 2
-rw-r--r--  net/netfilter/xt_addrtype.c | 2
-rw-r--r--  net/netlabel/netlabel_kapi.c | 4
-rw-r--r--  net/netlabel/netlabel_mgmt.c | 4
-rw-r--r--  net/netlabel/netlabel_unlabeled.c | 4
-rw-r--r--  net/nfc/nci/core.c | 18
-rw-r--r--  net/nfc/nci/data.c | 5
-rw-r--r--  net/nfc/nci/lib.c | 8
-rw-r--r--  net/nfc/nci/ntf.c | 152
-rw-r--r--  net/nfc/nci/rsp.c | 83
-rw-r--r--  net/packet/af_packet.c | 21
-rw-r--r--  net/phonet/pep.c | 106
-rw-r--r--  net/rfkill/core.c | 4
-rw-r--r--  net/rxrpc/ar-key.c | 6
-rw-r--r--  net/sched/sch_choke.c | 27
-rw-r--r--  net/sched/sch_generic.c | 1
-rw-r--r--  net/sctp/ipv6.c | 40
-rw-r--r--  net/sctp/sm_make_chunk.c | 4
-rw-r--r--  net/sctp/sm_sideeffect.c | 8
-rw-r--r--  net/sctp/socket.c | 2
-rw-r--r--  net/sctp/transport.c | 16
-rw-r--r--  net/socket.c | 20
-rw-r--r--  net/sunrpc/svcauth_unix.c | 6
-rw-r--r--  net/sunrpc/svcsock.c | 4
-rw-r--r--  net/wireless/core.c | 4
-rw-r--r--  net/wireless/core.h | 4
-rw-r--r--  net/wireless/mlme.c | 35
-rw-r--r--  net/wireless/nl80211.c | 410
-rw-r--r--  net/wireless/nl80211.h | 5
-rw-r--r--  net/wireless/scan.c | 117
-rw-r--r--  net/wireless/wext-compat.c | 12
-rw-r--r--  net/xfrm/xfrm_policy.c | 4
-rw-r--r--  net/xfrm/xfrm_state.c | 12
175 files changed, 5479 insertions, 2751 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index bc252862458..2b5fcde1f62 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -591,18 +591,17 @@ static void vlan_dev_uninit(struct net_device *dev)
}
}
-static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
u32 old_features = features;
- features &= real_dev->features;
features &= real_dev->vlan_features;
+ features |= NETIF_F_RXCSUM;
+ features &= real_dev->features;
features |= old_features & NETIF_F_SOFT_FEATURES;
-
- if (dev_ethtool_get_rx_csum(real_dev))
- features |= NETIF_F_RXCSUM;
features |= NETIF_F_LLTX;
return features;
diff --git a/net/Kconfig b/net/Kconfig
index a0731484423..63d2c5dc36f 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -232,6 +232,13 @@ config XPS
depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
default y
+config NETPRIO_CGROUP
+ tristate "Network priority cgroup"
+ depends on CGROUPS
+ ---help---
+ Cgroup subsystem for use in assigning processes to network priorities on
+ a per-interface basis
+
config HAVE_BPF_JIT
bool
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index d07223c834a..53b0aa14a1e 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -489,15 +489,11 @@ free_skb:
*/
static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
{
- struct sk_buff_head queue;
- int err;
struct br2684_vcc *brvcc;
- struct sk_buff *skb, *tmp;
- struct sk_buff_head *rq;
struct br2684_dev *brdev;
struct net_device *net_dev;
struct atm_backend_br2684 be;
- unsigned long flags;
+ int err;
if (copy_from_user(&be, arg, sizeof be))
return -EFAULT;
@@ -550,23 +546,6 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
atmvcc->push = br2684_push;
atmvcc->pop = br2684_pop;
- __skb_queue_head_init(&queue);
- rq = &sk_atm(atmvcc)->sk_receive_queue;
-
- spin_lock_irqsave(&rq->lock, flags);
- skb_queue_splice_init(rq, &queue);
- spin_unlock_irqrestore(&rq->lock, flags);
-
- skb_queue_walk_safe(&queue, skb, tmp) {
- struct net_device *dev;
-
- br2684_push(atmvcc, skb);
- dev = skb->dev;
-
- dev->stats.rx_bytes -= skb->len;
- dev->stats.rx_packets--;
- }
-
/* initialize netdev carrier state */
if (atmvcc->dev->signal == ATM_PHY_SIG_LOST)
netif_carrier_off(net_dev);
@@ -574,6 +553,10 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
netif_carrier_on(net_dev);
__module_get(THIS_MODULE);
+
+ /* re-process everything received between connection setup and
+ backend setup */
+ vcc_process_recv_queue(atmvcc);
return 0;
error:
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 852394072fa..11439a7f678 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -189,6 +189,13 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
pr_debug("\n");
+
+ if (!clip_devs) {
+ atm_return(vcc, skb->truesize);
+ kfree_skb(skb);
+ return;
+ }
+
if (!skb) {
pr_debug("removing VCC %p\n", clip_vcc);
if (clip_vcc->entry)
@@ -329,7 +336,7 @@ static struct neigh_table clip_tbl = {
.gc_staletime = 60 * HZ,
.reachable_time = 30 * HZ,
.delay_probe_time = 5 * HZ,
- .queue_len = 3,
+ .queue_len_bytes = 64 * 1024,
.ucast_probes = 3,
.mcast_probes = 3,
.anycast_delay = 1 * HZ,
@@ -448,10 +455,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
static int clip_mkip(struct atm_vcc *vcc, int timeout)
{
- struct sk_buff_head *rq, queue;
struct clip_vcc *clip_vcc;
- struct sk_buff *skb, *tmp;
- unsigned long flags;
if (!vcc->push)
return -EBADFD;
@@ -472,29 +476,9 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout)
vcc->push = clip_push;
vcc->pop = clip_pop;
- __skb_queue_head_init(&queue);
- rq = &sk_atm(vcc)->sk_receive_queue;
-
- spin_lock_irqsave(&rq->lock, flags);
- skb_queue_splice_init(rq, &queue);
- spin_unlock_irqrestore(&rq->lock, flags);
-
/* re-process everything received between connection setup and MKIP */
- skb_queue_walk_safe(&queue, skb, tmp) {
- if (!clip_devs) {
- atm_return(vcc, skb->truesize);
- kfree_skb(skb);
- } else {
- struct net_device *dev = skb->dev;
- unsigned int len = skb->len;
-
- skb_get(skb);
- clip_push(vcc, skb);
- dev->stats.rx_packets--;
- dev->stats.rx_bytes -= len;
- kfree_skb(skb);
- }
- }
+ vcc_process_recv_queue(vcc);
+
return 0;
}
diff --git a/net/atm/common.c b/net/atm/common.c
index 14ff9fe3998..b4b44dbed64 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -214,6 +214,26 @@ void vcc_release_async(struct atm_vcc *vcc, int reply)
}
EXPORT_SYMBOL(vcc_release_async);
+void vcc_process_recv_queue(struct atm_vcc *vcc)
+{
+ struct sk_buff_head queue, *rq;
+ struct sk_buff *skb, *tmp;
+ unsigned long flags;
+
+ __skb_queue_head_init(&queue);
+ rq = &sk_atm(vcc)->sk_receive_queue;
+
+ spin_lock_irqsave(&rq->lock, flags);
+ skb_queue_splice_init(rq, &queue);
+ spin_unlock_irqrestore(&rq->lock, flags);
+
+ skb_queue_walk_safe(&queue, skb, tmp) {
+ __skb_unlink(skb, &queue);
+ vcc->push(vcc, skb);
+ }
+}
+EXPORT_SYMBOL(vcc_process_recv_queue);
+
void atm_dev_signal_change(struct atm_dev *dev, char signal)
{
pr_debug("%s signal=%d dev=%p number=%d dev->signal=%d\n",
@@ -502,8 +522,11 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
if (sock->state != SS_CONNECTED)
return -ENOTCONN;
- if (flags & ~MSG_DONTWAIT) /* only handle MSG_DONTWAIT */
+
+ /* only handle MSG_DONTWAIT and MSG_PEEK */
+ if (flags & ~(MSG_DONTWAIT | MSG_PEEK))
return -EOPNOTSUPP;
+
vcc = ATM_SD(sock);
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
@@ -524,8 +547,13 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
if (error)
return error;
sock_recv_ts_and_drops(msg, sk, skb);
- pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize);
- atm_return(vcc, skb->truesize);
+
+ if (!(flags & MSG_PEEK)) {
+ pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc),
+ skb->truesize);
+ atm_return(vcc, skb->truesize);
+ }
+
skb_free_datagram(sk, skb);
return copied;
}
diff --git a/net/atm/common.h b/net/atm/common.h
index f48a76b6cdf..cc3c2dae4d7 100644
--- a/net/atm/common.h
+++ b/net/atm/common.h
@@ -24,6 +24,7 @@ int vcc_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen);
int vcc_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen);
+void vcc_process_recv_queue(struct atm_vcc *vcc);
int atmpvc_init(void);
void atmpvc_exit(void);
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index db4a11c61d1..df35d9a3b5f 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -303,6 +303,10 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
atmvcc->push = pppoatm_push;
atmvcc->pop = pppoatm_pop;
__module_get(THIS_MODULE);
+
+ /* re-process everything received between connection setup and
+ backend setup */
+ vcc_process_recv_queue(atmvcc);
return 0;
}
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index b8a7414c357..c25492f7d66 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -174,7 +174,7 @@ static int store_uint_attr(const char *buff, size_t count,
unsigned long uint_val;
int ret;
- ret = strict_strtoul(buff, 10, &uint_val);
+ ret = kstrtoul(buff, 10, &uint_val);
if (ret) {
bat_info(net_dev,
"%s: Invalid parameter received: %s\n",
@@ -239,7 +239,7 @@ static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
unsigned long val;
int ret, vis_mode_tmp = -1;
- ret = strict_strtoul(buff, 10, &val);
+ ret = kstrtoul(buff, 10, &val);
if (((count == 2) && (!ret) && (val == VIS_TYPE_CLIENT_UPDATE)) ||
(strncmp(buff, "client", 6) == 0) ||
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index 0be9ff346fa..9bc63b209b3 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -155,7 +155,7 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
/* sequence number is much newer, probably missed a lot of packets */
if ((seq_num_diff >= TQ_LOCAL_WINDOW_SIZE)
- || (seq_num_diff < EXPECTED_SEQNO_RANGE)) {
+ && (seq_num_diff < EXPECTED_SEQNO_RANGE)) {
bat_dbg(DBG_BATMAN, bat_priv,
"We missed a lot of packets (%i) !\n",
seq_num_diff - 1);
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 619fb73b3b7..9373a143c6d 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -25,6 +25,7 @@
#include "gateway_common.h"
#include "hard-interface.h"
#include "originator.h"
+#include "translation-table.h"
#include "routing.h"
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -572,108 +573,142 @@ out:
return ret;
}
-int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb,
- struct orig_node *old_gw)
+bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
{
struct ethhdr *ethhdr;
struct iphdr *iphdr;
struct ipv6hdr *ipv6hdr;
struct udphdr *udphdr;
- struct gw_node *curr_gw;
- struct neigh_node *neigh_curr = NULL, *neigh_old = NULL;
- unsigned int header_len = 0;
- int ret = 1;
-
- if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF)
- return 0;
/* check for ethernet header */
- if (!pskb_may_pull(skb, header_len + ETH_HLEN))
- return 0;
+ if (!pskb_may_pull(skb, *header_len + ETH_HLEN))
+ return false;
ethhdr = (struct ethhdr *)skb->data;
- header_len += ETH_HLEN;
+ *header_len += ETH_HLEN;
/* check for initial vlan header */
if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
- if (!pskb_may_pull(skb, header_len + VLAN_HLEN))
- return 0;
+ if (!pskb_may_pull(skb, *header_len + VLAN_HLEN))
+ return false;
ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
- header_len += VLAN_HLEN;
+ *header_len += VLAN_HLEN;
}
/* check for ip header */
switch (ntohs(ethhdr->h_proto)) {
case ETH_P_IP:
- if (!pskb_may_pull(skb, header_len + sizeof(*iphdr)))
- return 0;
- iphdr = (struct iphdr *)(skb->data + header_len);
- header_len += iphdr->ihl * 4;
+ if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr)))
+ return false;
+ iphdr = (struct iphdr *)(skb->data + *header_len);
+ *header_len += iphdr->ihl * 4;
/* check for udp header */
if (iphdr->protocol != IPPROTO_UDP)
- return 0;
+ return false;
break;
case ETH_P_IPV6:
- if (!pskb_may_pull(skb, header_len + sizeof(*ipv6hdr)))
- return 0;
- ipv6hdr = (struct ipv6hdr *)(skb->data + header_len);
- header_len += sizeof(*ipv6hdr);
+ if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr)))
+ return false;
+ ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len);
+ *header_len += sizeof(*ipv6hdr);
/* check for udp header */
if (ipv6hdr->nexthdr != IPPROTO_UDP)
- return 0;
+ return false;
break;
default:
- return 0;
+ return false;
}
- if (!pskb_may_pull(skb, header_len + sizeof(*udphdr)))
- return 0;
- udphdr = (struct udphdr *)(skb->data + header_len);
- header_len += sizeof(*udphdr);
+ if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
+ return false;
+ udphdr = (struct udphdr *)(skb->data + *header_len);
+ *header_len += sizeof(*udphdr);
/* check for bootp port */
if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
(ntohs(udphdr->dest) != 67))
- return 0;
+ return false;
if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) &&
(ntohs(udphdr->dest) != 547))
- return 0;
+ return false;
- if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)
- return -1;
+ return true;
+}
- curr_gw = gw_get_selected_gw_node(bat_priv);
- if (!curr_gw)
- return 0;
-
- /* If old_gw != NULL then this packet is unicast.
- * So, at this point we have to check the message type: if it is a
- * DHCPREQUEST we have to decide whether to drop it or not */
- if (old_gw && curr_gw->orig_node != old_gw) {
- if (is_type_dhcprequest(skb, header_len)) {
- /* If the dhcp packet has been sent to a different gw,
- * we have to evaluate whether the old gw is still
- * reliable enough */
- neigh_curr = find_router(bat_priv, curr_gw->orig_node,
- NULL);
- neigh_old = find_router(bat_priv, old_gw, NULL);
- if (!neigh_curr || !neigh_old)
- goto free_neigh;
- if (neigh_curr->tq_avg - neigh_old->tq_avg <
- GW_THRESHOLD)
- ret = -1;
- }
+bool gw_out_of_range(struct bat_priv *bat_priv,
+ struct sk_buff *skb, struct ethhdr *ethhdr)
+{
+ struct neigh_node *neigh_curr = NULL, *neigh_old = NULL;
+ struct orig_node *orig_dst_node = NULL;
+ struct gw_node *curr_gw = NULL;
+ bool ret, out_of_range = false;
+ unsigned int header_len = 0;
+ uint8_t curr_tq_avg;
+
+ ret = gw_is_dhcp_target(skb, &header_len);
+ if (!ret)
+ goto out;
+
+ orig_dst_node = transtable_search(bat_priv, ethhdr->h_source,
+ ethhdr->h_dest);
+ if (!orig_dst_node)
+ goto out;
+
+ if (!orig_dst_node->gw_flags)
+ goto out;
+
+ ret = is_type_dhcprequest(skb, header_len);
+ if (!ret)
+ goto out;
+
+ switch (atomic_read(&bat_priv->gw_mode)) {
+ case GW_MODE_SERVER:
+ /* If we are a GW then we are our best GW. We can artificially
+ * set the tq towards ourself as the maximum value */
+ curr_tq_avg = TQ_MAX_VALUE;
+ break;
+ case GW_MODE_CLIENT:
+ curr_gw = gw_get_selected_gw_node(bat_priv);
+ if (!curr_gw)
+ goto out;
+
+ /* packet is going to our gateway */
+ if (curr_gw->orig_node == orig_dst_node)
+ goto out;
+
+ /* If the dhcp packet has been sent to a different gw,
+ * we have to evaluate whether the old gw is still
+ * reliable enough */
+ neigh_curr = find_router(bat_priv, curr_gw->orig_node, NULL);
+ if (!neigh_curr)
+ goto out;
+
+ curr_tq_avg = neigh_curr->tq_avg;
+ break;
+ case GW_MODE_OFF:
+ default:
+ goto out;
}
-free_neigh:
+
+ neigh_old = find_router(bat_priv, orig_dst_node, NULL);
+ if (!neigh_old)
+ goto out;
+
+ if (curr_tq_avg - neigh_old->tq_avg > GW_THRESHOLD)
+ out_of_range = true;
+
+out:
+ if (orig_dst_node)
+ orig_node_free_ref(orig_dst_node);
+ if (curr_gw)
+ gw_node_free_ref(curr_gw);
if (neigh_old)
neigh_node_free_ref(neigh_old);
if (neigh_curr)
neigh_node_free_ref(neigh_curr);
- if (curr_gw)
- gw_node_free_ref(curr_gw);
- return ret;
+ return out_of_range;
}
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index b9b983c07fe..e1edba08eb1 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -31,7 +31,8 @@ void gw_node_update(struct bat_priv *bat_priv,
void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node);
void gw_node_purge(struct bat_priv *bat_priv);
int gw_client_seq_print_text(struct seq_file *seq, void *offset);
-int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb,
- struct orig_node *old_gw);
+bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
+bool gw_out_of_range(struct bat_priv *bat_priv,
+ struct sk_buff *skb, struct ethhdr *ethhdr);
#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 18661af0bc3..c4ac7b0a2a6 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -97,7 +97,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
*tmp_ptr = '\0';
}
- ret = strict_strtol(buff, 10, &ldown);
+ ret = kstrtol(buff, 10, &ldown);
if (ret) {
bat_err(net_dev,
"Download speed of gateway mode invalid: %s\n",
@@ -122,7 +122,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
*tmp_ptr = '\0';
}
- ret = strict_strtol(slash_ptr + 1, 10, &lup);
+ ret = kstrtol(slash_ptr + 1, 10, &lup);
if (ret) {
bat_err(net_dev,
"Upload speed of gateway mode invalid: "
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index 2a172505f51..d1da29da333 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -25,7 +25,7 @@
/* clears the hash */
static void hash_init(struct hashtable_t *hash)
{
- int i;
+ uint32_t i;
for (i = 0 ; i < hash->size; i++) {
INIT_HLIST_HEAD(&hash->table[i]);
@@ -42,7 +42,7 @@ void hash_destroy(struct hashtable_t *hash)
}
/* allocates and clears the hash */
-struct hashtable_t *hash_new(int size)
+struct hashtable_t *hash_new(uint32_t size)
{
struct hashtable_t *hash;
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index d20aa71ba1e..4768717f07f 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -33,17 +33,17 @@ typedef int (*hashdata_compare_cb)(const struct hlist_node *, const void *);
/* the hashfunction, should return an index
* based on the key in the data of the first
* argument and the size the second */
-typedef int (*hashdata_choose_cb)(const void *, int);
+typedef uint32_t (*hashdata_choose_cb)(const void *, uint32_t);
typedef void (*hashdata_free_cb)(struct hlist_node *, void *);
struct hashtable_t {
struct hlist_head *table; /* the hashtable itself with the buckets */
spinlock_t *list_locks; /* spinlock for each hash list entry */
- int size; /* size of hashtable */
+ uint32_t size; /* size of hashtable */
};
/* allocates and clears the hash */
-struct hashtable_t *hash_new(int size);
+struct hashtable_t *hash_new(uint32_t size);
/* free only the hashtable and the hash itself. */
void hash_destroy(struct hashtable_t *hash);
@@ -57,7 +57,7 @@ static inline void hash_delete(struct hashtable_t *hash,
struct hlist_head *head;
struct hlist_node *node, *node_tmp;
spinlock_t *list_lock; /* spinlock to protect write access */
- int i;
+ uint32_t i;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -93,7 +93,8 @@ static inline int hash_add(struct hashtable_t *hash,
hashdata_choose_cb choose,
const void *data, struct hlist_node *data_node)
{
- int index, ret = -1;
+ uint32_t index;
+ int ret = -1;
struct hlist_head *head;
struct hlist_node *node;
spinlock_t *list_lock; /* spinlock to protect write access */
@@ -137,7 +138,7 @@ static inline void *hash_remove(struct hashtable_t *hash,
hashdata_compare_cb compare,
hashdata_choose_cb choose, void *data)
{
- size_t index;
+ uint32_t index;
struct hlist_node *node;
struct hlist_head *head;
void *data_save = NULL;
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 964ad4d8ba3..86354e06eb4 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -28,7 +28,7 @@
#define DRIVER_DEVICE "batman-adv"
#ifndef SOURCE_VERSION
-#define SOURCE_VERSION "2011.4.0"
+#define SOURCE_VERSION "2012.0.0"
#endif
/* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 0e5b77255d9..0bc2045a2f2 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -164,7 +164,7 @@ void originator_free(struct bat_priv *bat_priv)
struct hlist_head *head;
spinlock_t *list_lock; /* spinlock to protect write access */
struct orig_node *orig_node;
- int i;
+ uint32_t i;
if (!hash)
return;
@@ -350,7 +350,7 @@ static void _purge_orig(struct bat_priv *bat_priv)
struct hlist_head *head;
spinlock_t *list_lock; /* spinlock to protect write access */
struct orig_node *orig_node;
- int i;
+ uint32_t i;
if (!hash)
return;
@@ -413,7 +413,8 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
int batman_count = 0;
int last_seen_secs;
int last_seen_msecs;
- int i, ret = 0;
+ uint32_t i;
+ int ret = 0;
primary_if = primary_if_get_selected(bat_priv);
@@ -519,7 +520,8 @@ int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num)
struct hlist_node *node;
struct hlist_head *head;
struct orig_node *orig_node;
- int i, ret;
+ uint32_t i;
+ int ret;
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
@@ -601,7 +603,8 @@ int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
struct hlist_head *head;
struct hard_iface *hard_iface_tmp;
struct orig_node *orig_node;
- int i, ret;
+ uint32_t i;
+ int ret;
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index cfc1f60a96a..67765ffef73 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -42,7 +42,7 @@ int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num);
/* hashfunction to choose an entry in a hash table of given size */
/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-static inline int choose_orig(const void *data, int32_t size)
+static inline uint32_t choose_orig(const void *data, uint32_t size)
{
const unsigned char *key = data;
uint32_t hash = 0;
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index f961cc5eade..ef24a7205f6 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -39,7 +39,7 @@ void slide_own_bcast_window(struct hard_iface *hard_iface)
struct hlist_head *head;
struct orig_node *orig_node;
unsigned long *word;
- int i;
+ uint32_t i;
size_t word_index;
for (i = 0; i < hash->size; i++) {
@@ -578,6 +578,7 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
{
struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct tt_query_packet *tt_query;
+ uint16_t tt_len;
struct ethhdr *ethhdr;
/* drop packet if it has not necessary minimum size */
@@ -616,13 +617,22 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
}
break;
case TT_RESPONSE:
- /* packet needs to be linearized to access the TT changes */
- if (skb_linearize(skb) < 0)
- goto out;
+ if (is_my_mac(tt_query->dst)) {
+ /* packet needs to be linearized to access the TT
+ * changes */
+ if (skb_linearize(skb) < 0)
+ goto out;
+
+ tt_len = tt_query->tt_data * sizeof(struct tt_change);
+
+ /* Ensure we have all the claimed data */
+ if (unlikely(skb_headlen(skb) <
+ sizeof(struct tt_query_packet) +
+ tt_len))
+ goto out;
- if (is_my_mac(tt_query->dst))
handle_tt_response(bat_priv, tt_query);
- else {
+ } else {
bat_dbg(DBG_TT, bat_priv,
"Routing TT_RESPONSE to %pM [%c]\n",
tt_query->dst,
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index f9cc9572898..45297c84309 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -563,10 +563,10 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
struct bcast_packet *bcast_packet;
struct vlan_ethhdr *vhdr;
struct softif_neigh *curr_softif_neigh = NULL;
- struct orig_node *orig_node = NULL;
+ unsigned int header_len = 0;
int data_len = skb->len, ret;
short vid = -1;
- bool do_bcast;
+ bool do_bcast = false;
if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
goto dropped;
@@ -598,17 +598,28 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
/* Register the client MAC in the transtable */
tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
- orig_node = transtable_search(bat_priv, ethhdr->h_source,
- ethhdr->h_dest);
- do_bcast = is_multicast_ether_addr(ethhdr->h_dest);
- if (do_bcast || (orig_node && orig_node->gw_flags)) {
- ret = gw_is_target(bat_priv, skb, orig_node);
+ if (is_multicast_ether_addr(ethhdr->h_dest)) {
+ do_bcast = true;
- if (ret < 0)
- goto dropped;
-
- if (ret)
- do_bcast = false;
+ switch (atomic_read(&bat_priv->gw_mode)) {
+ case GW_MODE_SERVER:
+ /* gateway servers should not send dhcp
+ * requests into the mesh */
+ ret = gw_is_dhcp_target(skb, &header_len);
+ if (ret)
+ goto dropped;
+ break;
+ case GW_MODE_CLIENT:
+ /* gateway clients should send dhcp requests
+ * via unicast to their gateway */
+ ret = gw_is_dhcp_target(skb, &header_len);
+ if (ret)
+ do_bcast = false;
+ break;
+ case GW_MODE_OFF:
+ default:
+ break;
+ }
}
/* ethernet packet should be broadcasted */
@@ -644,6 +655,12 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
/* unicast packet */
} else {
+ if (atomic_read(&bat_priv->gw_mode) != GW_MODE_OFF) {
+ ret = gw_out_of_range(bat_priv, skb, ethhdr);
+ if (ret)
+ goto dropped;
+ }
+
ret = unicast_send_skb(skb, bat_priv);
if (ret != 0)
goto dropped_freed;
@@ -662,8 +679,6 @@ end:
softif_neigh_free_ref(curr_softif_neigh);
if (primary_if)
hardif_free_ref(primary_if);
- if (orig_node)
- orig_node_free_ref(orig_node);
return NETDEV_TX_OK;
}
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index c7aafc7c5ed..78b9528bfc2 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -67,7 +67,7 @@ static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
struct hlist_head *head;
struct hlist_node *node;
struct tt_local_entry *tt_local_entry, *tt_local_entry_tmp = NULL;
- int index;
+ uint32_t index;
if (!hash)
return NULL;
@@ -99,7 +99,7 @@ static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
struct hlist_node *node;
struct tt_global_entry *tt_global_entry;
struct tt_global_entry *tt_global_entry_tmp = NULL;
- int index;
+ uint32_t index;
if (!hash)
return NULL;
@@ -314,9 +314,8 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
struct hard_iface *primary_if;
struct hlist_node *node;
struct hlist_head *head;
- size_t buf_size, pos;
- char *buff;
- int i, ret = 0;
+ uint32_t i;
+ int ret = 0;
primary_if = primary_if_get_selected(bat_priv);
if (!primary_if) {
@@ -337,34 +336,13 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
"announced via TT (TTVN: %u):\n",
net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
- buf_size = 1;
- /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- __hlist_for_each_rcu(node, head)
- buf_size += 29;
- rcu_read_unlock();
- }
-
- buff = kmalloc(buf_size, GFP_ATOMIC);
- if (!buff) {
- ret = -ENOMEM;
- goto out;
- }
-
- buff[0] = '\0';
- pos = 0;
-
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(tt_local_entry, node,
head, hash_entry) {
- pos += snprintf(buff + pos, 30, " * %pM "
- "[%c%c%c%c%c]\n",
+ seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
tt_local_entry->addr,
(tt_local_entry->flags &
TT_CLIENT_ROAM ? 'R' : '.'),
@@ -379,9 +357,6 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
}
rcu_read_unlock();
}
-
- seq_printf(seq, "%s", buff);
- kfree(buff);
out:
if (primary_if)
hardif_free_ref(primary_if);
@@ -427,7 +402,7 @@ static void tt_local_purge(struct bat_priv *bat_priv)
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
- int i;
+ uint32_t i;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -465,7 +440,7 @@ static void tt_local_table_free(struct bat_priv *bat_priv)
struct tt_local_entry *tt_local_entry;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
- int i;
+ uint32_t i;
if (!bat_priv->tt_local_hash)
return;
@@ -590,9 +565,8 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
struct hard_iface *primary_if;
struct hlist_node *node;
struct hlist_head *head;
- size_t buf_size, pos;
- char *buff;
- int i, ret = 0;
+ uint32_t i;
+ int ret = 0;
primary_if = primary_if_get_selected(bat_priv);
if (!primary_if) {
@@ -615,35 +589,13 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
seq_printf(seq, " %-13s %s %-15s %s %s\n",
"Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");
- buf_size = 1;
- /* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via
- * xx:xx:xx:xx:xx:xx (cur_ttvn)\n"*/
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- __hlist_for_each_rcu(node, head)
- buf_size += 67;
- rcu_read_unlock();
- }
-
- buff = kmalloc(buf_size, GFP_ATOMIC);
- if (!buff) {
- ret = -ENOMEM;
- goto out;
- }
-
- buff[0] = '\0';
- pos = 0;
-
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(tt_global_entry, node,
head, hash_entry) {
- pos += snprintf(buff + pos, 69,
- " * %pM (%3u) via %pM (%3u) "
+ seq_printf(seq, " * %pM (%3u) via %pM (%3u) "
"[%c%c%c]\n", tt_global_entry->addr,
tt_global_entry->ttvn,
tt_global_entry->orig_node->orig,
@@ -659,9 +611,6 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
}
rcu_read_unlock();
}
-
- seq_printf(seq, "%s", buff);
- kfree(buff);
out:
if (primary_if)
hardif_free_ref(primary_if);
@@ -716,7 +665,7 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
struct orig_node *orig_node, const char *message)
{
struct tt_global_entry *tt_global_entry;
- int i;
+ uint32_t i;
struct hashtable_t *hash = bat_priv->tt_global_hash;
struct hlist_node *node, *safe;
struct hlist_head *head;
@@ -735,9 +684,10 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
if (tt_global_entry->orig_node == orig_node) {
bat_dbg(DBG_TT, bat_priv,
"Deleting global tt entry %pM "
- "(via %pM): originator time out\n",
+ "(via %pM): %s\n",
tt_global_entry->addr,
- tt_global_entry->orig_node->orig);
+ tt_global_entry->orig_node->orig,
+ message);
hlist_del_rcu(node);
tt_global_entry_free_ref(tt_global_entry);
}
@@ -754,7 +704,7 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv)
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
- int i;
+ uint32_t i;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -788,7 +738,7 @@ static void tt_global_table_free(struct bat_priv *bat_priv)
struct tt_global_entry *tt_global_entry;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
- int i;
+ uint32_t i;
if (!bat_priv->tt_global_hash)
return;
@@ -874,7 +824,8 @@ uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
struct tt_global_entry *tt_global_entry;
struct hlist_node *node;
struct hlist_head *head;
- int i, j;
+ uint32_t i;
+ int j;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -911,7 +862,8 @@ uint16_t tt_local_crc(struct bat_priv *bat_priv)
struct tt_local_entry *tt_local_entry;
struct hlist_node *node;
struct hlist_head *head;
- int i, j;
+ uint32_t i;
+ int j;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -1048,7 +1000,7 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
struct sk_buff *skb = NULL;
uint16_t tt_tot, tt_count;
ssize_t tt_query_size = sizeof(struct tt_query_packet);
- int i;
+ uint32_t i;
if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
tt_len = primary_if->soft_iface->mtu - tt_query_size;
@@ -1187,11 +1139,11 @@ static bool send_other_tt_response(struct bat_priv *bat_priv,
(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
/* Let's get the orig node of the REAL destination */
- req_dst_orig_node = get_orig_node(bat_priv, tt_request->dst);
+ req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst);
if (!req_dst_orig_node)
goto out;
- res_dst_orig_node = get_orig_node(bat_priv, tt_request->src);
+ res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src);
if (!res_dst_orig_node)
goto out;
@@ -1317,7 +1269,7 @@ static bool send_my_tt_response(struct bat_priv *bat_priv,
my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
req_ttvn = tt_request->ttvn;
- orig_node = get_orig_node(bat_priv, tt_request->src);
+ orig_node = orig_hash_find(bat_priv, tt_request->src);
if (!orig_node)
goto out;
@@ -1725,7 +1677,7 @@ void tt_free(struct bat_priv *bat_priv)
* entry */
static void tt_local_reset_flags(struct bat_priv *bat_priv, uint16_t flags)
{
- int i;
+ uint32_t i;
struct hashtable_t *hash = bat_priv->tt_local_hash;
struct hlist_head *head;
struct hlist_node *node;
@@ -1758,7 +1710,7 @@ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
- int i;
+ uint32_t i;
if (!hash)
return;
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index f81a6b668b0..7445413253c 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -66,7 +66,7 @@ static int vis_info_cmp(const struct hlist_node *node, const void *data2)
/* hash function to choose an entry in a hash table of given size */
/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-static int vis_info_choose(const void *data, int size)
+static uint32_t vis_info_choose(const void *data, uint32_t size)
{
const struct vis_info *vis_info = data;
const struct vis_packet *packet;
@@ -96,7 +96,7 @@ static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
struct hlist_head *head;
struct hlist_node *node;
struct vis_info *vis_info, *vis_info_tmp = NULL;
- int index;
+ uint32_t index;
if (!hash)
return NULL;
@@ -202,7 +202,8 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
HLIST_HEAD(vis_if_list);
struct if_list_entry *entry;
struct hlist_node *pos, *n;
- int i, j, ret = 0;
+ uint32_t i;
+ int j, ret = 0;
int vis_server = atomic_read(&bat_priv->vis_mode);
size_t buff_pos, buf_size;
char *buff;
@@ -556,7 +557,8 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
struct hlist_head *head;
struct orig_node *orig_node;
struct vis_packet *packet;
- int best_tq = -1, i;
+ int best_tq = -1;
+ uint32_t i;
packet = (struct vis_packet *)info->skb_packet->data;
@@ -608,7 +610,8 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
struct vis_info_entry *entry;
struct tt_local_entry *tt_local_entry;
- int best_tq = -1, i;
+ int best_tq = -1;
+ uint32_t i;
info->first_seen = jiffies;
packet->vis_type = atomic_read(&bat_priv->vis_mode);
@@ -696,7 +699,7 @@ unlock:
* held */
static void purge_vis_packets(struct bat_priv *bat_priv)
{
- int i;
+ uint32_t i;
struct hashtable_t *hash = bat_priv->vis_hash;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
@@ -733,7 +736,7 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
struct sk_buff *skb;
struct hard_iface *hard_iface;
uint8_t dstaddr[ETH_ALEN];
- int i;
+ uint32_t i;
packet = (struct vis_packet *)info->skb_packet->data;
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 91bcd3a961e..a6cd856046a 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -65,15 +65,13 @@ static DECLARE_RWSEM(bnep_session_sem);
static struct bnep_session *__bnep_get_session(u8 *dst)
{
struct bnep_session *s;
- struct list_head *p;
BT_DBG("");
- list_for_each(p, &bnep_session_list) {
- s = list_entry(p, struct bnep_session, list);
+ list_for_each_entry(s, &bnep_session_list, list)
if (!compare_ether_addr(dst, s->eh.h_source))
return s;
- }
+
return NULL;
}
@@ -667,17 +665,14 @@ static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s)
int bnep_get_connlist(struct bnep_connlist_req *req)
{
- struct list_head *p;
+ struct bnep_session *s;
int err = 0, n = 0;
down_read(&bnep_session_sem);
- list_for_each(p, &bnep_session_list) {
- struct bnep_session *s;
+ list_for_each_entry(s, &bnep_session_list, list) {
struct bnep_conninfo ci;
- s = list_entry(p, struct bnep_session, list);
-
__bnep_copy_ci(&ci, s);
if (copy_to_user(req->ci, &ci, sizeof(ci))) {
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 7d00ddf9e9d..9e8940b24bb 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -53,15 +53,13 @@ static LIST_HEAD(cmtp_session_list);
static struct cmtp_session *__cmtp_get_session(bdaddr_t *bdaddr)
{
struct cmtp_session *session;
- struct list_head *p;
BT_DBG("");
- list_for_each(p, &cmtp_session_list) {
- session = list_entry(p, struct cmtp_session, list);
+ list_for_each_entry(session, &cmtp_session_list, list)
if (!bacmp(bdaddr, &session->bdaddr))
return session;
- }
+
return NULL;
}
@@ -431,19 +429,16 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
int cmtp_get_connlist(struct cmtp_connlist_req *req)
{
- struct list_head *p;
+ struct cmtp_session *session;
int err = 0, n = 0;
BT_DBG("");
down_read(&cmtp_session_sem);
- list_for_each(p, &cmtp_session_list) {
- struct cmtp_session *session;
+ list_for_each_entry(session, &cmtp_session_list, list) {
struct cmtp_conninfo ci;
- session = list_entry(p, struct cmtp_session, list);
-
__cmtp_copy_session(session, &ci);
if (copy_to_user(req->ci, &ci, sizeof(ci))) {
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index e0af7237cd9..de0b93e4598 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -374,6 +374,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
skb_queue_head_init(&conn->data_q);
+ hci_chan_hash_init(conn);
+
setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
@@ -432,6 +434,8 @@ int hci_conn_del(struct hci_conn *conn)
tasklet_disable(&hdev->tx_task);
+ hci_chan_hash_flush(conn);
+
hci_conn_hash_del(hdev, conn);
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
@@ -453,16 +457,13 @@ int hci_conn_del(struct hci_conn *conn)
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
int use_src = bacmp(src, BDADDR_ANY);
- struct hci_dev *hdev = NULL;
- struct list_head *p;
+ struct hci_dev *hdev = NULL, *d;
BT_DBG("%s -> %s", batostr(src), batostr(dst));
read_lock_bh(&hci_dev_list_lock);
- list_for_each(p, &hci_dev_list) {
- struct hci_dev *d = list_entry(p, struct hci_dev, list);
-
+ list_for_each_entry(d, &hci_dev_list, list) {
if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
continue;
@@ -819,7 +820,7 @@ void hci_conn_hash_flush(struct hci_dev *hdev)
c->state = BT_CLOSED;
- hci_proto_disconn_cfm(c, 0x16);
+ hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
hci_conn_del(c);
}
}
@@ -855,10 +856,10 @@ EXPORT_SYMBOL(hci_conn_put_device);
int hci_get_conn_list(void __user *arg)
{
+ register struct hci_conn *c;
struct hci_conn_list_req req, *cl;
struct hci_conn_info *ci;
struct hci_dev *hdev;
- struct list_head *p;
int n = 0, size, err;
if (copy_from_user(&req, arg, sizeof(req)))
@@ -882,10 +883,7 @@ int hci_get_conn_list(void __user *arg)
ci = cl->conn_info;
hci_dev_lock_bh(hdev);
- list_for_each(p, &hdev->conn_hash.list) {
- register struct hci_conn *c;
- c = list_entry(p, struct hci_conn, list);
-
+ list_for_each_entry(c, &hdev->conn_hash.list, list) {
bacpy(&(ci + n)->bdaddr, &c->dst);
(ci + n)->handle = c->handle;
(ci + n)->type = c->type;
@@ -956,3 +954,52 @@ int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}
+
+struct hci_chan *hci_chan_create(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+ struct hci_chan *chan;
+
+ BT_DBG("%s conn %p", hdev->name, conn);
+
+ chan = kzalloc(sizeof(struct hci_chan), GFP_ATOMIC);
+ if (!chan)
+ return NULL;
+
+ chan->conn = conn;
+ skb_queue_head_init(&chan->data_q);
+
+ tasklet_disable(&hdev->tx_task);
+ hci_chan_hash_add(conn, chan);
+ tasklet_enable(&hdev->tx_task);
+
+ return chan;
+}
+
+int hci_chan_del(struct hci_chan *chan)
+{
+ struct hci_conn *conn = chan->conn;
+ struct hci_dev *hdev = conn->hdev;
+
+ BT_DBG("%s conn %p chan %p", hdev->name, conn, chan);
+
+ tasklet_disable(&hdev->tx_task);
+ hci_chan_hash_del(conn, chan);
+ tasklet_enable(&hdev->tx_task);
+
+ skb_queue_purge(&chan->data_q);
+ kfree(chan);
+
+ return 0;
+}
+
+void hci_chan_hash_flush(struct hci_conn *conn)
+{
+ struct hci_chan_hash *h = &conn->chan_hash;
+ struct hci_chan *chan, *tmp;
+
+ BT_DBG("conn %p", conn);
+
+ list_for_each_entry_safe(chan, tmp, &h->list, list)
+ hci_chan_del(chan);
+}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index be84ae33ae3..fb3feeb185d 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -319,8 +319,7 @@ static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
* Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
- struct hci_dev *hdev = NULL;
- struct list_head *p;
+ struct hci_dev *hdev = NULL, *d;
BT_DBG("%d", index);
@@ -328,8 +327,7 @@ struct hci_dev *hci_dev_get(int index)
return NULL;
read_lock(&hci_dev_list_lock);
- list_for_each(p, &hci_dev_list) {
- struct hci_dev *d = list_entry(p, struct hci_dev, list);
+ list_for_each_entry(d, &hci_dev_list, list) {
if (d->id == index) {
hdev = hci_dev_hold(d);
break;
@@ -551,8 +549,11 @@ int hci_dev_open(__u16 dev)
hci_dev_hold(hdev);
set_bit(HCI_UP, &hdev->flags);
hci_notify(hdev, HCI_DEV_UP);
- if (!test_bit(HCI_SETUP, &hdev->flags))
- mgmt_powered(hdev->id, 1);
+ if (!test_bit(HCI_SETUP, &hdev->flags)) {
+ hci_dev_lock_bh(hdev);
+ mgmt_powered(hdev, 1);
+ hci_dev_unlock_bh(hdev);
+ }
} else {
/* Init failed, cleanup */
tasklet_kill(&hdev->rx_task);
@@ -597,6 +598,14 @@ static int hci_dev_do_close(struct hci_dev *hdev)
tasklet_kill(&hdev->rx_task);
tasklet_kill(&hdev->tx_task);
+ if (hdev->discov_timeout > 0) {
+ cancel_delayed_work(&hdev->discov_off);
+ hdev->discov_timeout = 0;
+ }
+
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+ cancel_delayed_work(&hdev->power_off);
+
hci_dev_lock_bh(hdev);
inquiry_cache_flush(hdev);
hci_conn_hash_flush(hdev);
@@ -636,7 +645,9 @@ static int hci_dev_do_close(struct hci_dev *hdev)
* and no tasks are scheduled. */
hdev->close(hdev);
- mgmt_powered(hdev->id, 0);
+ hci_dev_lock_bh(hdev);
+ mgmt_powered(hdev, 0);
+ hci_dev_unlock_bh(hdev);
/* Clear flags */
hdev->flags = 0;
@@ -794,9 +805,9 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
int hci_get_dev_list(void __user *arg)
{
+ struct hci_dev *hdev;
struct hci_dev_list_req *dl;
struct hci_dev_req *dr;
- struct list_head *p;
int n = 0, size, err;
__u16 dev_num;
@@ -815,12 +826,9 @@ int hci_get_dev_list(void __user *arg)
dr = dl->dev_req;
read_lock_bh(&hci_dev_list_lock);
- list_for_each(p, &hci_dev_list) {
- struct hci_dev *hdev;
-
- hdev = list_entry(p, struct hci_dev, list);
-
- hci_del_off_timer(hdev);
+ list_for_each_entry(hdev, &hci_dev_list, list) {
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+ cancel_delayed_work(&hdev->power_off);
if (!test_bit(HCI_MGMT, &hdev->flags))
set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -855,7 +863,8 @@ int hci_get_dev_info(void __user *arg)
if (!hdev)
return -ENODEV;
- hci_del_off_timer(hdev);
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+ cancel_delayed_work_sync(&hdev->power_off);
if (!test_bit(HCI_MGMT, &hdev->flags))
set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -912,6 +921,7 @@ struct hci_dev *hci_alloc_dev(void)
if (!hdev)
return NULL;
+ hci_init_sysfs(hdev);
skb_queue_head_init(&hdev->driver_init);
return hdev;
@@ -938,39 +948,41 @@ static void hci_power_on(struct work_struct *work)
return;
if (test_bit(HCI_AUTO_OFF, &hdev->flags))
- mod_timer(&hdev->off_timer,
- jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
+ queue_delayed_work(hdev->workqueue, &hdev->power_off,
+ msecs_to_jiffies(AUTO_OFF_TIMEOUT));
if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
- mgmt_index_added(hdev->id);
+ mgmt_index_added(hdev);
}
static void hci_power_off(struct work_struct *work)
{
- struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ power_off.work);
BT_DBG("%s", hdev->name);
+ clear_bit(HCI_AUTO_OFF, &hdev->flags);
+
hci_dev_close(hdev->id);
}
-static void hci_auto_off(unsigned long data)
+static void hci_discov_off(struct work_struct *work)
{
- struct hci_dev *hdev = (struct hci_dev *) data;
+ struct hci_dev *hdev;
+ u8 scan = SCAN_PAGE;
+
+ hdev = container_of(work, struct hci_dev, discov_off.work);
BT_DBG("%s", hdev->name);
- clear_bit(HCI_AUTO_OFF, &hdev->flags);
+ hci_dev_lock_bh(hdev);
- queue_work(hdev->workqueue, &hdev->power_off);
-}
+ hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
-void hci_del_off_timer(struct hci_dev *hdev)
-{
- BT_DBG("%s", hdev->name);
+ hdev->discov_timeout = 0;
- clear_bit(HCI_AUTO_OFF, &hdev->flags);
- del_timer(&hdev->off_timer);
+ hci_dev_unlock_bh(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
@@ -1007,16 +1019,11 @@ int hci_link_keys_clear(struct hci_dev *hdev)
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
- struct list_head *p;
-
- list_for_each(p, &hdev->link_keys) {
- struct link_key *k;
-
- k = list_entry(p, struct link_key, list);
+ struct link_key *k;
+ list_for_each_entry(k, &hdev->link_keys, list)
if (bacmp(bdaddr, &k->bdaddr) == 0)
return k;
- }
return NULL;
}
@@ -1138,7 +1145,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
persistent = hci_persistent_key(hdev, conn, type, old_key_type);
- mgmt_new_key(hdev->id, key, persistent);
+ mgmt_new_link_key(hdev, key, persistent);
if (!persistent) {
list_del(&key->list);
@@ -1181,7 +1188,7 @@ int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
memcpy(id->rand, rand, sizeof(id->rand));
if (new_key)
- mgmt_new_key(hdev->id, key, old_key_type);
+ mgmt_new_link_key(hdev, key, old_key_type);
return 0;
}
@@ -1279,16 +1286,11 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
bdaddr_t *bdaddr)
{
- struct list_head *p;
-
- list_for_each(p, &hdev->blacklist) {
- struct bdaddr_list *b;
-
- b = list_entry(p, struct bdaddr_list, list);
+ struct bdaddr_list *b;
+ list_for_each_entry(b, &hdev->blacklist, list)
if (bacmp(bdaddr, &b->bdaddr) == 0)
return b;
- }
return NULL;
}
@@ -1327,7 +1329,7 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
list_add(&entry->list, &hdev->blacklist);
- return mgmt_device_blocked(hdev->id, bdaddr);
+ return mgmt_device_blocked(hdev, bdaddr);
}
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
@@ -1346,7 +1348,7 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
list_del(&entry->list);
kfree(entry);
- return mgmt_device_unblocked(hdev->id, bdaddr);
+ return mgmt_device_unblocked(hdev, bdaddr);
}
static void hci_clear_adv_cache(unsigned long arg)
@@ -1425,7 +1427,7 @@ int hci_add_adv_entry(struct hci_dev *hdev,
int hci_register_dev(struct hci_dev *hdev)
{
struct list_head *head = &hci_dev_list, *p;
- int i, id = 0;
+ int i, id, error;
BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
hdev->bus, hdev->owner);
@@ -1433,6 +1435,11 @@ int hci_register_dev(struct hci_dev *hdev)
if (!hdev->open || !hdev->close || !hdev->destruct)
return -EINVAL;
+ /* Do not allow HCI_AMP devices to register at index 0,
+ * so the index can be used as the AMP controller ID.
+ */
+ id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
+
write_lock_bh(&hci_dev_list_lock);
/* Find first available device id */
@@ -1479,6 +1486,8 @@ int hci_register_dev(struct hci_dev *hdev)
hci_conn_hash_init(hdev);
+ INIT_LIST_HEAD(&hdev->mgmt_pending);
+
INIT_LIST_HEAD(&hdev->blacklist);
INIT_LIST_HEAD(&hdev->uuids);
@@ -1492,8 +1501,9 @@ int hci_register_dev(struct hci_dev *hdev)
(unsigned long) hdev);
INIT_WORK(&hdev->power_on, hci_power_on);
- INIT_WORK(&hdev->power_off, hci_power_off);
- setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
+ INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
+
+ INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
@@ -1502,10 +1512,14 @@ int hci_register_dev(struct hci_dev *hdev)
write_unlock_bh(&hci_dev_list_lock);
hdev->workqueue = create_singlethread_workqueue(hdev->name);
- if (!hdev->workqueue)
- goto nomem;
+ if (!hdev->workqueue) {
+ error = -ENOMEM;
+ goto err;
+ }
- hci_register_sysfs(hdev);
+ error = hci_add_sysfs(hdev);
+ if (error < 0)
+ goto err_wqueue;
hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
@@ -1524,17 +1538,19 @@ int hci_register_dev(struct hci_dev *hdev)
return id;
-nomem:
+err_wqueue:
+ destroy_workqueue(hdev->workqueue);
+err:
write_lock_bh(&hci_dev_list_lock);
list_del(&hdev->list);
write_unlock_bh(&hci_dev_list_lock);
- return -ENOMEM;
+ return error;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
-int hci_unregister_dev(struct hci_dev *hdev)
+void hci_unregister_dev(struct hci_dev *hdev)
{
int i;
@@ -1550,8 +1566,15 @@ int hci_unregister_dev(struct hci_dev *hdev)
kfree_skb(hdev->reassembly[i]);
if (!test_bit(HCI_INIT, &hdev->flags) &&
- !test_bit(HCI_SETUP, &hdev->flags))
- mgmt_index_removed(hdev->id);
+ !test_bit(HCI_SETUP, &hdev->flags)) {
+ hci_dev_lock_bh(hdev);
+ mgmt_index_removed(hdev);
+ hci_dev_unlock_bh(hdev);
+ }
+
+ /* mgmt_index_removed should take care of emptying the
+ * pending list */
+ BUG_ON(!list_empty(&hdev->mgmt_pending));
hci_notify(hdev, HCI_DEV_UNREG);
@@ -1560,9 +1583,8 @@ int hci_unregister_dev(struct hci_dev *hdev)
rfkill_destroy(hdev->rfkill);
}
- hci_unregister_sysfs(hdev);
+ hci_del_sysfs(hdev);
- hci_del_off_timer(hdev);
del_timer(&hdev->adv_timer);
destroy_workqueue(hdev->workqueue);
@@ -1576,8 +1598,6 @@ int hci_unregister_dev(struct hci_dev *hdev)
hci_dev_unlock_bh(hdev);
__hci_dev_put(hdev);
-
- return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
@@ -1948,23 +1968,18 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
hdr->dlen = cpu_to_le16(len);
}
-void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
+static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
+ struct sk_buff *skb, __u16 flags)
{
struct hci_dev *hdev = conn->hdev;
struct sk_buff *list;
- BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
-
- skb->dev = (void *) hdev;
- bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
- hci_add_acl_hdr(skb, conn->handle, flags);
-
list = skb_shinfo(skb)->frag_list;
if (!list) {
/* Non fragmented */
BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
- skb_queue_tail(&conn->data_q, skb);
+ skb_queue_tail(queue, skb);
} else {
/* Fragmented */
BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
@@ -1972,9 +1987,9 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
skb_shinfo(skb)->frag_list = NULL;
/* Queue all fragments atomically */
- spin_lock_bh(&conn->data_q.lock);
+ spin_lock_bh(&queue->lock);
- __skb_queue_tail(&conn->data_q, skb);
+ __skb_queue_tail(queue, skb);
flags &= ~ACL_START;
flags |= ACL_CONT;
@@ -1987,11 +2002,25 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
- __skb_queue_tail(&conn->data_q, skb);
+ __skb_queue_tail(queue, skb);
} while (list);
- spin_unlock_bh(&conn->data_q.lock);
+ spin_unlock_bh(&queue->lock);
}
+}
+
+void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
+{
+ struct hci_conn *conn = chan->conn;
+ struct hci_dev *hdev = conn->hdev;
+
+ BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
+
+ skb->dev = (void *) hdev;
+ bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
+ hci_add_acl_hdr(skb, conn->handle, flags);
+
+ hci_queue_acl(conn, &chan->data_q, skb, flags);
tasklet_schedule(&hdev->tx_task);
}
@@ -2026,16 +2055,12 @@ EXPORT_SYMBOL(hci_send_sco);
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct hci_conn *conn = NULL;
+ struct hci_conn *conn = NULL, *c;
int num = 0, min = ~0;
- struct list_head *p;
/* We don't have to lock device here. Connections are always
* added and removed with TX task disabled. */
- list_for_each(p, &h->list) {
- struct hci_conn *c;
- c = list_entry(p, struct hci_conn, list);
-
+ list_for_each_entry(c, &h->list, list) {
if (c->type != type || skb_queue_empty(&c->data_q))
continue;
@@ -2084,14 +2109,12 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct list_head *p;
- struct hci_conn *c;
+ struct hci_conn *c;
BT_ERR("%s link tx timeout", hdev->name);
/* Kill stalled connections */
- list_for_each(p, &h->list) {
- c = list_entry(p, struct hci_conn, list);
+ list_for_each_entry(c, &h->list, list) {
if (c->type == type && c->sent) {
BT_ERR("%s killing stalled connection %s",
hdev->name, batostr(&c->dst));
@@ -2100,11 +2123,137 @@ static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
}
}
-static inline void hci_sched_acl(struct hci_dev *hdev)
+static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
+ int *quote)
{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_chan *chan = NULL;
+ int num = 0, min = ~0, cur_prio = 0;
struct hci_conn *conn;
+ int cnt, q, conn_num = 0;
+
+ BT_DBG("%s", hdev->name);
+
+ list_for_each_entry(conn, &h->list, list) {
+ struct hci_chan_hash *ch;
+ struct hci_chan *tmp;
+
+ if (conn->type != type)
+ continue;
+
+ if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+ continue;
+
+ conn_num++;
+
+ ch = &conn->chan_hash;
+
+ list_for_each_entry(tmp, &ch->list, list) {
+ struct sk_buff *skb;
+
+ if (skb_queue_empty(&tmp->data_q))
+ continue;
+
+ skb = skb_peek(&tmp->data_q);
+ if (skb->priority < cur_prio)
+ continue;
+
+ if (skb->priority > cur_prio) {
+ num = 0;
+ min = ~0;
+ cur_prio = skb->priority;
+ }
+
+ num++;
+
+ if (conn->sent < min) {
+ min = conn->sent;
+ chan = tmp;
+ }
+ }
+
+ if (hci_conn_num(hdev, type) == conn_num)
+ break;
+ }
+
+ if (!chan)
+ return NULL;
+
+ switch (chan->conn->type) {
+ case ACL_LINK:
+ cnt = hdev->acl_cnt;
+ break;
+ case SCO_LINK:
+ case ESCO_LINK:
+ cnt = hdev->sco_cnt;
+ break;
+ case LE_LINK:
+ cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
+ break;
+ default:
+ cnt = 0;
+ BT_ERR("Unknown link type");
+ }
+
+ q = cnt / num;
+ *quote = q ? q : 1;
+ BT_DBG("chan %p quote %d", chan, *quote);
+ return chan;
+}
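
hci_chan_sent() above first restricts itself to the highest skb priority found at the head of any channel queue, then among those channels picks the one whose connection has the fewest frames in flight, and finally splits the controller's free buffer count evenly while never handing out a quota of zero. The selection and quota arithmetic redone as a small standalone program over made-up channel data (names and numbers are illustrative only, not from the patch):

    #include <stdio.h>

    struct fake_chan {
            const char *name;
            unsigned int head_prio;  /* priority of the skb at the queue head */
            unsigned int conn_sent;  /* frames in flight on the owning conn */
    };

    int main(void)
    {
            struct fake_chan chans[] = {
                    { "chan A", 3, 10 },
                    { "chan B", 5,  7 },
                    { "chan C", 5,  2 },   /* same priority as B, fewer sent */
            };
            unsigned int num = 0, min = ~0u, cur_prio = 0, cnt = 8, q;
            struct fake_chan *pick = NULL;

            for (unsigned int i = 0; i < 3; i++) {
                    struct fake_chan *c = &chans[i];

                    if (c->head_prio < cur_prio)
                            continue;               /* lower priority: skip */
                    if (c->head_prio > cur_prio) {  /* higher priority: restart */
                            num = 0;
                            min = ~0u;
                            cur_prio = c->head_prio;
                    }
                    num++;
                    if (c->conn_sent < min) {       /* least-sent wins the tie */
                            min = c->conn_sent;
                            pick = c;
                    }
            }

            q = cnt / num;                          /* split free buffers evenly */
            printf("%s gets quote %u\n", pick->name, q ? q : 1);
            return 0;
    }

With the sample data, "chan C" wins (priority 5, least sent) and receives a quota of 8 / 2 = 4.
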
+
+static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *conn;
+ int num = 0;
+
+ BT_DBG("%s", hdev->name);
+
+ list_for_each_entry(conn, &h->list, list) {
+ struct hci_chan_hash *ch;
+ struct hci_chan *chan;
+
+ if (conn->type != type)
+ continue;
+
+ if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+ continue;
+
+ num++;
+
+ ch = &conn->chan_hash;
+ list_for_each_entry(chan, &ch->list, list) {
+ struct sk_buff *skb;
+
+ if (chan->sent) {
+ chan->sent = 0;
+ continue;
+ }
+
+ if (skb_queue_empty(&chan->data_q))
+ continue;
+
+ skb = skb_peek(&chan->data_q);
+ if (skb->priority >= HCI_PRIO_MAX - 1)
+ continue;
+
+ skb->priority = HCI_PRIO_MAX - 1;
+
+ BT_DBG("chan %p skb %p promoted to %d", chan, skb,
+ skb->priority);
+ }
+
+ if (hci_conn_num(hdev, type) == num)
+ break;
+ }
+}
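
hci_prio_recalculate() runs after a transmit round and counters starvation: a channel that managed to send something only has its per-round counter reset, while a backlogged channel that sent nothing gets the skb at its queue head promoted to HCI_PRIO_MAX - 1 so it cannot be locked out indefinitely by higher-priority traffic. The promotion rule on its own, with an assumed stand-in value for HCI_PRIO_MAX (the real constant lives in the Bluetooth headers):

    #include <stdio.h>

    #define FAKE_PRIO_MAX 8          /* stand-in for HCI_PRIO_MAX, assumed value */

    struct fake_chan {
            unsigned int sent;       /* frames sent in the last round */
            int          head_prio;  /* -1 if the queue is empty */
    };

    static void recalc(struct fake_chan *c)
    {
            if (c->sent) {           /* it got service: just reset the counter */
                    c->sent = 0;
                    return;
            }
            if (c->head_prio < 0)    /* nothing queued: nothing to promote */
                    return;
            if (c->head_prio >= FAKE_PRIO_MAX - 1)
                    return;          /* already at the promoted level */
            c->head_prio = FAKE_PRIO_MAX - 1;
    }

    int main(void)
    {
            struct fake_chan served  = { .sent = 3, .head_prio = 2 };
            struct fake_chan starved = { .sent = 0, .head_prio = 2 };

            recalc(&served);
            recalc(&starved);
            printf("served: sent=%u prio=%d, starved: sent=%u prio=%d\n",
                   served.sent, served.head_prio,
                   starved.sent, starved.head_prio);
            return 0;
    }
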
+
+static inline void hci_sched_acl(struct hci_dev *hdev)
+{
+ struct hci_chan *chan;
struct sk_buff *skb;
int quote;
+ unsigned int cnt;
BT_DBG("%s", hdev->name);
@@ -2118,19 +2267,35 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
hci_link_tx_to(hdev, ACL_LINK);
}
- while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
- while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
- BT_DBG("skb %p len %d", skb, skb->len);
+ cnt = hdev->acl_cnt;
+
+ while (hdev->acl_cnt &&
+ (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
+ u32 priority = (skb_peek(&chan->data_q))->priority;
+ while (quote-- && (skb = skb_peek(&chan->data_q))) {
+ BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+ skb->len, skb->priority);
- hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
+ /* Stop if priority has changed */
+ if (skb->priority < priority)
+ break;
+
+ skb = skb_dequeue(&chan->data_q);
+
+ hci_conn_enter_active_mode(chan->conn,
+ bt_cb(skb)->force_active);
hci_send_frame(skb);
hdev->acl_last_tx = jiffies;
hdev->acl_cnt--;
- conn->sent++;
+ chan->sent++;
+ chan->conn->sent++;
}
}
+
+ if (cnt != hdev->acl_cnt)
+ hci_prio_recalculate(hdev, ACL_LINK);
}
/* Schedule SCO */
@@ -2182,9 +2347,9 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
static inline void hci_sched_le(struct hci_dev *hdev)
{
- struct hci_conn *conn;
+ struct hci_chan *chan;
struct sk_buff *skb;
- int quote, cnt;
+ int quote, cnt, tmp;
BT_DBG("%s", hdev->name);
@@ -2200,21 +2365,35 @@ static inline void hci_sched_le(struct hci_dev *hdev)
}
cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
- while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
- while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
- BT_DBG("skb %p len %d", skb, skb->len);
+ tmp = cnt;
+ while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
+ u32 priority = (skb_peek(&chan->data_q))->priority;
+ while (quote-- && (skb = skb_peek(&chan->data_q))) {
+ BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+ skb->len, skb->priority);
+
+ /* Stop if priority has changed */
+ if (skb->priority < priority)
+ break;
+
+ skb = skb_dequeue(&chan->data_q);
hci_send_frame(skb);
hdev->le_last_tx = jiffies;
cnt--;
- conn->sent++;
+ chan->sent++;
+ chan->conn->sent++;
}
}
+
if (hdev->le_pkts)
hdev->le_cnt = cnt;
else
hdev->acl_cnt = cnt;
+
+ if (cnt != tmp)
+ hci_prio_recalculate(hdev, LE_LINK);
}
static void hci_tx_task(unsigned long arg)
@@ -2407,3 +2586,31 @@ static void hci_cmd_task(unsigned long arg)
}
}
}
+
+int hci_do_inquiry(struct hci_dev *hdev, u8 length)
+{
+ /* General inquiry access code (GIAC) */
+ u8 lap[3] = { 0x33, 0x8b, 0x9e };
+ struct hci_cp_inquiry cp;
+
+ BT_DBG("%s", hdev->name);
+
+ if (test_bit(HCI_INQUIRY, &hdev->flags))
+ return -EINPROGRESS;
+
+ memset(&cp, 0, sizeof(cp));
+ memcpy(&cp.lap, lap, sizeof(cp.lap));
+ cp.length = length;
+
+ return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
+}
+
+int hci_cancel_inquiry(struct hci_dev *hdev)
+{
+ BT_DBG("%s", hdev->name);
+
+ if (!test_bit(HCI_INQUIRY, &hdev->flags))
+ return -EPERM;
+
+ return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+}
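
hci_do_inquiry() hard-codes the General Inquiry Access Code as the byte sequence 0x33 0x8b 0x9e because the HCI_OP_INQUIRY parameters carry the 24-bit LAP least-significant byte first; reassembled, those bytes are the familiar GIAC value 0x9E8B33. A two-line check:

    #include <stdio.h>

    int main(void)
    {
            unsigned char lap[3] = { 0x33, 0x8b, 0x9e };          /* as sent on the wire */
            unsigned int giac = lap[0] | (lap[1] << 8) | (lap[2] << 16);

            printf("GIAC = 0x%06X\n", giac);                      /* prints 0x9E8B33 */
            return 0;
    }
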
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index d7d96b6b1f0..a89cf1f24e4 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -58,9 +58,11 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
if (status)
return;
- if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
- test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 0);
+ clear_bit(HCI_INQUIRY, &hdev->flags);
+
+ hci_dev_lock(hdev);
+ mgmt_discovering(hdev, 0);
+ hci_dev_unlock(hdev);
hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
@@ -76,10 +78,6 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
if (status)
return;
- if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
- test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 0);
-
hci_conn_check_pending(hdev);
}
@@ -205,13 +203,15 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
if (!sent)
return;
+ hci_dev_lock(hdev);
+
if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_set_local_name_complete(hdev->id, sent, status);
+ mgmt_set_local_name_complete(hdev, sent, status);
- if (status)
- return;
+ if (status == 0)
+ memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
- memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
+ hci_dev_unlock(hdev);
}
static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -274,7 +274,8 @@ static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ __u8 param, status = *((__u8 *) skb->data);
+ int old_pscan, old_iscan;
void *sent;
BT_DBG("%s status 0x%x", hdev->name, status);
@@ -283,28 +284,40 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
if (!sent)
return;
- if (!status) {
- __u8 param = *((__u8 *) sent);
- int old_pscan, old_iscan;
-
- old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
- old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
+ param = *((__u8 *) sent);
- if (param & SCAN_INQUIRY) {
- set_bit(HCI_ISCAN, &hdev->flags);
- if (!old_iscan)
- mgmt_discoverable(hdev->id, 1);
- } else if (old_iscan)
- mgmt_discoverable(hdev->id, 0);
+ hci_dev_lock(hdev);
- if (param & SCAN_PAGE) {
- set_bit(HCI_PSCAN, &hdev->flags);
- if (!old_pscan)
- mgmt_connectable(hdev->id, 1);
- } else if (old_pscan)
- mgmt_connectable(hdev->id, 0);
+ if (status != 0) {
+ mgmt_write_scan_failed(hdev, param, status);
+ hdev->discov_timeout = 0;
+ goto done;
}
+ old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
+ old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
+
+ if (param & SCAN_INQUIRY) {
+ set_bit(HCI_ISCAN, &hdev->flags);
+ if (!old_iscan)
+ mgmt_discoverable(hdev, 1);
+ if (hdev->discov_timeout > 0) {
+ int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
+ queue_delayed_work(hdev->workqueue, &hdev->discov_off,
+ to);
+ }
+ } else if (old_iscan)
+ mgmt_discoverable(hdev, 0);
+
+ if (param & SCAN_PAGE) {
+ set_bit(HCI_PSCAN, &hdev->flags);
+ if (!old_pscan)
+ mgmt_connectable(hdev, 1);
+ } else if (old_pscan)
+ mgmt_connectable(hdev, 0);
+
+done:
+ hci_dev_unlock(hdev);
hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
}
@@ -748,6 +761,30 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
}
+static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ hdev->amp_status = rp->amp_status;
+ hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
+ hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
+ hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
+ hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
+ hdev->amp_type = rp->amp_type;
+ hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
+ hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
+ hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
+ hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
+
+ hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
+}
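
The new hci_cc_read_local_amp_info() handler simply copies the fields of the Read Local AMP Info reply into hdev, byte-swapping the multi-byte ones with __le16_to_cpu()/__le32_to_cpu() since HCI events are little-endian on the wire. The same conversion done by hand on a raw byte buffer; the field layout below is a cut-down illustration, not the full hci_rp_read_local_amp_info structure:

    #include <stdio.h>
    #include <stdint.h>

    /* Portable little-endian readers: what __le16_to_cpu()/__le32_to_cpu()
     * accomplish on a big-endian host, and a no-op on a little-endian one. */
    static uint16_t get_le16(const uint8_t *p)
    {
            return (uint16_t)(p[0] | (p[1] << 8));
    }

    static uint32_t get_le32(const uint8_t *p)
    {
            return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
                   ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    int main(void)
    {
            /* status, amp_status, then a 32-bit bandwidth and a 16-bit pal_cap,
             * all little-endian (made-up sample values for illustration). */
            uint8_t rsp[] = { 0x00, 0x01, 0x00, 0xca, 0x9a, 0x3b, 0x03, 0x00 };

            printf("status=%u amp_status=%u total_bw=%u pal_cap=%u\n",
                   (unsigned)rsp[0], (unsigned)rsp[1],
                   (unsigned)get_le32(&rsp[2]), (unsigned)get_le16(&rsp[6]));
            return 0;
    }
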
+
static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -804,19 +841,24 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ hci_dev_lock(hdev);
+
if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);
+ mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
if (rp->status != 0)
- return;
+ goto unlock;
cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
if (!cp)
- return;
+ goto unlock;
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
if (conn)
conn->pin_length = cp->pin_len;
+
+unlock:
+ hci_dev_unlock(hdev);
}
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -825,10 +867,15 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ hci_dev_lock(hdev);
+
if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
+ mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
rp->status);
+
+ hci_dev_unlock(hdev);
}
+
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -855,9 +902,13 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ hci_dev_lock(hdev);
+
if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
+ mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr,
rp->status);
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
@@ -867,9 +918,13 @@ static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ hci_dev_lock(hdev);
+
if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
+ mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
rp->status);
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
@@ -879,8 +934,10 @@ static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
BT_DBG("%s status 0x%x", hdev->name, rp->status);
- mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash,
+ hci_dev_lock(hdev);
+ mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
rp->randomizer, rp->status);
+ hci_dev_unlock(hdev);
}
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
@@ -955,12 +1012,18 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
if (status) {
hci_req_complete(hdev, HCI_OP_INQUIRY, status);
hci_conn_check_pending(hdev);
+ hci_dev_lock(hdev);
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_inquiry_failed(hdev, status);
+ hci_dev_unlock(hdev);
return;
}
- if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags) &&
- test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 1);
+ set_bit(HCI_INQUIRY, &hdev->flags);
+
+ hci_dev_lock(hdev);
+ mgmt_discovering(hdev, 1);
+ hci_dev_unlock(hdev);
}
static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
@@ -1339,13 +1402,16 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff
BT_DBG("%s status %d", hdev->name, status);
- if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
- test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 0);
-
hci_req_complete(hdev, HCI_OP_INQUIRY, status);
hci_conn_check_pending(hdev);
+
+ if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+ return;
+
+ hci_dev_lock(hdev);
+ mgmt_discovering(hdev, 0);
+ hci_dev_unlock(hdev);
}
static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1361,12 +1427,6 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
hci_dev_lock(hdev);
- if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
-
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 1);
- }
-
for (; num_rsp; num_rsp--, info++) {
bacpy(&data.bdaddr, &info->bdaddr);
data.pscan_rep_mode = info->pscan_rep_mode;
@@ -1377,8 +1437,8 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
data.rssi = 0x00;
data.ssp_mode = 0x00;
hci_inquiry_cache_update(hdev, &data);
- mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, 0,
- NULL);
+ mgmt_device_found(hdev, &info->bdaddr, ACL_LINK,
+ info->dev_class, 0, NULL);
}
hci_dev_unlock(hdev);
@@ -1412,7 +1472,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
conn->state = BT_CONFIG;
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
- mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
+ mgmt_connected(hdev, &ev->bdaddr, conn->type);
} else
conn->state = BT_CONNECTED;
@@ -1444,7 +1504,8 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
} else {
conn->state = BT_CLOSED;
if (conn->type == ACL_LINK)
- mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
+ mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
+ ev->status);
}
if (conn->type == ACL_LINK)
@@ -1531,7 +1592,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
struct hci_cp_reject_conn_req cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
- cp.reason = 0x0f;
+ cp.reason = HCI_ERROR_REJ_BAD_ADDR;
hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}
}
@@ -1544,7 +1605,9 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
BT_DBG("%s status %d", hdev->name, ev->status);
if (ev->status) {
- mgmt_disconnect_failed(hdev->id);
+ hci_dev_lock(hdev);
+ mgmt_disconnect_failed(hdev);
+ hci_dev_unlock(hdev);
return;
}
@@ -1557,7 +1620,7 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
conn->state = BT_CLOSED;
if (conn->type == ACL_LINK || conn->type == LE_LINK)
- mgmt_disconnected(hdev->id, &conn->dst);
+ mgmt_disconnected(hdev, &conn->dst, conn->type);
hci_proto_disconn_cfm(conn, ev->reason);
hci_conn_del(conn);
@@ -1588,7 +1651,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
conn->sec_level = conn->pending_sec_level;
}
} else {
- mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
+ mgmt_auth_failed(hdev, &conn->dst, ev->status);
}
clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
@@ -1643,7 +1706,7 @@ static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb
hci_dev_lock(hdev);
if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
- mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name);
+ mgmt_remote_name(hdev, &ev->bdaddr, ev->name);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (!conn)
@@ -1898,6 +1961,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_cc_write_ca_timeout(hdev, skb);
break;
+ case HCI_OP_READ_LOCAL_AMP_INFO:
+ hci_cc_read_local_amp_info(hdev, skb);
+ break;
+
case HCI_OP_DELETE_STORED_LINK_KEY:
hci_cc_delete_stored_link_key(hdev, skb);
break;
@@ -2029,7 +2096,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
case HCI_OP_DISCONNECT:
if (ev->status != 0)
- mgmt_disconnect_failed(hdev->id);
+ mgmt_disconnect_failed(hdev);
break;
case HCI_OP_LE_CREATE_CONN:
@@ -2194,7 +2261,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
else
secure = 0;
- mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure);
+ mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
}
unlock:
@@ -2363,12 +2430,6 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
hci_dev_lock(hdev);
- if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
-
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 1);
- }
-
if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
struct inquiry_info_with_rssi_and_pscan_mode *info;
info = (void *) (skb->data + 1);
@@ -2383,7 +2444,7 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
data.rssi = info->rssi;
data.ssp_mode = 0x00;
hci_inquiry_cache_update(hdev, &data);
- mgmt_device_found(hdev->id, &info->bdaddr,
+ mgmt_device_found(hdev, &info->bdaddr, ACL_LINK,
info->dev_class, info->rssi,
NULL);
}
@@ -2400,7 +2461,7 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
data.rssi = info->rssi;
data.ssp_mode = 0x00;
hci_inquiry_cache_update(hdev, &data);
- mgmt_device_found(hdev->id, &info->bdaddr,
+ mgmt_device_found(hdev, &info->bdaddr, ACL_LINK,
info->dev_class, info->rssi,
NULL);
}
@@ -2531,12 +2592,6 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
if (!num_rsp)
return;
- if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
-
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 1);
- }
-
hci_dev_lock(hdev);
for (; num_rsp; num_rsp--, info++) {
@@ -2549,8 +2604,8 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
data.rssi = info->rssi;
data.ssp_mode = 0x01;
hci_inquiry_cache_update(hdev, &data);
- mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class,
- info->rssi, info->data);
+ mgmt_device_found(hdev, &info->bdaddr, ACL_LINK,
+ info->dev_class, info->rssi, info->data);
}
hci_dev_unlock(hdev);
@@ -2614,7 +2669,7 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
struct hci_cp_io_capability_neg_reply cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
- cp.reason = 0x18; /* Pairing not allowed */
+ cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
sizeof(cp), &cp);
@@ -2706,7 +2761,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
}
confirm:
- mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey,
+ mgmt_user_confirm_request(hdev, &ev->bdaddr, ev->passkey,
confirm_hint);
unlock:
@@ -2732,7 +2787,7 @@ static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_
* event gets always produced as initiator and is also mapped to
* the mgmt_auth_failed event */
if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
- mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
+ mgmt_auth_failed(hdev, &conn->dst, ev->status);
hci_conn_put(conn);
@@ -2813,14 +2868,14 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
}
if (ev->status) {
- mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
+ mgmt_connect_failed(hdev, &ev->bdaddr, conn->type, ev->status);
hci_proto_connect_cfm(conn, ev->status);
conn->state = BT_CLOSED;
hci_conn_del(conn);
goto unlock;
}
- mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
+ mgmt_connected(hdev, &ev->bdaddr, conn->type);
conn->sec_level = BT_SECURITY_LOW;
conn->handle = __le16_to_cpu(ev->handle);
@@ -3104,5 +3159,5 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
kfree_skb(skb);
}
-module_param(enable_le, bool, 0444);
+module_param(enable_le, bool, 0644);
MODULE_PARM_DESC(enable_le, "Enable LE support");
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 661b461cf0b..c62d254a137 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -436,17 +436,12 @@ static const struct file_operations inquiry_cache_fops = {
static int blacklist_show(struct seq_file *f, void *p)
{
struct hci_dev *hdev = f->private;
- struct list_head *l;
+ struct bdaddr_list *b;
hci_dev_lock_bh(hdev);
- list_for_each(l, &hdev->blacklist) {
- struct bdaddr_list *b;
-
- b = list_entry(l, struct bdaddr_list, list);
-
+ list_for_each_entry(b, &hdev->blacklist, list)
seq_printf(f, "%s\n", batostr(&b->bdaddr));
- }
hci_dev_unlock_bh(hdev);
@@ -485,17 +480,12 @@ static void print_bt_uuid(struct seq_file *f, u8 *uuid)
static int uuids_show(struct seq_file *f, void *p)
{
struct hci_dev *hdev = f->private;
- struct list_head *l;
+ struct bt_uuid *uuid;
hci_dev_lock_bh(hdev);
- list_for_each(l, &hdev->uuids) {
- struct bt_uuid *uuid;
-
- uuid = list_entry(l, struct bt_uuid, list);
-
+ list_for_each_entry(uuid, &hdev->uuids, list)
print_bt_uuid(f, uuid->uuid);
- }
hci_dev_unlock_bh(hdev);
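
Both debugfs show routines above drop the list_for_each()/list_entry() pair in favour of list_for_each_entry(), which hides the container_of() arithmetic inside the iterator itself. A userspace re-creation of the idea; the kernel macro infers the entry type with typeof(), so the explicit type parameter below is only there to keep this sketch standard C:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* The new pattern: the iterator hands back the containing structure,
     * instead of raw nodes that each need a list_entry() conversion. */
    #define list_for_each_entry(pos, head, type, member)                \
            for ((pos) = container_of((head)->next, type, member);      \
                 &(pos)->member != (head);                              \
                 (pos) = container_of((pos)->member.next, type, member))

    struct bdaddr_entry { const char *bdaddr; struct list_head list; };

    static void list_add_tail_node(struct list_head *n, struct list_head *head)
    {
            n->prev = head->prev;
            n->next = head;
            head->prev->next = n;
            head->prev = n;
    }

    int main(void)
    {
            struct list_head blacklist = { &blacklist, &blacklist };
            struct bdaddr_entry a = { .bdaddr = "00:11:22:33:44:55" };
            struct bdaddr_entry b = { .bdaddr = "aa:bb:cc:dd:ee:ff" };
            struct bdaddr_entry *e;

            list_add_tail_node(&a.list, &blacklist);
            list_add_tail_node(&b.list, &blacklist);

            list_for_each_entry(e, &blacklist, struct bdaddr_entry, list)
                    printf("%s\n", e->bdaddr);
            return 0;
    }
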
@@ -543,22 +533,28 @@ static int auto_accept_delay_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
auto_accept_delay_set, "%llu\n");
-int hci_register_sysfs(struct hci_dev *hdev)
+void hci_init_sysfs(struct hci_dev *hdev)
+{
+ struct device *dev = &hdev->dev;
+
+ dev->type = &bt_host;
+ dev->class = bt_class;
+
+ dev_set_drvdata(dev, hdev);
+ device_initialize(dev);
+}
+
+int hci_add_sysfs(struct hci_dev *hdev)
{
struct device *dev = &hdev->dev;
int err;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- dev->type = &bt_host;
- dev->class = bt_class;
dev->parent = hdev->parent;
-
dev_set_name(dev, "%s", hdev->name);
- dev_set_drvdata(dev, hdev);
-
- err = device_register(dev);
+ err = device_add(dev);
if (err < 0)
return err;
@@ -582,7 +578,7 @@ int hci_register_sysfs(struct hci_dev *hdev)
return 0;
}
-void hci_unregister_sysfs(struct hci_dev *hdev)
+void hci_del_sysfs(struct hci_dev *hdev)
{
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 075a3e920ca..3c2d888925d 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -81,24 +81,20 @@ static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 };
static struct hidp_session *__hidp_get_session(bdaddr_t *bdaddr)
{
struct hidp_session *session;
- struct list_head *p;
BT_DBG("");
- list_for_each(p, &hidp_session_list) {
- session = list_entry(p, struct hidp_session, list);
+ list_for_each_entry(session, &hidp_session_list, list) {
if (!bacmp(bdaddr, &session->bdaddr))
return session;
}
+
return NULL;
}
static void __hidp_link_session(struct hidp_session *session)
{
- __module_get(THIS_MODULE);
list_add(&session->list, &hidp_session_list);
-
- hci_conn_hold_device(session->conn);
}
static void __hidp_unlink_session(struct hidp_session *session)
@@ -106,7 +102,6 @@ static void __hidp_unlink_session(struct hidp_session *session)
hci_conn_put_device(session->conn);
list_del(&session->list);
- module_put(THIS_MODULE);
}
static void __hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci)
@@ -255,6 +250,9 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
BT_DBG("session %p data %p size %d", session, data, size);
+ if (atomic_read(&session->terminate))
+ return -EIO;
+
skb = alloc_skb(size + 1, GFP_ATOMIC);
if (!skb) {
BT_ERR("Can't allocate memory for new frame");
@@ -329,6 +327,7 @@ static int hidp_get_raw_report(struct hid_device *hid,
struct sk_buff *skb;
size_t len;
int numbered_reports = hid->report_enum[report_type].numbered;
+ int ret;
switch (report_type) {
case HID_FEATURE_REPORT:
@@ -352,8 +351,9 @@ static int hidp_get_raw_report(struct hid_device *hid,
session->waiting_report_number = numbered_reports ? report_number : -1;
set_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
data[0] = report_number;
- if (hidp_send_ctrl_message(hid->driver_data, report_type, data, 1))
- goto err_eio;
+ ret = hidp_send_ctrl_message(hid->driver_data, report_type, data, 1);
+ if (ret)
+ goto err;
/* Wait for the return of the report. The returned report
gets put in session->report_return. */
@@ -365,11 +365,13 @@ static int hidp_get_raw_report(struct hid_device *hid,
5*HZ);
if (res == 0) {
/* timeout */
- goto err_eio;
+ ret = -EIO;
+ goto err;
}
if (res < 0) {
/* signal */
- goto err_restartsys;
+ ret = -ERESTARTSYS;
+ goto err;
}
}
@@ -390,14 +392,10 @@ static int hidp_get_raw_report(struct hid_device *hid,
return len;
-err_restartsys:
- clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
- mutex_unlock(&session->report_mutex);
- return -ERESTARTSYS;
-err_eio:
+err:
clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
mutex_unlock(&session->report_mutex);
- return -EIO;
+ return ret;
}
static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count,
@@ -422,11 +420,10 @@ static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, s
/* Set up our wait, and send the report request to the device. */
set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
- if (hidp_send_ctrl_message(hid->driver_data, report_type,
- data, count)) {
- ret = -ENOMEM;
+ ret = hidp_send_ctrl_message(hid->driver_data, report_type, data,
+ count);
+ if (ret)
goto err;
- }
/* Wait for the ACK from the device. */
while (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) {
@@ -496,10 +493,9 @@ static void hidp_process_handshake(struct hidp_session *session,
case HIDP_HSHK_ERR_INVALID_REPORT_ID:
case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST:
case HIDP_HSHK_ERR_INVALID_PARAMETER:
- if (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags)) {
- clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+ if (test_and_clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags))
wake_up_interruptible(&session->report_queue);
- }
+
/* FIXME: Call into SET_ GET_ handlers here */
break;
@@ -520,10 +516,8 @@ static void hidp_process_handshake(struct hidp_session *session,
}
/* Wake up the waiting thread. */
- if (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) {
- clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
+ if (test_and_clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags))
wake_up_interruptible(&session->report_queue);
- }
}
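
The handshake handler folds the test_bit()+clear_bit() pair into test_and_clear_bit(), so checking and clearing the flag is one atomic step and the wake-up fires exactly once even if two contexts race on the same flag. A C11 stand-in for that primitive outside the kernel:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Userspace equivalent of the kernel's test_and_clear_bit(): atomically
     * clear bit nr and report whether it had been set. */
    static int test_and_clear_bit_ul(unsigned int nr, atomic_ulong *addr)
    {
            unsigned long mask = 1UL << nr;
            unsigned long old = atomic_fetch_and(addr, ~mask);

            return (old & mask) != 0;
    }

    int main(void)
    {
            atomic_ulong flags = 1UL << 3;   /* pretend the ACK flag is bit 3 */

            /* Only the first caller sees the bit set and would issue the wake-up. */
            printf("first:  %d\n", test_and_clear_bit_ul(3, &flags));
            printf("second: %d\n", test_and_clear_bit_ul(3, &flags));
            return 0;
    }
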
static void hidp_process_hid_control(struct hidp_session *session,
@@ -663,25 +657,32 @@ static int hidp_send_frame(struct socket *sock, unsigned char *data, int len)
return kernel_sendmsg(sock, &msg, &iv, 1, len);
}
-static void hidp_process_transmit(struct hidp_session *session)
+static void hidp_process_intr_transmit(struct hidp_session *session)
{
struct sk_buff *skb;
BT_DBG("session %p", session);
- while ((skb = skb_dequeue(&session->ctrl_transmit))) {
- if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) {
- skb_queue_head(&session->ctrl_transmit, skb);
+ while ((skb = skb_dequeue(&session->intr_transmit))) {
+ if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) {
+ skb_queue_head(&session->intr_transmit, skb);
break;
}
hidp_set_timer(session);
kfree_skb(skb);
}
+}
- while ((skb = skb_dequeue(&session->intr_transmit))) {
- if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) {
- skb_queue_head(&session->intr_transmit, skb);
+static void hidp_process_ctrl_transmit(struct hidp_session *session)
+{
+ struct sk_buff *skb;
+
+ BT_DBG("session %p", session);
+
+ while ((skb = skb_dequeue(&session->ctrl_transmit))) {
+ if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) {
+ skb_queue_head(&session->ctrl_transmit, skb);
break;
}
@@ -700,6 +701,7 @@ static int hidp_session(void *arg)
BT_DBG("session %p", session);
+ __module_get(THIS_MODULE);
set_user_nice(current, -15);
init_waitqueue_entry(&ctrl_wait, current);
@@ -714,23 +716,25 @@ static int hidp_session(void *arg)
intr_sk->sk_state != BT_CONNECTED)
break;
- while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
+ while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
skb_orphan(skb);
if (!skb_linearize(skb))
- hidp_recv_ctrl_frame(session, skb);
+ hidp_recv_intr_frame(session, skb);
else
kfree_skb(skb);
}
- while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
+ hidp_process_intr_transmit(session);
+
+ while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
skb_orphan(skb);
if (!skb_linearize(skb))
- hidp_recv_intr_frame(session, skb);
+ hidp_recv_ctrl_frame(session, skb);
else
kfree_skb(skb);
}
- hidp_process_transmit(session);
+ hidp_process_ctrl_transmit(session);
schedule();
set_current_state(TASK_INTERRUPTIBLE);
@@ -739,6 +743,10 @@ static int hidp_session(void *arg)
remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
+ clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
+ clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+ wake_up_interruptible(&session->report_queue);
+
down_write(&hidp_session_sem);
hidp_del_timer(session);
@@ -772,34 +780,37 @@ static int hidp_session(void *arg)
kfree(session->rd_data);
kfree(session);
+ module_put_and_exit(0);
return 0;
}
-static struct device *hidp_get_device(struct hidp_session *session)
+static struct hci_conn *hidp_get_connection(struct hidp_session *session)
{
bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src;
bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst;
- struct device *device = NULL;
+ struct hci_conn *conn;
struct hci_dev *hdev;
hdev = hci_get_route(dst, src);
if (!hdev)
return NULL;
- session->conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
- if (session->conn)
- device = &session->conn->dev;
+ hci_dev_lock_bh(hdev);
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
+ if (conn)
+ hci_conn_hold_device(conn);
+ hci_dev_unlock_bh(hdev);
hci_dev_put(hdev);
- return device;
+ return conn;
}
static int hidp_setup_input(struct hidp_session *session,
struct hidp_connadd_req *req)
{
struct input_dev *input;
- int err, i;
+ int i;
input = input_allocate_device();
if (!input)
@@ -842,17 +853,10 @@ static int hidp_setup_input(struct hidp_session *session,
input->relbit[0] |= BIT_MASK(REL_WHEEL);
}
- input->dev.parent = hidp_get_device(session);
+ input->dev.parent = &session->conn->dev;
input->event = hidp_input_event;
- err = input_register_device(input);
- if (err < 0) {
- input_free_device(input);
- session->input = NULL;
- return err;
- }
-
return 0;
}
@@ -949,7 +953,7 @@ static int hidp_setup_hid(struct hidp_session *session,
strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64);
strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64);
- hid->dev.parent = hidp_get_device(session);
+ hid->dev.parent = &session->conn->dev;
hid->ll_driver = &hidp_hid_driver;
hid->hid_get_raw_report = hidp_get_raw_report;
@@ -976,18 +980,20 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst))
return -ENOTUNIQ;
- session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL);
- if (!session)
- return -ENOMEM;
-
BT_DBG("rd_data %p rd_size %d", req->rd_data, req->rd_size);
down_write(&hidp_session_sem);
s = __hidp_get_session(&bt_sk(ctrl_sock->sk)->dst);
if (s && s->state == BT_CONNECTED) {
- err = -EEXIST;
- goto failed;
+ up_write(&hidp_session_sem);
+ return -EEXIST;
+ }
+
+ session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL);
+ if (!session) {
+ up_write(&hidp_session_sem);
+ return -ENOMEM;
}
bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst);
@@ -1003,6 +1009,12 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
session->intr_sock = intr_sock;
session->state = BT_CONNECTED;
+ session->conn = hidp_get_connection(session);
+ if (!session->conn) {
+ err = -ENOTCONN;
+ goto failed;
+ }
+
setup_timer(&session->timer, hidp_idle_timeout, (unsigned long)session);
skb_queue_head_init(&session->ctrl_transmit);
@@ -1015,9 +1027,11 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID);
session->idle_to = req->idle_to;
+ __hidp_link_session(session);
+
if (req->rd_size > 0) {
err = hidp_setup_hid(session, req);
- if (err && err != -ENODEV)
+ if (err)
goto purge;
}
@@ -1027,8 +1041,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
goto purge;
}
- __hidp_link_session(session);
-
hidp_set_timer(session);
if (session->hid) {
@@ -1054,7 +1066,11 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
!session->waiting_for_startup);
}
- err = hid_add_device(session->hid);
+ if (session->hid)
+ err = hid_add_device(session->hid);
+ else
+ err = input_register_device(session->input);
+
if (err < 0) {
atomic_inc(&session->terminate);
wake_up_process(session->task);
@@ -1077,8 +1093,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
unlink:
hidp_del_timer(session);
- __hidp_unlink_session(session);
-
if (session->input) {
input_unregister_device(session->input);
session->input = NULL;
@@ -1093,6 +1107,8 @@ unlink:
session->rd_data = NULL;
purge:
+ __hidp_unlink_session(session);
+
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
@@ -1134,19 +1150,16 @@ int hidp_del_connection(struct hidp_conndel_req *req)
int hidp_get_connlist(struct hidp_connlist_req *req)
{
- struct list_head *p;
+ struct hidp_session *session;
int err = 0, n = 0;
BT_DBG("");
down_read(&hidp_session_sem);
- list_for_each(p, &hidp_session_list) {
- struct hidp_session *session;
+ list_for_each_entry(session, &hidp_session_list, list) {
struct hidp_conninfo ci;
- session = list_entry(p, struct hidp_session, list);
-
__hidp_copy_session(session, &ci);
if (copy_to_user(req->ci, &ci, sizeof(ci))) {
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 5ea94a1eecf..e8a6837996c 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -57,9 +57,10 @@
#include <net/bluetooth/smp.h>
int disable_ertm;
+int enable_hs;
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
-static u8 l2cap_fixed_chan[8] = { 0x02, };
+static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);
@@ -219,7 +220,7 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
{
- BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
+ BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
chan_hold(chan);
@@ -293,6 +294,8 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
atomic_set(&chan->refcnt, 1);
+ BT_DBG("sk %p chan %p", sk, chan);
+
return chan;
}
@@ -310,7 +313,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
chan->psm, chan->dcid);
- conn->disc_reason = 0x13;
+ conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
chan->conn = conn;
@@ -337,6 +340,13 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
chan->omtu = L2CAP_DEFAULT_MTU;
}
+ chan->local_id = L2CAP_BESTEFFORT_ID;
+ chan->local_stype = L2CAP_SERV_BESTEFFORT;
+ chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
+ chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
+ chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
+ chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
+
chan_hold(chan);
list_add(&chan->list, &conn->chan_l);
@@ -556,34 +566,58 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
flags = ACL_START;
bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
+ skb->priority = HCI_PRIO_MAX;
+
+ hci_send_acl(conn->hchan, skb, flags);
+}
+
+static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ struct hci_conn *hcon = chan->conn->hcon;
+ u16 flags;
+
+ BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
+ skb->priority);
+
+ if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
+ lmp_no_flush_capable(hcon->hdev))
+ flags = ACL_START_NO_FLUSH;
+ else
+ flags = ACL_START;
- hci_send_acl(conn->hcon, skb, flags);
+ bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+ hci_send_acl(chan->conn->hchan, skb, flags);
}
-static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
+static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
{
struct sk_buff *skb;
struct l2cap_hdr *lh;
struct l2cap_conn *conn = chan->conn;
- int count, hlen = L2CAP_HDR_SIZE + 2;
- u8 flags;
+ int count, hlen;
if (chan->state != BT_CONNECTED)
return;
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ hlen = L2CAP_EXT_HDR_SIZE;
+ else
+ hlen = L2CAP_ENH_HDR_SIZE;
+
if (chan->fcs == L2CAP_FCS_CRC16)
- hlen += 2;
+ hlen += L2CAP_FCS_SIZE;
- BT_DBG("chan %p, control 0x%2.2x", chan, control);
+ BT_DBG("chan %p, control 0x%8.8x", chan, control);
count = min_t(unsigned int, conn->mtu, hlen);
- control |= L2CAP_CTRL_FRAME_TYPE;
+
+ control |= __set_sframe(chan);
if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
- control |= L2CAP_CTRL_FINAL;
+ control |= __set_ctrl_final(chan);
if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
- control |= L2CAP_CTRL_POLL;
+ control |= __set_ctrl_poll(chan);
skb = bt_skb_alloc(count, GFP_ATOMIC);
if (!skb)
@@ -592,32 +626,27 @@ static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
- put_unaligned_le16(control, skb_put(skb, 2));
+
+ __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
if (chan->fcs == L2CAP_FCS_CRC16) {
- u16 fcs = crc16(0, (u8 *)lh, count - 2);
- put_unaligned_le16(fcs, skb_put(skb, 2));
+ u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
+ put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
}
- if (lmp_no_flush_capable(conn->hcon->hdev))
- flags = ACL_START_NO_FLUSH;
- else
- flags = ACL_START;
-
- bt_cb(skb)->force_active = chan->force_active;
-
- hci_send_acl(chan->conn->hcon, skb, flags);
+ skb->priority = HCI_PRIO_MAX;
+ l2cap_do_send(chan, skb);
}
-static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
+static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
{
if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- control |= L2CAP_SUPER_RCV_NOT_READY;
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
set_bit(CONN_RNR_SENT, &chan->conn_state);
} else
- control |= L2CAP_SUPER_RCV_READY;
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
- control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ control |= __set_reqseq(chan, chan->buffer_seq);
l2cap_send_sframe(chan, control);
}
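
l2cap_send_sframe() now sizes the S-frame header from the channel's control-field mode instead of the literal "L2CAP_HDR_SIZE + 2": an enhanced control field costs 2 bytes, an extended one 4, and an optional CRC16 FCS adds 2 more. The arithmetic with the sizes written out as assumptions (4-byte basic L2CAP header, 2- vs 4-byte control field, 2-byte FCS; the named kernel constants are expected to carry these values):

    #include <stdio.h>
    #include <stdbool.h>

    /* Assumed L2CAP wire-format sizes, see the note above. */
    #define HDR_SIZE      4
    #define ENH_HDR_SIZE  (HDR_SIZE + 2)
    #define EXT_HDR_SIZE  (HDR_SIZE + 4)
    #define FCS_SIZE      2

    static int sframe_hlen(bool ext_ctrl, bool crc16_fcs)
    {
            int hlen = ext_ctrl ? EXT_HDR_SIZE : ENH_HDR_SIZE;

            if (crc16_fcs)
                    hlen += FCS_SIZE;
            return hlen;
    }

    int main(void)
    {
            printf("enhanced, no FCS: %d bytes\n", sframe_hlen(false, false)); /* 6 */
            printf("enhanced + FCS:   %d bytes\n", sframe_hlen(false, true));  /* 8 */
            printf("extended + FCS:   %d bytes\n", sframe_hlen(true, true));   /* 10 */
            return 0;
    }
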
@@ -947,7 +976,7 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
list_for_each_entry(chan, &conn->chan_l, list) {
struct sock *sk = chan->sk;
- if (chan->force_reliable)
+ if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
sk->sk_err = err;
}
@@ -986,6 +1015,8 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
chan->ops->close(chan->data);
}
+ hci_chan_del(conn->hchan);
+
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
del_timer_sync(&conn->info_timer);
@@ -1008,18 +1039,26 @@ static void security_timeout(unsigned long arg)
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
struct l2cap_conn *conn = hcon->l2cap_data;
+ struct hci_chan *hchan;
if (conn || status)
return conn;
+ hchan = hci_chan_create(hcon);
+ if (!hchan)
+ return NULL;
+
conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
- if (!conn)
+ if (!conn) {
+ hci_chan_del(hchan);
return NULL;
+ }
hcon->l2cap_data = conn;
conn->hcon = hcon;
+ conn->hchan = hchan;
- BT_DBG("hcon %p conn %p", hcon, conn);
+ BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
conn->mtu = hcon->hdev->le_mtu;
@@ -1043,7 +1082,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
setup_timer(&conn->info_timer, l2cap_info_timeout,
(unsigned long) conn);
- conn->disc_reason = 0x13;
+ conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
return conn;
}
@@ -1245,47 +1284,35 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
__clear_retrans_timer(chan);
}
-static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
-{
- struct hci_conn *hcon = chan->conn->hcon;
- u16 flags;
-
- BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
-
- if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
- flags = ACL_START_NO_FLUSH;
- else
- flags = ACL_START;
-
- bt_cb(skb)->force_active = chan->force_active;
- hci_send_acl(hcon, skb, flags);
-}
-
static void l2cap_streaming_send(struct l2cap_chan *chan)
{
struct sk_buff *skb;
- u16 control, fcs;
+ u32 control;
+ u16 fcs;
while ((skb = skb_dequeue(&chan->tx_q))) {
- control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
- control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
- put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
+ control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
+ control |= __set_txseq(chan, chan->next_tx_seq);
+ __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
if (chan->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
- put_unaligned_le16(fcs, skb->data + skb->len - 2);
+ fcs = crc16(0, (u8 *)skb->data,
+ skb->len - L2CAP_FCS_SIZE);
+ put_unaligned_le16(fcs,
+ skb->data + skb->len - L2CAP_FCS_SIZE);
}
l2cap_do_send(chan, skb);
- chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
+ chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
}
}
-static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
+static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
{
struct sk_buff *skb, *tx_skb;
- u16 control, fcs;
+ u16 fcs;
+ u32 control;
skb = skb_peek(&chan->tx_q);
if (!skb)
@@ -1308,20 +1335,23 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
tx_skb = skb_clone(skb, GFP_ATOMIC);
bt_cb(skb)->retries++;
- control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
- control &= L2CAP_CTRL_SAR;
+
+ control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
+ control &= __get_sar_mask(chan);
if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
- control |= L2CAP_CTRL_FINAL;
+ control |= __set_ctrl_final(chan);
- control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
- | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
+ control |= __set_reqseq(chan, chan->buffer_seq);
+ control |= __set_txseq(chan, tx_seq);
- put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+ __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
if (chan->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
- put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
+ fcs = crc16(0, (u8 *)tx_skb->data,
+ tx_skb->len - L2CAP_FCS_SIZE);
+ put_unaligned_le16(fcs,
+ tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
}
l2cap_do_send(chan, tx_skb);
@@ -1330,7 +1360,8 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
struct sk_buff *skb, *tx_skb;
- u16 control, fcs;
+ u16 fcs;
+ u32 control;
int nsent = 0;
if (chan->state != BT_CONNECTED)
@@ -1348,20 +1379,22 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
bt_cb(skb)->retries++;
- control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
- control &= L2CAP_CTRL_SAR;
+ control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
+ control &= __get_sar_mask(chan);
if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
- control |= L2CAP_CTRL_FINAL;
+ control |= __set_ctrl_final(chan);
- control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
- | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
- put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+ control |= __set_reqseq(chan, chan->buffer_seq);
+ control |= __set_txseq(chan, chan->next_tx_seq);
+ __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
if (chan->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
- put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
+ fcs = crc16(0, (u8 *)skb->data,
+ tx_skb->len - L2CAP_FCS_SIZE);
+ put_unaligned_le16(fcs, skb->data +
+ tx_skb->len - L2CAP_FCS_SIZE);
}
l2cap_do_send(chan, tx_skb);
@@ -1369,7 +1402,8 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
__set_retrans_timer(chan);
bt_cb(skb)->tx_seq = chan->next_tx_seq;
- chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
+
+ chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
if (bt_cb(skb)->retries == 1)
chan->unacked_frames++;
@@ -1401,12 +1435,12 @@ static int l2cap_retransmit_frames(struct l2cap_chan *chan)
static void l2cap_send_ack(struct l2cap_chan *chan)
{
- u16 control = 0;
+ u32 control = 0;
- control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ control |= __set_reqseq(chan, chan->buffer_seq);
if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- control |= L2CAP_SUPER_RCV_NOT_READY;
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
set_bit(CONN_RNR_SENT, &chan->conn_state);
l2cap_send_sframe(chan, control);
return;
@@ -1415,20 +1449,20 @@ static void l2cap_send_ack(struct l2cap_chan *chan)
if (l2cap_ertm_send(chan) > 0)
return;
- control |= L2CAP_SUPER_RCV_READY;
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
l2cap_send_sframe(chan, control);
}
static void l2cap_send_srejtail(struct l2cap_chan *chan)
{
struct srej_list *tail;
- u16 control;
+ u32 control;
- control = L2CAP_SUPER_SELECT_REJECT;
- control |= L2CAP_CTRL_FINAL;
+ control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
+ control |= __set_ctrl_final(chan);
tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
- control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ control |= __set_reqseq(chan, tail->tx_seq);
l2cap_send_sframe(chan, control);
}
@@ -1456,6 +1490,8 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
return -EFAULT;
+ (*frag)->priority = skb->priority;
+
sent += count;
len -= count;
@@ -1465,15 +1501,17 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
return sent;
}
-static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
+ struct msghdr *msg, size_t len,
+ u32 priority)
{
struct sock *sk = chan->sk;
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
- int err, count, hlen = L2CAP_HDR_SIZE + 2;
+ int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
struct l2cap_hdr *lh;
- BT_DBG("sk %p len %d", sk, (int)len);
+ BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
count = min_t(unsigned int, (conn->mtu - hlen), len);
skb = bt_skb_send_alloc(sk, count + hlen,
@@ -1481,6 +1519,8 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct
if (!skb)
return ERR_PTR(err);
+ skb->priority = priority;
+
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
@@ -1495,7 +1535,9 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct
return skb;
}
-static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
+ struct msghdr *msg, size_t len,
+ u32 priority)
{
struct sock *sk = chan->sk;
struct l2cap_conn *conn = chan->conn;
@@ -1511,6 +1553,8 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct ms
if (!skb)
return ERR_PTR(err);
+ skb->priority = priority;
+
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
@@ -1526,12 +1570,12 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct ms
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
struct msghdr *msg, size_t len,
- u16 control, u16 sdulen)
+ u32 control, u16 sdulen)
{
struct sock *sk = chan->sk;
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
- int err, count, hlen = L2CAP_HDR_SIZE + 2;
+ int err, count, hlen;
struct l2cap_hdr *lh;
BT_DBG("sk %p len %d", sk, (int)len);
@@ -1539,11 +1583,16 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
if (!conn)
return ERR_PTR(-ENOTCONN);
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ hlen = L2CAP_EXT_HDR_SIZE;
+ else
+ hlen = L2CAP_ENH_HDR_SIZE;
+
if (sdulen)
- hlen += 2;
+ hlen += L2CAP_SDULEN_SIZE;
if (chan->fcs == L2CAP_FCS_CRC16)
- hlen += 2;
+ hlen += L2CAP_FCS_SIZE;
count = min_t(unsigned int, (conn->mtu - hlen), len);
skb = bt_skb_send_alloc(sk, count + hlen,
@@ -1555,9 +1604,11 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- put_unaligned_le16(control, skb_put(skb, 2));
+
+ __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
+
if (sdulen)
- put_unaligned_le16(sdulen, skb_put(skb, 2));
+ put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
if (unlikely(err < 0)) {
@@ -1566,7 +1617,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
}
if (chan->fcs == L2CAP_FCS_CRC16)
- put_unaligned_le16(0, skb_put(skb, 2));
+ put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
bt_cb(skb)->retries = 0;
return skb;
@@ -1576,11 +1627,11 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si
{
struct sk_buff *skb;
struct sk_buff_head sar_queue;
- u16 control;
+ u32 control;
size_t size = 0;
skb_queue_head_init(&sar_queue);
- control = L2CAP_SDU_START;
+ control = __set_ctrl_sar(chan, L2CAP_SAR_START);
skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1593,10 +1644,10 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si
size_t buflen;
if (len > chan->remote_mps) {
- control = L2CAP_SDU_CONTINUE;
+ control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
buflen = chan->remote_mps;
} else {
- control = L2CAP_SDU_END;
+ control = __set_ctrl_sar(chan, L2CAP_SAR_END);
buflen = len;
}
@@ -1617,15 +1668,16 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si
return size;
}
-int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
+ u32 priority)
{
struct sk_buff *skb;
- u16 control;
+ u32 control;
int err;
/* Connectionless channel */
if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
- skb = l2cap_create_connless_pdu(chan, msg, len);
+ skb = l2cap_create_connless_pdu(chan, msg, len, priority);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1640,7 +1692,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
return -EMSGSIZE;
/* Create a basic PDU */
- skb = l2cap_create_basic_pdu(chan, msg, len);
+ skb = l2cap_create_basic_pdu(chan, msg, len, priority);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1652,7 +1704,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
case L2CAP_MODE_STREAMING:
/* Entire SDU fits into one PDU */
if (len <= chan->remote_mps) {
- control = L2CAP_SDU_UNSEGMENTED;
+ control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
skb = l2cap_create_iframe_pdu(chan, msg, len, control,
0);
if (IS_ERR(skb))
@@ -1850,6 +1902,37 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
*ptr += L2CAP_CONF_OPT_SIZE + len;
}
+static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
+{
+ struct l2cap_conf_efs efs;
+
+ switch(chan->mode) {
+ case L2CAP_MODE_ERTM:
+ efs.id = chan->local_id;
+ efs.stype = chan->local_stype;
+ efs.msdu = cpu_to_le16(chan->local_msdu);
+ efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
+ efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
+ efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
+ break;
+
+ case L2CAP_MODE_STREAMING:
+ efs.id = 1;
+ efs.stype = L2CAP_SERV_BESTEFFORT;
+ efs.msdu = cpu_to_le16(chan->local_msdu);
+ efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
+ efs.acc_lat = 0;
+ efs.flush_to = 0;
+ break;
+
+ default:
+ return;
+ }
+
+ l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
+ (unsigned long) &efs);
+}
+
static void l2cap_ack_timeout(unsigned long arg)
{
struct l2cap_chan *chan = (void *) arg;
@@ -1896,11 +1979,36 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
}
}
+static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
+{
+ return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
+}
+
+static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
+{
+ return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
+}
+
+static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
+{
+ if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
+ __l2cap_ews_supported(chan)) {
+ /* use extended control field */
+ set_bit(FLAG_EXT_CTRL, &chan->flags);
+ chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
+ } else {
+ chan->tx_win = min_t(u16, chan->tx_win,
+ L2CAP_DEFAULT_TX_WINDOW);
+ chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
+ }
+}
+
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
struct l2cap_conf_req *req = data;
struct l2cap_conf_rfc rfc = { .mode = chan->mode };
void *ptr = req->data;
+ u16 size;
BT_DBG("chan %p", chan);
@@ -1913,6 +2021,9 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
break;
+ if (__l2cap_efs_supported(chan))
+ set_bit(FLAG_EFS_ENABLE, &chan->flags);
+
/* fall through */
default:
chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
@@ -1942,17 +2053,27 @@ done:
case L2CAP_MODE_ERTM:
rfc.mode = L2CAP_MODE_ERTM;
- rfc.txwin_size = chan->tx_win;
rfc.max_transmit = chan->max_tx;
rfc.retrans_timeout = 0;
rfc.monitor_timeout = 0;
- rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
- if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
- rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
+
+ size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
+ L2CAP_EXT_HDR_SIZE -
+ L2CAP_SDULEN_SIZE -
+ L2CAP_FCS_SIZE);
+ rfc.max_pdu_size = cpu_to_le16(size);
+
+ l2cap_txwin_setup(chan);
+
+ rfc.txwin_size = min_t(u16, chan->tx_win,
+ L2CAP_DEFAULT_TX_WINDOW);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
(unsigned long) &rfc);
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+ l2cap_add_opt_efs(&ptr, chan);
+
if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
break;
@@ -1961,6 +2082,10 @@ done:
chan->fcs = L2CAP_FCS_NONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
}
+
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+ chan->tx_win);
break;
case L2CAP_MODE_STREAMING:
@@ -1969,13 +2094,19 @@ done:
rfc.max_transmit = 0;
rfc.retrans_timeout = 0;
rfc.monitor_timeout = 0;
- rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
- if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
- rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
+
+ size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
+ L2CAP_EXT_HDR_SIZE -
+ L2CAP_SDULEN_SIZE -
+ L2CAP_FCS_SIZE);
+ rfc.max_pdu_size = cpu_to_le16(size);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
(unsigned long) &rfc);
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+ l2cap_add_opt_efs(&ptr, chan);
+
if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
break;
@@ -2002,8 +2133,11 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
int type, hint, olen;
unsigned long val;
struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+ struct l2cap_conf_efs efs;
+ u8 remote_efs = 0;
u16 mtu = L2CAP_DEFAULT_MTU;
u16 result = L2CAP_CONF_SUCCESS;
+ u16 size;
BT_DBG("chan %p", chan);
@@ -2033,7 +2167,22 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
case L2CAP_CONF_FCS:
if (val == L2CAP_FCS_NONE)
set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
+ break;
+
+ case L2CAP_CONF_EFS:
+ remote_efs = 1;
+ if (olen == sizeof(efs))
+ memcpy(&efs, (void *) val, olen);
+ break;
+
+ case L2CAP_CONF_EWS:
+ if (!enable_hs)
+ return -ECONNREFUSED;
+ set_bit(FLAG_EXT_CTRL, &chan->flags);
+ set_bit(CONF_EWS_RECV, &chan->conf_state);
+ chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
+ chan->remote_tx_win = val;
break;
default:
@@ -2058,6 +2207,13 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
break;
}
+ if (remote_efs) {
+ if (__l2cap_efs_supported(chan))
+ set_bit(FLAG_EFS_ENABLE, &chan->flags);
+ else
+ return -ECONNREFUSED;
+ }
+
if (chan->mode != rfc.mode)
return -ECONNREFUSED;
@@ -2076,7 +2232,6 @@ done:
sizeof(rfc), (unsigned long) &rfc);
}
-
if (result == L2CAP_CONF_SUCCESS) {
/* Configure output options and let the other side know
* which ones we don't like. */
@@ -2089,6 +2244,26 @@ done:
}
l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
+ if (remote_efs) {
+ if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != chan->local_stype) {
+
+ result = L2CAP_CONF_UNACCEPT;
+
+ if (chan->num_conf_req >= 1)
+ return -ECONNREFUSED;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ sizeof(efs),
+ (unsigned long) &efs);
+ } else {
+ /* Send PENDING Conf Rsp */
+ result = L2CAP_CONF_PENDING;
+ set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
+ }
+ }
+
switch (rfc.mode) {
case L2CAP_MODE_BASIC:
chan->fcs = L2CAP_FCS_NONE;
@@ -2096,13 +2271,20 @@ done:
break;
case L2CAP_MODE_ERTM:
- chan->remote_tx_win = rfc.txwin_size;
- chan->remote_max_tx = rfc.max_transmit;
+ if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
+ chan->remote_tx_win = rfc.txwin_size;
+ else
+ rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
- if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
- rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
+ chan->remote_max_tx = rfc.max_transmit;
- chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
+ size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
+ chan->conn->mtu -
+ L2CAP_EXT_HDR_SIZE -
+ L2CAP_SDULEN_SIZE -
+ L2CAP_FCS_SIZE);
+ rfc.max_pdu_size = cpu_to_le16(size);
+ chan->remote_mps = size;
rfc.retrans_timeout =
le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
@@ -2114,13 +2296,29 @@ done:
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+ chan->remote_id = efs.id;
+ chan->remote_stype = efs.stype;
+ chan->remote_msdu = le16_to_cpu(efs.msdu);
+ chan->remote_flush_to =
+ le32_to_cpu(efs.flush_to);
+ chan->remote_acc_lat =
+ le32_to_cpu(efs.acc_lat);
+ chan->remote_sdu_itime =
+ le32_to_cpu(efs.sdu_itime);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ sizeof(efs), (unsigned long) &efs);
+ }
break;
case L2CAP_MODE_STREAMING:
- if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
- rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
-
- chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
+ size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
+ chan->conn->mtu -
+ L2CAP_EXT_HDR_SIZE -
+ L2CAP_SDULEN_SIZE -
+ L2CAP_FCS_SIZE);
+ rfc.max_pdu_size = cpu_to_le16(size);
+ chan->remote_mps = size;
set_bit(CONF_MODE_DONE, &chan->conf_state);
@@ -2153,6 +2351,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
int type, olen;
unsigned long val;
struct l2cap_conf_rfc rfc;
+ struct l2cap_conf_efs efs;
BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
@@ -2188,6 +2387,26 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
break;
+
+ case L2CAP_CONF_EWS:
+ chan->tx_win = min_t(u16, val,
+ L2CAP_DEFAULT_EXT_WINDOW);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+ chan->tx_win);
+ break;
+
+ case L2CAP_CONF_EFS:
+ if (olen == sizeof(efs))
+ memcpy(&efs, (void *)val, olen);
+
+ if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != chan->local_stype)
+ return -ECONNREFUSED;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ sizeof(efs), (unsigned long) &efs);
+ break;
}
}
@@ -2196,13 +2415,23 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
chan->mode = rfc.mode;
- if (*result == L2CAP_CONF_SUCCESS) {
+ if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
switch (rfc.mode) {
case L2CAP_MODE_ERTM:
chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
chan->mps = le16_to_cpu(rfc.max_pdu_size);
+
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+ chan->local_msdu = le16_to_cpu(efs.msdu);
+ chan->local_sdu_itime =
+ le32_to_cpu(efs.sdu_itime);
+ chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
+ chan->local_flush_to =
+ le32_to_cpu(efs.flush_to);
+ }
break;
+
case L2CAP_MODE_STREAMING:
chan->mps = le16_to_cpu(rfc.max_pdu_size);
}
@@ -2330,7 +2559,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
/* Check if the ACL is secure enough (if not SDP) */
if (psm != cpu_to_le16(0x0001) &&
!hci_conn_check_link_mode(conn->hcon)) {
- conn->disc_reason = 0x05;
+ conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
result = L2CAP_CR_SEC_BLOCK;
goto response;
}
@@ -2602,6 +2831,21 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
chan->num_conf_req++;
}
+ /* Got Conf Rsp PENDING from remote side and assume we sent
+ Conf Rsp PENDING in the code above */
+ if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
+ test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
+
+ /* check compatibility */
+
+ clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
+ set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
+
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
+ l2cap_build_conf_rsp(chan, rsp,
+ L2CAP_CONF_SUCCESS, 0x0000), rsp);
+ }
+
unlock:
bh_unlock_sock(sk);
return 0;
@@ -2631,8 +2875,33 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
switch (result) {
case L2CAP_CONF_SUCCESS:
l2cap_conf_rfc_get(chan, rsp->data, len);
+ clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
break;
+ case L2CAP_CONF_PENDING:
+ set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
+
+ if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
+ char buf[64];
+
+ len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+ buf, &result);
+ if (len < 0) {
+ l2cap_send_disconn_req(conn, chan, ECONNRESET);
+ goto done;
+ }
+
+ /* check compatibility */
+
+ clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
+ set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
+
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
+ l2cap_build_conf_rsp(chan, buf,
+ L2CAP_CONF_SUCCESS, 0x0000), buf);
+ }
+ goto done;
+
case L2CAP_CONF_UNACCEPT:
if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
char req[64];
@@ -2782,15 +3051,25 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
if (!disable_ertm)
feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
| L2CAP_FEAT_FCS;
+ if (enable_hs)
+ feat_mask |= L2CAP_FEAT_EXT_FLOW
+ | L2CAP_FEAT_EXT_WINDOW;
+
put_unaligned_le32(feat_mask, rsp->data);
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(buf), buf);
} else if (type == L2CAP_IT_FIXED_CHAN) {
u8 buf[12];
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
+
+ if (enable_hs)
+ l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
+ else
+ l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
+
rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
- memcpy(buf + 4, l2cap_fixed_chan, 8);
+ memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(buf), buf);
} else {
@@ -2857,6 +3136,165 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
return 0;
}
+static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ void *data)
+{
+ struct l2cap_create_chan_req *req = data;
+ struct l2cap_create_chan_rsp rsp;
+ u16 psm, scid;
+
+ if (cmd_len != sizeof(*req))
+ return -EPROTO;
+
+ if (!enable_hs)
+ return -EINVAL;
+
+ psm = le16_to_cpu(req->psm);
+ scid = le16_to_cpu(req->scid);
+
+ BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
+
+ /* Placeholder: Always reject */
+ rsp.dcid = 0;
+ rsp.scid = cpu_to_le16(scid);
+ rsp.result = L2CAP_CR_NO_MEM;
+ rsp.status = L2CAP_CS_NO_INFO;
+
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
+ sizeof(rsp), &rsp);
+
+ return 0;
+}
+
+static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, void *data)
+{
+ BT_DBG("conn %p", conn);
+
+ return l2cap_connect_rsp(conn, cmd, data);
+}
+
+static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
+ u16 icid, u16 result)
+{
+ struct l2cap_move_chan_rsp rsp;
+
+ BT_DBG("icid %d, result %d", icid, result);
+
+ rsp.icid = cpu_to_le16(icid);
+ rsp.result = cpu_to_le16(result);
+
+ l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
+}
+
+static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
+ struct l2cap_chan *chan, u16 icid, u16 result)
+{
+ struct l2cap_move_chan_cfm cfm;
+ u8 ident;
+
+ BT_DBG("icid %d, result %d", icid, result);
+
+ ident = l2cap_get_ident(conn);
+ if (chan)
+ chan->ident = ident;
+
+ cfm.icid = cpu_to_le16(icid);
+ cfm.result = cpu_to_le16(result);
+
+ l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
+}
+
+static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
+ u16 icid)
+{
+ struct l2cap_move_chan_cfm_rsp rsp;
+
+ BT_DBG("icid %d", icid);
+
+ rsp.icid = cpu_to_le16(icid);
+ l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
+}
+
+static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+{
+ struct l2cap_move_chan_req *req = data;
+ u16 icid = 0;
+ u16 result = L2CAP_MR_NOT_ALLOWED;
+
+ if (cmd_len != sizeof(*req))
+ return -EPROTO;
+
+ icid = le16_to_cpu(req->icid);
+
+ BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
+
+ if (!enable_hs)
+ return -EINVAL;
+
+ /* Placeholder: Always refuse */
+ l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
+
+ return 0;
+}
+
+static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+{
+ struct l2cap_move_chan_rsp *rsp = data;
+ u16 icid, result;
+
+ if (cmd_len != sizeof(*rsp))
+ return -EPROTO;
+
+ icid = le16_to_cpu(rsp->icid);
+ result = le16_to_cpu(rsp->result);
+
+ BT_DBG("icid %d, result %d", icid, result);
+
+ /* Placeholder: Always unconfirmed */
+ l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
+
+ return 0;
+}
+
+static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+{
+ struct l2cap_move_chan_cfm *cfm = data;
+ u16 icid, result;
+
+ if (cmd_len != sizeof(*cfm))
+ return -EPROTO;
+
+ icid = le16_to_cpu(cfm->icid);
+ result = le16_to_cpu(cfm->result);
+
+ BT_DBG("icid %d, result %d", icid, result);
+
+ l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
+
+ return 0;
+}
+
+static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+{
+ struct l2cap_move_chan_cfm_rsp *rsp = data;
+ u16 icid;
+
+ if (cmd_len != sizeof(*rsp))
+ return -EPROTO;
+
+ icid = le16_to_cpu(rsp->icid);
+
+ BT_DBG("icid %d", icid);
+
+ return 0;
+}
+
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
u16 to_multiplier)
{
@@ -2969,6 +3407,30 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
err = l2cap_information_rsp(conn, cmd, data);
break;
+ case L2CAP_CREATE_CHAN_REQ:
+ err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_CREATE_CHAN_RSP:
+ err = l2cap_create_channel_rsp(conn, cmd, data);
+ break;
+
+ case L2CAP_MOVE_CHAN_REQ:
+ err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_MOVE_CHAN_RSP:
+ err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_MOVE_CHAN_CFM:
+ err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_MOVE_CHAN_CFM_RSP:
+ err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
+ break;
+
default:
BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
err = -EINVAL;
@@ -3047,10 +3509,15 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
u16 our_fcs, rcv_fcs;
- int hdr_size = L2CAP_HDR_SIZE + 2;
+ int hdr_size;
+
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ hdr_size = L2CAP_EXT_HDR_SIZE;
+ else
+ hdr_size = L2CAP_ENH_HDR_SIZE;
if (chan->fcs == L2CAP_FCS_CRC16) {
- skb_trim(skb, skb->len - 2);
+ skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
rcv_fcs = get_unaligned_le16(skb->data + skb->len);
our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
@@ -3062,14 +3529,14 @@ static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
- u16 control = 0;
+ u32 control = 0;
chan->frames_sent = 0;
- control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ control |= __set_reqseq(chan, chan->buffer_seq);
if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- control |= L2CAP_SUPER_RCV_NOT_READY;
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
l2cap_send_sframe(chan, control);
set_bit(CONN_RNR_SENT, &chan->conn_state);
}
@@ -3081,12 +3548,12 @@ static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
chan->frames_sent == 0) {
- control |= L2CAP_SUPER_RCV_READY;
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
l2cap_send_sframe(chan, control);
}
}
-static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
+static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
{
struct sk_buff *next_skb;
int tx_seq_offset, next_tx_seq_offset;
@@ -3100,18 +3567,14 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb,
return 0;
}
- tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
- if (tx_seq_offset < 0)
- tx_seq_offset += 64;
+ tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
do {
if (bt_cb(next_skb)->tx_seq == tx_seq)
return -EINVAL;
- next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
- chan->buffer_seq) % 64;
- if (next_tx_seq_offset < 0)
- next_tx_seq_offset += 64;
+ next_tx_seq_offset = __seq_offset(chan,
+ bt_cb(next_skb)->tx_seq, chan->buffer_seq);
if (next_tx_seq_offset > tx_seq_offset) {
__skb_queue_before(&chan->srej_q, next_skb, skb);
@@ -3147,24 +3610,24 @@ static void append_skb_frag(struct sk_buff *skb,
skb->truesize += new_frag->truesize;
}
-static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
+static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
{
int err = -EINVAL;
- switch (control & L2CAP_CTRL_SAR) {
- case L2CAP_SDU_UNSEGMENTED:
+ switch (__get_ctrl_sar(chan, control)) {
+ case L2CAP_SAR_UNSEGMENTED:
if (chan->sdu)
break;
err = chan->ops->recv(chan->data, skb);
break;
- case L2CAP_SDU_START:
+ case L2CAP_SAR_START:
if (chan->sdu)
break;
chan->sdu_len = get_unaligned_le16(skb->data);
- skb_pull(skb, 2);
+ skb_pull(skb, L2CAP_SDULEN_SIZE);
if (chan->sdu_len > chan->imtu) {
err = -EMSGSIZE;
@@ -3181,7 +3644,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u1
err = 0;
break;
- case L2CAP_SDU_CONTINUE:
+ case L2CAP_SAR_CONTINUE:
if (!chan->sdu)
break;
@@ -3195,7 +3658,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u1
err = 0;
break;
- case L2CAP_SDU_END:
+ case L2CAP_SAR_END:
if (!chan->sdu)
break;
@@ -3230,14 +3693,14 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u1
static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
{
- u16 control;
+ u32 control;
BT_DBG("chan %p, Enter local busy", chan);
set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
- control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
- control |= L2CAP_SUPER_RCV_NOT_READY;
+ control = __set_reqseq(chan, chan->buffer_seq);
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
l2cap_send_sframe(chan, control);
set_bit(CONN_RNR_SENT, &chan->conn_state);
@@ -3247,13 +3710,14 @@ static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
{
- u16 control;
+ u32 control;
if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
goto done;
- control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
- control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
+ control = __set_reqseq(chan, chan->buffer_seq);
+ control |= __set_ctrl_poll(chan);
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
l2cap_send_sframe(chan, control);
chan->retry_count = 1;
@@ -3279,10 +3743,10 @@ void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
}
}
-static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
+static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
{
struct sk_buff *skb;
- u16 control;
+ u32 control;
while ((skb = skb_peek(&chan->srej_q)) &&
!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
@@ -3292,7 +3756,7 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
break;
skb = skb_dequeue(&chan->srej_q);
- control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
+ control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
err = l2cap_reassemble_sdu(chan, skb, control);
if (err < 0) {
@@ -3300,16 +3764,15 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
break;
}
- chan->buffer_seq_srej =
- (chan->buffer_seq_srej + 1) % 64;
- tx_seq = (tx_seq + 1) % 64;
+ chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
+ tx_seq = __next_seq(chan, tx_seq);
}
}
-static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
+static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
{
struct srej_list *l, *tmp;
- u16 control;
+ u32 control;
list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
if (l->tx_seq == tx_seq) {
@@ -3317,45 +3780,48 @@ static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
kfree(l);
return;
}
- control = L2CAP_SUPER_SELECT_REJECT;
- control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
+ control |= __set_reqseq(chan, l->tx_seq);
l2cap_send_sframe(chan, control);
list_del(&l->list);
list_add_tail(&l->list, &chan->srej_l);
}
}
-static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
+static void l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
{
struct srej_list *new;
- u16 control;
+ u32 control;
while (tx_seq != chan->expected_tx_seq) {
- control = L2CAP_SUPER_SELECT_REJECT;
- control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
+ control |= __set_reqseq(chan, chan->expected_tx_seq);
l2cap_send_sframe(chan, control);
new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
new->tx_seq = chan->expected_tx_seq;
- chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
+
+ chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
+
list_add_tail(&new->list, &chan->srej_l);
}
- chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
+
+ chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
}
-static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
+static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
{
- u8 tx_seq = __get_txseq(rx_control);
- u8 req_seq = __get_reqseq(rx_control);
- u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
+ u16 tx_seq = __get_txseq(chan, rx_control);
+ u16 req_seq = __get_reqseq(chan, rx_control);
+ u8 sar = __get_ctrl_sar(chan, rx_control);
int tx_seq_offset, expected_tx_seq_offset;
int num_to_ack = (chan->tx_win/6) + 1;
int err = 0;
- BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
+ BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
tx_seq, rx_control);
- if (L2CAP_CTRL_FINAL & rx_control &&
+ if (__is_ctrl_final(chan, rx_control) &&
test_bit(CONN_WAIT_F, &chan->conn_state)) {
__clear_monitor_timer(chan);
if (chan->unacked_frames > 0)
@@ -3366,9 +3832,7 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
chan->expected_ack_seq = req_seq;
l2cap_drop_acked_frames(chan);
- tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
- if (tx_seq_offset < 0)
- tx_seq_offset += 64;
+ tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
/* invalid tx_seq */
if (tx_seq_offset >= chan->tx_win) {
@@ -3416,10 +3880,8 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
l2cap_send_srejframe(chan, tx_seq);
}
} else {
- expected_tx_seq_offset =
- (chan->expected_tx_seq - chan->buffer_seq) % 64;
- if (expected_tx_seq_offset < 0)
- expected_tx_seq_offset += 64;
+ expected_tx_seq_offset = __seq_offset(chan,
+ chan->expected_tx_seq, chan->buffer_seq);
/* duplicated tx_seq */
if (tx_seq_offset < expected_tx_seq_offset)
@@ -3444,7 +3906,7 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
return 0;
expected:
- chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
+ chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
bt_cb(skb)->tx_seq = tx_seq;
@@ -3454,13 +3916,14 @@ expected:
}
err = l2cap_reassemble_sdu(chan, skb, rx_control);
- chan->buffer_seq = (chan->buffer_seq + 1) % 64;
+ chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
+
if (err < 0) {
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
return err;
}
- if (rx_control & L2CAP_CTRL_FINAL) {
+ if (__is_ctrl_final(chan, rx_control)) {
if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
l2cap_retransmit_frames(chan);
}
@@ -3478,15 +3941,15 @@ drop:
return 0;
}
-static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
+static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
{
- BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
- rx_control);
+ BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
+ __get_reqseq(chan, rx_control), rx_control);
- chan->expected_ack_seq = __get_reqseq(rx_control);
+ chan->expected_ack_seq = __get_reqseq(chan, rx_control);
l2cap_drop_acked_frames(chan);
- if (rx_control & L2CAP_CTRL_POLL) {
+ if (__is_ctrl_poll(chan, rx_control)) {
set_bit(CONN_SEND_FBIT, &chan->conn_state);
if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
@@ -3499,7 +3962,7 @@ static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_co
l2cap_send_i_or_rr_or_rnr(chan);
}
- } else if (rx_control & L2CAP_CTRL_FINAL) {
+ } else if (__is_ctrl_final(chan, rx_control)) {
clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
@@ -3518,18 +3981,18 @@ static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_co
}
}
-static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
+static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
{
- u8 tx_seq = __get_reqseq(rx_control);
+ u16 tx_seq = __get_reqseq(chan, rx_control);
- BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
+ BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
chan->expected_ack_seq = tx_seq;
l2cap_drop_acked_frames(chan);
- if (rx_control & L2CAP_CTRL_FINAL) {
+ if (__is_ctrl_final(chan, rx_control)) {
if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
l2cap_retransmit_frames(chan);
} else {
@@ -3539,15 +4002,15 @@ static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_c
set_bit(CONN_REJ_ACT, &chan->conn_state);
}
}
-static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
+static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
{
- u8 tx_seq = __get_reqseq(rx_control);
+ u16 tx_seq = __get_reqseq(chan, rx_control);
- BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
+ BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- if (rx_control & L2CAP_CTRL_POLL) {
+ if (__is_ctrl_poll(chan, rx_control)) {
chan->expected_ack_seq = tx_seq;
l2cap_drop_acked_frames(chan);
@@ -3560,7 +4023,7 @@ static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_
chan->srej_save_reqseq = tx_seq;
set_bit(CONN_SREJ_ACT, &chan->conn_state);
}
- } else if (rx_control & L2CAP_CTRL_FINAL) {
+ } else if (__is_ctrl_final(chan, rx_control)) {
if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
chan->srej_save_reqseq == tx_seq)
clear_bit(CONN_SREJ_ACT, &chan->conn_state);
@@ -3575,37 +4038,39 @@ static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_
}
}
-static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
+static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
{
- u8 tx_seq = __get_reqseq(rx_control);
+ u16 tx_seq = __get_reqseq(chan, rx_control);
- BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
+ BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
chan->expected_ack_seq = tx_seq;
l2cap_drop_acked_frames(chan);
- if (rx_control & L2CAP_CTRL_POLL)
+ if (__is_ctrl_poll(chan, rx_control))
set_bit(CONN_SEND_FBIT, &chan->conn_state);
if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
__clear_retrans_timer(chan);
- if (rx_control & L2CAP_CTRL_POLL)
+ if (__is_ctrl_poll(chan, rx_control))
l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
return;
}
- if (rx_control & L2CAP_CTRL_POLL)
+ if (__is_ctrl_poll(chan, rx_control)) {
l2cap_send_srejtail(chan);
- else
- l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
+ } else {
+ rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
+ l2cap_send_sframe(chan, rx_control);
+ }
}
-static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
+static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
{
- BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
+ BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
- if (L2CAP_CTRL_FINAL & rx_control &&
+ if (__is_ctrl_final(chan, rx_control) &&
test_bit(CONN_WAIT_F, &chan->conn_state)) {
__clear_monitor_timer(chan);
if (chan->unacked_frames > 0)
@@ -3613,20 +4078,20 @@ static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_cont
clear_bit(CONN_WAIT_F, &chan->conn_state);
}
- switch (rx_control & L2CAP_CTRL_SUPERVISE) {
- case L2CAP_SUPER_RCV_READY:
+ switch (__get_ctrl_super(chan, rx_control)) {
+ case L2CAP_SUPER_RR:
l2cap_data_channel_rrframe(chan, rx_control);
break;
- case L2CAP_SUPER_REJECT:
+ case L2CAP_SUPER_REJ:
l2cap_data_channel_rejframe(chan, rx_control);
break;
- case L2CAP_SUPER_SELECT_REJECT:
+ case L2CAP_SUPER_SREJ:
l2cap_data_channel_srejframe(chan, rx_control);
break;
- case L2CAP_SUPER_RCV_NOT_READY:
+ case L2CAP_SUPER_RNR:
l2cap_data_channel_rnrframe(chan, rx_control);
break;
}
@@ -3638,12 +4103,12 @@ static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_cont
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
- u16 control;
- u8 req_seq;
+ u32 control;
+ u16 req_seq;
int len, next_tx_seq_offset, req_seq_offset;
- control = get_unaligned_le16(skb->data);
- skb_pull(skb, 2);
+ control = __get_control(chan, skb->data);
+ skb_pull(skb, __ctrl_size(chan));
len = skb->len;
/*
@@ -3654,26 +4119,23 @@ static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
if (l2cap_check_fcs(chan, skb))
goto drop;
- if (__is_sar_start(control) && __is_iframe(control))
- len -= 2;
+ if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
+ len -= L2CAP_SDULEN_SIZE;
if (chan->fcs == L2CAP_FCS_CRC16)
- len -= 2;
+ len -= L2CAP_FCS_SIZE;
if (len > chan->mps) {
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
goto drop;
}
- req_seq = __get_reqseq(control);
- req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
- if (req_seq_offset < 0)
- req_seq_offset += 64;
+ req_seq = __get_reqseq(chan, control);
- next_tx_seq_offset =
- (chan->next_tx_seq - chan->expected_ack_seq) % 64;
- if (next_tx_seq_offset < 0)
- next_tx_seq_offset += 64;
+ req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
+
+ next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
+ chan->expected_ack_seq);
/* check for invalid req-seq */
if (req_seq_offset > next_tx_seq_offset) {
@@ -3681,7 +4143,7 @@ static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
goto drop;
}
- if (__is_iframe(control)) {
+ if (!__is_sframe(chan, control)) {
if (len < 0) {
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
goto drop;
@@ -3709,8 +4171,8 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
{
struct l2cap_chan *chan;
struct sock *sk = NULL;
- u16 control;
- u8 tx_seq;
+ u32 control;
+ u16 tx_seq;
int len;
chan = l2cap_get_chan_by_scid(conn, cid);
@@ -3751,23 +4213,23 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
goto done;
case L2CAP_MODE_STREAMING:
- control = get_unaligned_le16(skb->data);
- skb_pull(skb, 2);
+ control = __get_control(chan, skb->data);
+ skb_pull(skb, __ctrl_size(chan));
len = skb->len;
if (l2cap_check_fcs(chan, skb))
goto drop;
- if (__is_sar_start(control))
- len -= 2;
+ if (__is_sar_start(chan, control))
+ len -= L2CAP_SDULEN_SIZE;
if (chan->fcs == L2CAP_FCS_CRC16)
- len -= 2;
+ len -= L2CAP_FCS_SIZE;
- if (len > chan->mps || len < 0 || __is_sframe(control))
+ if (len > chan->mps || len < 0 || __is_sframe(chan, control))
goto drop;
- tx_seq = __get_txseq(control);
+ tx_seq = __get_txseq(chan, control);
if (chan->expected_tx_seq != tx_seq) {
/* Frame(s) missing - must discard partial SDU */
@@ -3779,7 +4241,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
/* TODO: Notify userland of missing data */
}
- chan->expected_tx_seq = (tx_seq + 1) % 64;
+ chan->expected_tx_seq = __next_seq(chan, tx_seq);
if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
@@ -3933,12 +4395,12 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
lm1 |= HCI_LM_ACCEPT;
- if (c->role_switch)
+ if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
lm1 |= HCI_LM_MASTER;
exact++;
} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
lm2 |= HCI_LM_ACCEPT;
- if (c->role_switch)
+ if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
lm2 |= HCI_LM_MASTER;
}
}
@@ -3973,7 +4435,7 @@ static int l2cap_disconn_ind(struct hci_conn *hcon)
BT_DBG("hcon %p", hcon);
if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
- return 0x13;
+ return HCI_ERROR_REMOTE_USER_TERM;
return conn->disc_reason;
}
@@ -4306,3 +4768,6 @@ void l2cap_exit(void)
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
+
+module_param(enable_hs, bool, 0644);
+MODULE_PARM_DESC(enable_hs, "Enable High Speed");
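The ERTM and streaming hunks above replace the old open-coded "% 64" arithmetic and 16-bit control words with channel-aware helpers (__get_control(), __seq_offset(), __next_seq(), __set_ctrl_super() and friends), so the same receive paths can serve both the 6-bit sequence numbers of the enhanced control field and the 14-bit sequence numbers selected via FLAG_EXT_CTRL. The helper bodies are not part of this diff; the stand-alone sketch below only illustrates the wrap-around arithmetic they are assumed to perform, using a per-channel mask of 0x3f or 0x3fff.

/*
 * Illustrative sketch only, not part of the patch: modular sequence-number
 * arithmetic in the spirit of __seq_offset()/__next_seq(), parameterized by
 * the size of the channel's sequence-number space.
 */
#include <stdio.h>

struct chan_sketch {
	unsigned int seq_mask;	/* 0x3f (enhanced) or 0x3fff (extended), both powers of two minus one */
};

static unsigned int sketch_next_seq(const struct chan_sketch *c, unsigned int seq)
{
	/* advance one frame, wrapping at the end of the sequence space */
	return (seq + 1) & c->seq_mask;
}

static unsigned int sketch_seq_offset(const struct chan_sketch *c,
				      unsigned int seq, unsigned int base)
{
	/* distance from base to seq, modulo the sequence space */
	return (seq - base) & c->seq_mask;
}

int main(void)
{
	struct chan_sketch enh = { 0x3f };	/* 6-bit sequence numbers */
	struct chan_sketch ext = { 0x3fff };	/* 14-bit sequence numbers */

	printf("enhanced: next(63)=%u offset(2,60)=%u\n",
	       sketch_next_seq(&enh, 63), sketch_seq_offset(&enh, 2, 60));
	printf("extended: next(16383)=%u offset(2,16380)=%u\n",
	       sketch_next_seq(&ext, 16383), sketch_seq_offset(&ext, 2, 16380));
	return 0;
}

The masking shortcut works here only because both sequence spaces are powers of two; the in-kernel helpers are free to compute the offset differently, this is just the behavior the converted call sites above rely on.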
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 5c406d3136f..e2e785c7463 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -334,7 +334,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
opts.mode = chan->mode;
opts.fcs = chan->fcs;
opts.max_tx = chan->max_tx;
- opts.txwin_size = (__u16)chan->tx_win;
+ opts.txwin_size = chan->tx_win;
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *) &opts, len))
@@ -359,10 +359,10 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
break;
}
- if (chan->role_switch)
+ if (test_bit(FLAG_ROLE_SWITCH, &chan->flags))
opt |= L2CAP_LM_MASTER;
- if (chan->force_reliable)
+ if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
opt |= L2CAP_LM_RELIABLE;
if (put_user(opt, (u32 __user *) optval))
@@ -449,7 +449,8 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
break;
case BT_FLUSHABLE:
- if (put_user(chan->flushable, (u32 __user *) optval))
+ if (put_user(test_bit(FLAG_FLUSHABLE, &chan->flags),
+ (u32 __user *) optval))
err = -EFAULT;
break;
@@ -461,7 +462,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
break;
}
- pwr.force_active = chan->force_active;
+ pwr.force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
len = min_t(unsigned int, len, sizeof(pwr));
if (copy_to_user(optval, (char *) &pwr, len))
@@ -469,6 +470,16 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
break;
+ case BT_CHANNEL_POLICY:
+ if (!enable_hs) {
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ if (put_user(chan->chan_policy, (u32 __user *) optval))
+ err = -EFAULT;
+ break;
+
default:
err = -ENOPROTOOPT;
break;
@@ -503,7 +514,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
opts.mode = chan->mode;
opts.fcs = chan->fcs;
opts.max_tx = chan->max_tx;
- opts.txwin_size = (__u16)chan->tx_win;
+ opts.txwin_size = chan->tx_win;
len = min_t(unsigned int, sizeof(opts), optlen);
if (copy_from_user((char *) &opts, optval, len)) {
@@ -511,7 +522,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
break;
}
- if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
+ if (opts.txwin_size > L2CAP_DEFAULT_EXT_WINDOW) {
err = -EINVAL;
break;
}
@@ -535,7 +546,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
chan->omtu = opts.omtu;
chan->fcs = opts.fcs;
chan->max_tx = opts.max_tx;
- chan->tx_win = (__u8)opts.txwin_size;
+ chan->tx_win = opts.txwin_size;
break;
case L2CAP_LM:
@@ -551,8 +562,15 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
if (opt & L2CAP_LM_SECURE)
chan->sec_level = BT_SECURITY_HIGH;
- chan->role_switch = (opt & L2CAP_LM_MASTER);
- chan->force_reliable = (opt & L2CAP_LM_RELIABLE);
+ if (opt & L2CAP_LM_MASTER)
+ set_bit(FLAG_ROLE_SWITCH, &chan->flags);
+ else
+ clear_bit(FLAG_ROLE_SWITCH, &chan->flags);
+
+ if (opt & L2CAP_LM_RELIABLE)
+ set_bit(FLAG_FORCE_RELIABLE, &chan->flags);
+ else
+ clear_bit(FLAG_FORCE_RELIABLE, &chan->flags);
break;
default:
@@ -658,7 +676,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
}
}
- chan->flushable = opt;
+ if (opt)
+ set_bit(FLAG_FLUSHABLE, &chan->flags);
+ else
+ clear_bit(FLAG_FLUSHABLE, &chan->flags);
break;
case BT_POWER:
@@ -675,7 +696,36 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
err = -EFAULT;
break;
}
- chan->force_active = pwr.force_active;
+
+ if (pwr.force_active)
+ set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+ else
+ clear_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+ break;
+
+ case BT_CHANNEL_POLICY:
+ if (!enable_hs) {
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ if (get_user(opt, (u32 __user *) optval)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (opt > BT_CHANNEL_POLICY_AMP_PREFERRED) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (chan->mode != L2CAP_MODE_ERTM &&
+ chan->mode != L2CAP_MODE_STREAMING) {
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ chan->chan_policy = (u8) opt;
break;
default:
@@ -709,7 +759,7 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
return -ENOTCONN;
}
- err = l2cap_chan_send(chan, msg, len);
+ err = l2cap_chan_send(chan, msg, len, sk->sk_priority);
release_sock(sk);
return err;
@@ -931,11 +981,9 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
chan->fcs = pchan->fcs;
chan->max_tx = pchan->max_tx;
chan->tx_win = pchan->tx_win;
+ chan->tx_win_max = pchan->tx_win_max;
chan->sec_level = pchan->sec_level;
- chan->role_switch = pchan->role_switch;
- chan->force_reliable = pchan->force_reliable;
- chan->flushable = pchan->flushable;
- chan->force_active = pchan->force_active;
+ chan->flags = pchan->flags;
security_sk_clone(parent, sk);
} else {
@@ -964,12 +1012,10 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
chan->max_tx = L2CAP_DEFAULT_MAX_TX;
chan->fcs = L2CAP_FCS_CRC16;
chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
+ chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
chan->sec_level = BT_SECURITY_LOW;
- chan->role_switch = 0;
- chan->force_reliable = 0;
- chan->flushable = BT_FLUSHABLE_OFF;
- chan->force_active = BT_POWER_FORCE_ACTIVE_ON;
-
+ chan->flags = 0;
+ set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
/* Default config options */
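In the l2cap_sock.c hunks above, the per-channel booleans role_switch, force_reliable, flushable and force_active are folded into a single chan->flags word driven by set_bit()/clear_bit()/test_bit(), which also lets l2cap_sock_init() copy every option at once via chan->flags = pchan->flags. The following minimal user-space sketch only mirrors that pattern; the sketch_* helpers and flag numbers are illustrative stand-ins, not the kernel bitops or the real FLAG_* values from l2cap.h.

/* Illustrative sketch only: one flags word instead of several booleans. */
#include <stdio.h>

enum {
	SKETCH_FLAG_ROLE_SWITCH,	/* made-up bit numbers for this example */
	SKETCH_FLAG_FORCE_RELIABLE,
	SKETCH_FLAG_FLUSHABLE,
	SKETCH_FLAG_FORCE_ACTIVE,
};

struct sketch_chan {
	unsigned long flags;	/* plays the role of chan->flags */
};

static void sketch_set_bit(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

static void sketch_clear_bit(int nr, unsigned long *addr)
{
	*addr &= ~(1UL << nr);
}

static int sketch_test_bit(int nr, const unsigned long *addr)
{
	return !!(*addr & (1UL << nr));
}

int main(void)
{
	struct sketch_chan parent = { 0 }, child = { 0 };

	/* setsockopt(L2CAP_LM)-style update: one bit per link-mode option */
	sketch_set_bit(SKETCH_FLAG_ROLE_SWITCH, &parent.flags);
	sketch_clear_bit(SKETCH_FLAG_FORCE_RELIABLE, &parent.flags);

	/* a child channel inherits every option in a single assignment */
	child.flags = parent.flags;

	printf("child role switch: %d, force reliable: %d\n",
	       sketch_test_bit(SKETCH_FLAG_ROLE_SWITCH, &child.flags),
	       sketch_test_bit(SKETCH_FLAG_FORCE_RELIABLE, &child.flags));
	return 0;
}
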
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 2c763429686..94739d3c4f5 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -33,22 +33,23 @@
#define MGMT_VERSION 0
#define MGMT_REVISION 1
+#define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
+
struct pending_cmd {
struct list_head list;
- __u16 opcode;
+ u16 opcode;
int index;
void *param;
struct sock *sk;
void *user_data;
};
-static LIST_HEAD(cmd_list);
-
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
struct sk_buff *skb;
struct mgmt_hdr *hdr;
struct mgmt_ev_cmd_status *ev;
+ int err;
BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
@@ -66,10 +67,11 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
ev->status = status;
put_unaligned_le16(cmd, &ev->opcode);
- if (sock_queue_rcv_skb(sk, skb) < 0)
+ err = sock_queue_rcv_skb(sk, skb);
+ if (err < 0)
kfree_skb(skb);
- return 0;
+ return err;
}
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
@@ -78,6 +80,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
struct sk_buff *skb;
struct mgmt_hdr *hdr;
struct mgmt_ev_cmd_complete *ev;
+ int err;
BT_DBG("sock %p", sk);
@@ -97,10 +100,11 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
if (rp)
memcpy(ev->data, rp, rp_len);
- if (sock_queue_rcv_skb(sk, skb) < 0)
+ err = sock_queue_rcv_skb(sk, skb);
+ if (err < 0)
kfree_skb(skb);
- return 0;
+ return err;
}
static int read_version(struct sock *sk)
@@ -120,6 +124,7 @@ static int read_index_list(struct sock *sk)
{
struct mgmt_rp_read_index_list *rp;
struct list_head *p;
+ struct hci_dev *d;
size_t rp_len;
u16 count;
int i, err;
@@ -143,10 +148,9 @@ static int read_index_list(struct sock *sk)
put_unaligned_le16(count, &rp->num_controllers);
i = 0;
- list_for_each(p, &hci_dev_list) {
- struct hci_dev *d = list_entry(p, struct hci_dev, list);
-
- hci_del_off_timer(d);
+ list_for_each_entry(d, &hci_dev_list, list) {
+ if (test_and_clear_bit(HCI_AUTO_OFF, &d->flags))
+ cancel_delayed_work(&d->power_off);
if (test_bit(HCI_SETUP, &d->flags))
continue;
@@ -176,7 +180,8 @@ static int read_controller_info(struct sock *sk, u16 index)
if (!hdev)
return cmd_status(sk, index, MGMT_OP_READ_INFO, ENODEV);
- hci_del_off_timer(hdev);
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+ cancel_delayed_work_sync(&hdev->power_off);
hci_dev_lock_bh(hdev);
@@ -221,7 +226,8 @@ static void mgmt_pending_free(struct pending_cmd *cmd)
}
static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
- u16 index, void *data, u16 len)
+ struct hci_dev *hdev,
+ void *data, u16 len)
{
struct pending_cmd *cmd;
@@ -230,7 +236,7 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
return NULL;
cmd->opcode = opcode;
- cmd->index = index;
+ cmd->index = hdev->id;
cmd->param = kmalloc(len, GFP_ATOMIC);
if (!cmd->param) {
@@ -244,48 +250,36 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
cmd->sk = sk;
sock_hold(sk);
- list_add(&cmd->list, &cmd_list);
+ list_add(&cmd->list, &hdev->mgmt_pending);
return cmd;
}
-static void mgmt_pending_foreach(u16 opcode, int index,
+static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
void (*cb)(struct pending_cmd *cmd, void *data),
void *data)
{
struct list_head *p, *n;
- list_for_each_safe(p, n, &cmd_list) {
+ list_for_each_safe(p, n, &hdev->mgmt_pending) {
struct pending_cmd *cmd;
cmd = list_entry(p, struct pending_cmd, list);
- if (cmd->opcode != opcode)
- continue;
-
- if (index >= 0 && cmd->index != index)
+ if (opcode > 0 && cmd->opcode != opcode)
continue;
cb(cmd, data);
}
}
-static struct pending_cmd *mgmt_pending_find(u16 opcode, int index)
+static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
{
- struct list_head *p;
-
- list_for_each(p, &cmd_list) {
- struct pending_cmd *cmd;
-
- cmd = list_entry(p, struct pending_cmd, list);
-
- if (cmd->opcode != opcode)
- continue;
-
- if (index >= 0 && cmd->index != index)
- continue;
+ struct pending_cmd *cmd;
- return cmd;
+ list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
+ if (cmd->opcode == opcode)
+ return cmd;
}
return NULL;
@@ -323,12 +317,12 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
goto failed;
}
- if (mgmt_pending_find(MGMT_OP_SET_POWERED, index)) {
+ if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EBUSY);
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -337,7 +331,7 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
if (cp->val)
queue_work(hdev->workqueue, &hdev->power_on);
else
- queue_work(hdev->workqueue, &hdev->power_off);
+ queue_work(hdev->workqueue, &hdev->power_off.work);
err = 0;
@@ -350,7 +344,7 @@ failed:
static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
u16 len)
{
- struct mgmt_mode *cp;
+ struct mgmt_cp_set_discoverable *cp;
struct hci_dev *hdev;
struct pending_cmd *cmd;
u8 scan;
@@ -374,8 +368,8 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
goto failed;
}
- if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
- mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
+ if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
+ mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EBUSY);
goto failed;
}
@@ -386,7 +380,7 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -396,11 +390,16 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
if (cp->val)
scan |= SCAN_INQUIRY;
+ else
+ cancel_delayed_work(&hdev->discov_off);
err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
if (err < 0)
mgmt_pending_remove(cmd);
+ if (cp->val)
+ hdev->discov_timeout = get_unaligned_le16(&cp->timeout);
+
failed:
hci_dev_unlock_bh(hdev);
hci_dev_put(hdev);
@@ -435,8 +434,8 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
goto failed;
}
- if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
- mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
+ if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
+ mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EBUSY);
goto failed;
}
@@ -446,7 +445,7 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -468,8 +467,8 @@ failed:
return err;
}
-static int mgmt_event(u16 event, u16 index, void *data, u16 data_len,
- struct sock *skip_sk)
+static int mgmt_event(u16 event, struct hci_dev *hdev, void *data,
+ u16 data_len, struct sock *skip_sk)
{
struct sk_buff *skb;
struct mgmt_hdr *hdr;
@@ -482,7 +481,10 @@ static int mgmt_event(u16 event, u16 index, void *data, u16 data_len,
hdr = (void *) skb_put(skb, sizeof(*hdr));
hdr->opcode = cpu_to_le16(event);
- hdr->index = cpu_to_le16(index);
+ if (hdev)
+ hdr->index = cpu_to_le16(hdev->id);
+ else
+ hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
hdr->len = cpu_to_le16(data_len);
if (data)
@@ -534,7 +536,7 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
ev.val = cp->val;
- err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk);
+ err = mgmt_event(MGMT_EV_PAIRABLE, hdev, &ev, sizeof(ev), sk);
failed:
hci_dev_unlock_bh(hdev);
@@ -587,7 +589,7 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
u16 eir_len = 0;
u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)];
int i, truncated = 0;
- struct list_head *p;
+ struct bt_uuid *uuid;
size_t name_len;
name_len = strlen(hdev->dev_name);
@@ -612,8 +614,7 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
memset(uuid16_list, 0, sizeof(uuid16_list));
/* Group all UUID16 types */
- list_for_each(p, &hdev->uuids) {
- struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
+ list_for_each_entry(uuid, &hdev->uuids, list) {
u16 uuid16;
uuid16 = get_uuid16(uuid->uuid);
@@ -689,14 +690,11 @@ static int update_eir(struct hci_dev *hdev)
static u8 get_service_classes(struct hci_dev *hdev)
{
- struct list_head *p;
+ struct bt_uuid *uuid;
u8 val = 0;
- list_for_each(p, &hdev->uuids) {
- struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
-
+ list_for_each_entry(uuid, &hdev->uuids, list)
val |= uuid->svc_hint;
- }
return val;
}
@@ -895,6 +893,9 @@ static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
if (err == 0)
err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
0);
+ else
+ cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, -err);
+
hci_dev_unlock_bh(hdev);
hci_dev_put(hdev);
@@ -902,30 +903,32 @@ static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
return err;
}
-static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
+static int load_link_keys(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
{
struct hci_dev *hdev;
- struct mgmt_cp_load_keys *cp;
+ struct mgmt_cp_load_link_keys *cp;
u16 key_count, expected_len;
int i;
cp = (void *) data;
if (len < sizeof(*cp))
- return -EINVAL;
+ return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS, EINVAL);
key_count = get_unaligned_le16(&cp->key_count);
- expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
+ expected_len = sizeof(*cp) + key_count *
+ sizeof(struct mgmt_link_key_info);
if (expected_len != len) {
- BT_ERR("load_keys: expected %u bytes, got %u bytes",
+ BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
len, expected_len);
- return -EINVAL;
+ return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS, EINVAL);
}
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_LOAD_KEYS, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS, ENODEV);
BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
key_count);
@@ -942,7 +945,7 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
for (i = 0; i < key_count; i++) {
- struct mgmt_key_info *key = &cp->keys[i];
+ struct mgmt_link_key_info *key = &cp->keys[i];
hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type,
key->pin_len);
@@ -954,27 +957,28 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
return 0;
}
-static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
+static int remove_keys(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
{
struct hci_dev *hdev;
- struct mgmt_cp_remove_key *cp;
+ struct mgmt_cp_remove_keys *cp;
struct hci_conn *conn;
int err;
cp = (void *) data;
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS, EINVAL);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS, ENODEV);
hci_dev_lock_bh(hdev);
err = hci_remove_link_key(hdev, &cp->bdaddr);
if (err < 0) {
- err = cmd_status(sk, index, MGMT_OP_REMOVE_KEY, -err);
+ err = cmd_status(sk, index, MGMT_OP_REMOVE_KEYS, -err);
goto unlock;
}
@@ -1026,7 +1030,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
goto failed;
}
- if (mgmt_pending_find(MGMT_OP_DISCONNECT, index)) {
+ if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
err = cmd_status(sk, index, MGMT_OP_DISCONNECT, EBUSY);
goto failed;
}
@@ -1040,7 +1044,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -1060,10 +1064,23 @@ failed:
return err;
}
+static u8 link_to_mgmt(u8 link_type)
+{
+ switch (link_type) {
+ case LE_LINK:
+ return MGMT_ADDR_LE;
+ case ACL_LINK:
+ return MGMT_ADDR_BREDR;
+ default:
+ return MGMT_ADDR_INVALID;
+ }
+}
+
static int get_connections(struct sock *sk, u16 index)
{
struct mgmt_rp_get_connections *rp;
struct hci_dev *hdev;
+ struct hci_conn *c;
struct list_head *p;
size_t rp_len;
u16 count;
@@ -1082,7 +1099,7 @@ static int get_connections(struct sock *sk, u16 index)
count++;
}
- rp_len = sizeof(*rp) + (count * sizeof(bdaddr_t));
+ rp_len = sizeof(*rp) + (count * sizeof(struct mgmt_addr_info));
rp = kmalloc(rp_len, GFP_ATOMIC);
if (!rp) {
err = -ENOMEM;
@@ -1092,12 +1109,17 @@ static int get_connections(struct sock *sk, u16 index)
put_unaligned_le16(count, &rp->conn_count);
i = 0;
- list_for_each(p, &hdev->conn_hash.list) {
- struct hci_conn *c = list_entry(p, struct hci_conn, list);
-
- bacpy(&rp->conn[i++], &c->dst);
+ list_for_each_entry(c, &hdev->conn_hash.list, list) {
+ bacpy(&rp->addr[i].bdaddr, &c->dst);
+ rp->addr[i].type = link_to_mgmt(c->type);
+ if (rp->addr[i].type == MGMT_ADDR_INVALID)
+ continue;
+ i++;
}
+ /* Recalculate length in case of filtered SCO connections, etc */
+ rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
+
err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len);
unlock:
@@ -1113,7 +1135,7 @@ static int send_pin_code_neg_reply(struct sock *sk, u16 index,
struct pending_cmd *cmd;
int err;
- cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index, cp,
+ cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
sizeof(*cp));
if (!cmd)
return -ENOMEM;
@@ -1174,7 +1196,7 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -1265,19 +1287,12 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- struct list_head *p;
-
- list_for_each(p, &cmd_list) {
- struct pending_cmd *cmd;
-
- cmd = list_entry(p, struct pending_cmd, list);
+ struct pending_cmd *cmd;
+ list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
continue;
- if (cmd->index != hdev->id)
- continue;
-
if (cmd->user_data != conn)
continue;
@@ -1310,16 +1325,19 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
struct pending_cmd *cmd;
+ struct hci_dev *hdev = conn->hdev;
BT_DBG("status %u", status);
+ hci_dev_lock_bh(hdev);
+
cmd = find_pairing(conn);
- if (!cmd) {
+ if (!cmd)
BT_DBG("Unable to find a pending command");
- return;
- }
+ else
+ pairing_complete(cmd, status);
- pairing_complete(cmd, status);
+ hci_dev_unlock_bh(hdev);
}
static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
@@ -1370,7 +1388,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
goto unlock;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
hci_conn_put(conn);
@@ -1432,7 +1450,7 @@ static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
goto failed;
}
- cmd = mgmt_pending_add(sk, mgmt_op, index, data, len);
+ cmd = mgmt_pending_add(sk, mgmt_op, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -1469,7 +1487,7 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
hci_dev_lock_bh(hdev);
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -1515,12 +1533,12 @@ static int read_local_oob_data(struct sock *sk, u16 index)
goto unlock;
}
- if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index)) {
+ if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, EBUSY);
goto unlock;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, index, NULL, 0);
+ cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
if (!cmd) {
err = -ENOMEM;
goto unlock;
@@ -1607,8 +1625,6 @@ static int remove_remote_oob_data(struct sock *sk, u16 index,
static int start_discovery(struct sock *sk, u16 index)
{
- u8 lap[3] = { 0x33, 0x8b, 0x9e };
- struct hci_cp_inquiry cp;
struct pending_cmd *cmd;
struct hci_dev *hdev;
int err;
@@ -1621,18 +1637,18 @@ static int start_discovery(struct sock *sk, u16 index)
hci_dev_lock_bh(hdev);
- cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, index, NULL, 0);
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, index, MGMT_OP_START_DISCOVERY, ENETDOWN);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
- memset(&cp, 0, sizeof(cp));
- memcpy(&cp.lap, lap, 3);
- cp.length = 0x08;
- cp.num_rsp = 0x00;
-
- err = hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
+ err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
if (err < 0)
mgmt_pending_remove(cmd);
@@ -1657,13 +1673,13 @@ static int stop_discovery(struct sock *sk, u16 index)
hci_dev_lock_bh(hdev);
- cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, index, NULL, 0);
+ cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
- err = hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+ err = hci_cancel_inquiry(hdev);
if (err < 0)
mgmt_pending_remove(cmd);
@@ -1678,7 +1694,6 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
u16 len)
{
struct hci_dev *hdev;
- struct pending_cmd *cmd;
struct mgmt_cp_block_device *cp = (void *) data;
int err;
@@ -1695,23 +1710,13 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
hci_dev_lock_bh(hdev);
- cmd = mgmt_pending_add(sk, MGMT_OP_BLOCK_DEVICE, index, NULL, 0);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
-
err = hci_blacklist_add(hdev, &cp->bdaddr);
-
if (err < 0)
err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, -err);
else
err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE,
NULL, 0);
- mgmt_pending_remove(cmd);
-
-failed:
hci_dev_unlock_bh(hdev);
hci_dev_put(hdev);
@@ -1722,7 +1727,6 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
u16 len)
{
struct hci_dev *hdev;
- struct pending_cmd *cmd;
struct mgmt_cp_unblock_device *cp = (void *) data;
int err;
@@ -1739,12 +1743,6 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
hci_dev_lock_bh(hdev);
- cmd = mgmt_pending_add(sk, MGMT_OP_UNBLOCK_DEVICE, index, NULL, 0);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
-
err = hci_blacklist_del(hdev, &cp->bdaddr);
if (err < 0)
@@ -1753,9 +1751,6 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE,
NULL, 0);
- mgmt_pending_remove(cmd);
-
-failed:
hci_dev_unlock_bh(hdev);
hci_dev_put(hdev);
@@ -1883,11 +1878,11 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
case MGMT_OP_SET_SERVICE_CACHE:
err = set_service_cache(sk, index, buf + sizeof(*hdr), len);
break;
- case MGMT_OP_LOAD_KEYS:
- err = load_keys(sk, index, buf + sizeof(*hdr), len);
+ case MGMT_OP_LOAD_LINK_KEYS:
+ err = load_link_keys(sk, index, buf + sizeof(*hdr), len);
break;
- case MGMT_OP_REMOVE_KEY:
- err = remove_key(sk, index, buf + sizeof(*hdr), len);
+ case MGMT_OP_REMOVE_KEYS:
+ err = remove_keys(sk, index, buf + sizeof(*hdr), len);
break;
case MGMT_OP_DISCONNECT:
err = disconnect(sk, index, buf + sizeof(*hdr), len);
@@ -1958,14 +1953,26 @@ done:
return err;
}
-int mgmt_index_added(u16 index)
+static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
- return mgmt_event(MGMT_EV_INDEX_ADDED, index, NULL, 0, NULL);
+ u8 *status = data;
+
+ cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
+ mgmt_pending_remove(cmd);
}
-int mgmt_index_removed(u16 index)
+int mgmt_index_added(struct hci_dev *hdev)
{
- return mgmt_event(MGMT_EV_INDEX_REMOVED, index, NULL, 0, NULL);
+ return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
+}
+
+int mgmt_index_removed(struct hci_dev *hdev)
+{
+ u8 status = ENODEV;
+
+ mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
+
+ return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
}
struct cmd_lookup {
@@ -1993,17 +2000,22 @@ static void mode_rsp(struct pending_cmd *cmd, void *data)
mgmt_pending_free(cmd);
}
-int mgmt_powered(u16 index, u8 powered)
+int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
struct mgmt_mode ev;
struct cmd_lookup match = { powered, NULL };
int ret;
- mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match);
+ mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, mode_rsp, &match);
+
+ if (!powered) {
+ u8 status = ENETDOWN;
+ mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
+ }
ev.val = powered;
- ret = mgmt_event(MGMT_EV_POWERED, index, &ev, sizeof(ev), match.sk);
+ ret = mgmt_event(MGMT_EV_POWERED, hdev, &ev, sizeof(ev), match.sk);
if (match.sk)
sock_put(match.sk);
@@ -2011,17 +2023,17 @@ int mgmt_powered(u16 index, u8 powered)
return ret;
}
-int mgmt_discoverable(u16 index, u8 discoverable)
+int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
struct mgmt_mode ev;
struct cmd_lookup match = { discoverable, NULL };
int ret;
- mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index, mode_rsp, &match);
+ mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, mode_rsp, &match);
ev.val = discoverable;
- ret = mgmt_event(MGMT_EV_DISCOVERABLE, index, &ev, sizeof(ev),
+ ret = mgmt_event(MGMT_EV_DISCOVERABLE, hdev, &ev, sizeof(ev),
match.sk);
if (match.sk)
@@ -2030,17 +2042,17 @@ int mgmt_discoverable(u16 index, u8 discoverable)
return ret;
}
-int mgmt_connectable(u16 index, u8 connectable)
+int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
{
struct mgmt_mode ev;
struct cmd_lookup match = { connectable, NULL };
int ret;
- mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, index, mode_rsp, &match);
+ mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, mode_rsp, &match);
ev.val = connectable;
- ret = mgmt_event(MGMT_EV_CONNECTABLE, index, &ev, sizeof(ev), match.sk);
+ ret = mgmt_event(MGMT_EV_CONNECTABLE, hdev, &ev, sizeof(ev), match.sk);
if (match.sk)
sock_put(match.sk);
@@ -2048,9 +2060,23 @@ int mgmt_connectable(u16 index, u8 connectable)
return ret;
}
-int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
+int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
+{
+ if (scan & SCAN_PAGE)
+ mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
+ cmd_status_rsp, &status);
+
+ if (scan & SCAN_INQUIRY)
+ mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
+ cmd_status_rsp, &status);
+
+ return 0;
+}
+
+int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+ u8 persistent)
{
- struct mgmt_ev_new_key ev;
+ struct mgmt_ev_new_link_key ev;
memset(&ev, 0, sizeof(ev));
@@ -2060,17 +2086,17 @@ int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
memcpy(ev.key.val, key->val, 16);
ev.key.pin_len = key->pin_len;
- return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 link_type)
+int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type)
{
- struct mgmt_ev_connected ev;
+ struct mgmt_addr_info ev;
bacpy(&ev.bdaddr, bdaddr);
- ev.link_type = link_type;
+ ev.type = link_to_mgmt(link_type);
- return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_CONNECTED, hdev, &ev, sizeof(ev), NULL);
}
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
@@ -2089,17 +2115,18 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data)
mgmt_pending_remove(cmd);
}
-int mgmt_disconnected(u16 index, bdaddr_t *bdaddr)
+int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
- struct mgmt_ev_disconnected ev;
+ struct mgmt_addr_info ev;
struct sock *sk = NULL;
int err;
- mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk);
+ mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
bacpy(&ev.bdaddr, bdaddr);
+ ev.type = link_to_mgmt(type);
- err = mgmt_event(MGMT_EV_DISCONNECTED, index, &ev, sizeof(ev), sk);
+ err = mgmt_event(MGMT_EV_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
if (sk)
sock_put(sk);
@@ -2107,57 +2134,60 @@ int mgmt_disconnected(u16 index, bdaddr_t *bdaddr)
return err;
}
-int mgmt_disconnect_failed(u16 index)
+int mgmt_disconnect_failed(struct hci_dev *hdev)
{
struct pending_cmd *cmd;
int err;
- cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, index);
+ cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
if (!cmd)
return -ENOENT;
- err = cmd_status(cmd->sk, index, MGMT_OP_DISCONNECT, EIO);
+ err = cmd_status(cmd->sk, hdev->id, MGMT_OP_DISCONNECT, EIO);
mgmt_pending_remove(cmd);
return err;
}
-int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type,
+ u8 status)
{
struct mgmt_ev_connect_failed ev;
- bacpy(&ev.bdaddr, bdaddr);
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_mgmt(type);
ev.status = status;
- return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr, u8 secure)
+int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
struct mgmt_ev_pin_code_request ev;
bacpy(&ev.bdaddr, bdaddr);
ev.secure = secure;
- return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev),
+ return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
NULL);
}
-int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status)
{
struct pending_cmd *cmd;
struct mgmt_rp_pin_code_reply rp;
int err;
- cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, index);
+ cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
if (!cmd)
return -ENOENT;
bacpy(&rp.bdaddr, bdaddr);
rp.status = status;
- err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_REPLY, &rp,
+ err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, &rp,
sizeof(rp));
mgmt_pending_remove(cmd);
@@ -2165,20 +2195,21 @@ int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
return err;
}
-int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status)
{
struct pending_cmd *cmd;
struct mgmt_rp_pin_code_reply rp;
int err;
- cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, index);
+ cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
if (!cmd)
return -ENOENT;
bacpy(&rp.bdaddr, bdaddr);
rp.status = status;
- err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, &rp,
+ err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY, &rp,
sizeof(rp));
mgmt_pending_remove(cmd);
@@ -2186,97 +2217,93 @@ int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
return err;
}
-int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value,
- u8 confirm_hint)
+int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ __le32 value, u8 confirm_hint)
{
struct mgmt_ev_user_confirm_request ev;
- BT_DBG("hci%u", index);
+ BT_DBG("%s", hdev->name);
bacpy(&ev.bdaddr, bdaddr);
ev.confirm_hint = confirm_hint;
put_unaligned_le32(value, &ev.value);
- return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev),
+ return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
NULL);
}
-static int confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status,
- u8 opcode)
+static int confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status, u8 opcode)
{
struct pending_cmd *cmd;
struct mgmt_rp_user_confirm_reply rp;
int err;
- cmd = mgmt_pending_find(opcode, index);
+ cmd = mgmt_pending_find(opcode, hdev);
if (!cmd)
return -ENOENT;
bacpy(&rp.bdaddr, bdaddr);
rp.status = status;
- err = cmd_complete(cmd->sk, index, opcode, &rp, sizeof(rp));
+ err = cmd_complete(cmd->sk, hdev->id, opcode, &rp, sizeof(rp));
mgmt_pending_remove(cmd);
return err;
}
-int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status)
{
- return confirm_reply_complete(index, bdaddr, status,
+ return confirm_reply_complete(hdev, bdaddr, status,
MGMT_OP_USER_CONFIRM_REPLY);
}
-int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 status)
{
- return confirm_reply_complete(index, bdaddr, status,
+ return confirm_reply_complete(hdev, bdaddr, status,
MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
-int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status)
{
struct mgmt_ev_auth_failed ev;
bacpy(&ev.bdaddr, bdaddr);
ev.status = status;
- return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status)
+int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
struct pending_cmd *cmd;
- struct hci_dev *hdev;
struct mgmt_cp_set_local_name ev;
int err;
memset(&ev, 0, sizeof(ev));
memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
- cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, index);
+ cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
if (!cmd)
goto send_event;
if (status) {
- err = cmd_status(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, EIO);
+ err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
+ EIO);
goto failed;
}
- hdev = hci_dev_get(index);
- if (hdev) {
- hci_dev_lock_bh(hdev);
- update_eir(hdev);
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
- }
+ update_eir(hdev);
- err = cmd_complete(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, &ev,
+ err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, &ev,
sizeof(ev));
if (err < 0)
goto failed;
send_event:
- err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, index, &ev, sizeof(ev),
+ err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
cmd ? cmd->sk : NULL);
failed:
@@ -2285,29 +2312,30 @@ failed:
return err;
}
-int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer,
- u8 status)
+int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
+ u8 *randomizer, u8 status)
{
struct pending_cmd *cmd;
int err;
- BT_DBG("hci%u status %u", index, status);
+ BT_DBG("%s status %u", hdev->name, status);
- cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index);
+ cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
if (!cmd)
return -ENOENT;
if (status) {
- err = cmd_status(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
- EIO);
+ err = cmd_status(cmd->sk, hdev->id,
+ MGMT_OP_READ_LOCAL_OOB_DATA, EIO);
} else {
struct mgmt_rp_read_local_oob_data rp;
memcpy(rp.hash, hash, sizeof(rp.hash));
memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
- err = cmd_complete(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
- &rp, sizeof(rp));
+ err = cmd_complete(cmd->sk, hdev->id,
+ MGMT_OP_READ_LOCAL_OOB_DATA,
+ &rp, sizeof(rp));
}
mgmt_pending_remove(cmd);
@@ -2315,14 +2343,15 @@ int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer,
return err;
}
-int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
- u8 *eir)
+int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type,
+ u8 *dev_class, s8 rssi, u8 *eir)
{
struct mgmt_ev_device_found ev;
memset(&ev, 0, sizeof(ev));
- bacpy(&ev.bdaddr, bdaddr);
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_mgmt(type);
ev.rssi = rssi;
if (eir)
@@ -2331,10 +2360,10 @@ int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
if (dev_class)
memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
- return mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name)
+int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name)
{
struct mgmt_ev_remote_name ev;
@@ -2343,37 +2372,64 @@ int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name)
bacpy(&ev.bdaddr, bdaddr);
memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
- return mgmt_event(MGMT_EV_REMOTE_NAME, index, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_REMOTE_NAME, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_discovering(u16 index, u8 discovering)
+int mgmt_inquiry_failed(struct hci_dev *hdev, u8 status)
{
- return mgmt_event(MGMT_EV_DISCOVERING, index, &discovering,
+ struct pending_cmd *cmd;
+ int err;
+
+ cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
+ if (!cmd)
+ return -ENOENT;
+
+ err = cmd_status(cmd->sk, hdev->id, cmd->opcode, status);
+ mgmt_pending_remove(cmd);
+
+ return err;
+}
+
+int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
+{
+ struct pending_cmd *cmd;
+
+ if (discovering)
+ cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
+ else
+ cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
+
+ if (cmd != NULL) {
+ cmd_complete(cmd->sk, hdev->id, cmd->opcode, NULL, 0);
+ mgmt_pending_remove(cmd);
+ }
+
+ return mgmt_event(MGMT_EV_DISCOVERING, hdev, &discovering,
sizeof(discovering), NULL);
}
-int mgmt_device_blocked(u16 index, bdaddr_t *bdaddr)
+int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct pending_cmd *cmd;
struct mgmt_ev_device_blocked ev;
- cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, index);
+ cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
bacpy(&ev.bdaddr, bdaddr);
- return mgmt_event(MGMT_EV_DEVICE_BLOCKED, index, &ev, sizeof(ev),
- cmd ? cmd->sk : NULL);
+ return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
+ cmd ? cmd->sk : NULL);
}
-int mgmt_device_unblocked(u16 index, bdaddr_t *bdaddr)
+int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct pending_cmd *cmd;
struct mgmt_ev_device_unblocked ev;
- cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, index);
+ cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
bacpy(&ev.bdaddr, bdaddr);
- return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, index, &ev, sizeof(ev),
- cmd ? cmd->sk : NULL);
+ return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
+ cmd ? cmd->sk : NULL);
}
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 4e32e18211f..8743f369ed3 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -65,7 +65,8 @@ static DEFINE_MUTEX(rfcomm_mutex);
static LIST_HEAD(session_list);
-static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len);
+static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len,
+ u32 priority);
static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci);
static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci);
static int rfcomm_queue_disc(struct rfcomm_dlc *d);
@@ -377,13 +378,11 @@ static void rfcomm_dlc_unlink(struct rfcomm_dlc *d)
static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci)
{
struct rfcomm_dlc *d;
- struct list_head *p;
- list_for_each(p, &s->dlcs) {
- d = list_entry(p, struct rfcomm_dlc, list);
+ list_for_each_entry(d, &s->dlcs, list)
if (d->dlci == dlci)
return d;
- }
+
return NULL;
}
@@ -749,19 +748,34 @@ void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *d
}
/* ---- RFCOMM frame sending ---- */
-static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len)
+static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len,
+ u32 priority)
{
struct socket *sock = s->sock;
+ struct sock *sk = sock->sk;
struct kvec iv = { data, len };
struct msghdr msg;
- BT_DBG("session %p len %d", s, len);
+ BT_DBG("session %p len %d priority %u", s, len, priority);
+
+ if (sk->sk_priority != priority) {
+ lock_sock(sk);
+ sk->sk_priority = priority;
+ release_sock(sk);
+ }
memset(&msg, 0, sizeof(msg));
return kernel_sendmsg(sock, &msg, &iv, 1, len);
}
+static int rfcomm_send_cmd(struct rfcomm_session *s, struct rfcomm_cmd *cmd)
+{
+ BT_DBG("%p cmd %u", s, cmd->ctrl);
+
+ return rfcomm_send_frame(s, (void *) cmd, sizeof(*cmd), HCI_PRIO_MAX);
+}
+
static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci)
{
struct rfcomm_cmd cmd;
@@ -773,7 +787,7 @@ static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci)
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+ return rfcomm_send_cmd(s, &cmd);
}
static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci)
@@ -787,7 +801,7 @@ static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci)
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+ return rfcomm_send_cmd(s, &cmd);
}
static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci)
@@ -801,7 +815,7 @@ static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci)
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+ return rfcomm_send_cmd(s, &cmd);
}
static int rfcomm_queue_disc(struct rfcomm_dlc *d)
@@ -815,6 +829,8 @@ static int rfcomm_queue_disc(struct rfcomm_dlc *d)
if (!skb)
return -ENOMEM;
+ skb->priority = HCI_PRIO_MAX;
+
cmd = (void *) __skb_put(skb, sizeof(*cmd));
cmd->addr = d->addr;
cmd->ctrl = __ctrl(RFCOMM_DISC, 1);
@@ -837,7 +853,7 @@ static int rfcomm_send_dm(struct rfcomm_session *s, u8 dlci)
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+ return rfcomm_send_cmd(s, &cmd);
}
static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type)
@@ -862,7 +878,7 @@ static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type)
*ptr = __fcs(buf); ptr++;
- return rfcomm_send_frame(s, buf, ptr - buf);
+ return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
}
static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d)
@@ -904,7 +920,7 @@ static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d
*ptr = __fcs(buf); ptr++;
- return rfcomm_send_frame(s, buf, ptr - buf);
+ return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
}
int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci,
@@ -942,7 +958,7 @@ int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci,
*ptr = __fcs(buf); ptr++;
- return rfcomm_send_frame(s, buf, ptr - buf);
+ return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
}
static int rfcomm_send_rls(struct rfcomm_session *s, int cr, u8 dlci, u8 status)
@@ -969,7 +985,7 @@ static int rfcomm_send_rls(struct rfcomm_session *s, int cr, u8 dlci, u8 status)
*ptr = __fcs(buf); ptr++;
- return rfcomm_send_frame(s, buf, ptr - buf);
+ return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
}
static int rfcomm_send_msc(struct rfcomm_session *s, int cr, u8 dlci, u8 v24_sig)
@@ -996,7 +1012,7 @@ static int rfcomm_send_msc(struct rfcomm_session *s, int cr, u8 dlci, u8 v24_sig
*ptr = __fcs(buf); ptr++;
- return rfcomm_send_frame(s, buf, ptr - buf);
+ return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
}
static int rfcomm_send_fcoff(struct rfcomm_session *s, int cr)
@@ -1018,7 +1034,7 @@ static int rfcomm_send_fcoff(struct rfcomm_session *s, int cr)
*ptr = __fcs(buf); ptr++;
- return rfcomm_send_frame(s, buf, ptr - buf);
+ return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
}
static int rfcomm_send_fcon(struct rfcomm_session *s, int cr)
@@ -1040,7 +1056,7 @@ static int rfcomm_send_fcon(struct rfcomm_session *s, int cr)
*ptr = __fcs(buf); ptr++;
- return rfcomm_send_frame(s, buf, ptr - buf);
+ return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
}
static int rfcomm_send_test(struct rfcomm_session *s, int cr, u8 *pattern, int len)
@@ -1091,7 +1107,7 @@ static int rfcomm_send_credits(struct rfcomm_session *s, u8 addr, u8 credits)
*ptr = __fcs(buf); ptr++;
- return rfcomm_send_frame(s, buf, ptr - buf);
+ return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
}
static void rfcomm_make_uih(struct sk_buff *skb, u8 addr)
@@ -1769,7 +1785,8 @@ static inline int rfcomm_process_tx(struct rfcomm_dlc *d)
return skb_queue_len(&d->tx_queue);
while (d->tx_credits && (skb = skb_dequeue(&d->tx_queue))) {
- err = rfcomm_send_frame(d->session, skb->data, skb->len);
+ err = rfcomm_send_frame(d->session, skb->data, skb->len,
+ skb->priority);
if (err < 0) {
skb_queue_head(&d->tx_queue, skb);
break;
@@ -2120,15 +2137,13 @@ static struct hci_cb rfcomm_cb = {
static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x)
{
struct rfcomm_session *s;
- struct list_head *pp, *p;
rfcomm_lock();
- list_for_each(p, &session_list) {
- s = list_entry(p, struct rfcomm_session, list);
- list_for_each(pp, &s->dlcs) {
+ list_for_each_entry(s, &session_list, list) {
+ struct rfcomm_dlc *d;
+ list_for_each_entry(d, &s->dlcs, list) {
struct sock *sk = s->sock->sk;
- struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list);
seq_printf(f, "%s %s %ld %d %d %d %d\n",
batostr(&bt_sk(sk)->src),
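The rfcomm/core.c hunks above (and the tty.c ones further down) repeatedly swap open-coded list_for_each()/list_entry() pairs for list_for_each_entry(). A minimal generic sketch of the two forms, using a made-up struct item rather than the rfcomm types:

/* Illustrative only: a generic lookup, not rfcomm code. */
#include <linux/list.h>

struct item {
	int id;
	struct list_head list;		/* linked into a caller-owned list */
};

static struct item *find_item_old(struct list_head *head, int id)
{
	struct list_head *p;

	list_for_each(p, head) {
		struct item *it = list_entry(p, struct item, list);

		if (it->id == id)
			return it;
	}
	return NULL;
}

static struct item *find_item_new(struct list_head *head, int id)
{
	struct item *it;

	/* list_for_each_entry() folds the list_entry() step away */
	list_for_each_entry(it, head, list)
		if (it->id == id)
			return it;

	return NULL;
}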
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 5417f612732..aea2bdd1510 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -600,6 +600,8 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
break;
}
+ skb->priority = sk->sk_priority;
+
err = rfcomm_dlc_send(d, skb);
if (err < 0) {
kfree_skb(skb);
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index c258796313e..fa8f4de53b9 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -34,6 +34,7 @@
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
+#include <linux/workqueue.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -65,7 +66,7 @@ struct rfcomm_dev {
struct rfcomm_dlc *dlc;
struct tty_struct *tty;
wait_queue_head_t wait;
- struct tasklet_struct wakeup_task;
+ struct work_struct wakeup_task;
struct device *tty_dev;
@@ -81,7 +82,7 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb);
static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err);
static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig);
-static void rfcomm_tty_wakeup(unsigned long arg);
+static void rfcomm_tty_wakeup(struct work_struct *work);
/* ---- Device functions ---- */
static void rfcomm_dev_destruct(struct rfcomm_dev *dev)
@@ -133,13 +134,10 @@ static inline void rfcomm_dev_put(struct rfcomm_dev *dev)
static struct rfcomm_dev *__rfcomm_dev_get(int id)
{
struct rfcomm_dev *dev;
- struct list_head *p;
- list_for_each(p, &rfcomm_dev_list) {
- dev = list_entry(p, struct rfcomm_dev, list);
+ list_for_each_entry(dev, &rfcomm_dev_list, list)
if (dev->id == id)
return dev;
- }
return NULL;
}
@@ -197,7 +195,7 @@ static DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
{
- struct rfcomm_dev *dev;
+ struct rfcomm_dev *dev, *entry;
struct list_head *head = &rfcomm_dev_list, *p;
int err = 0;
@@ -212,8 +210,8 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
if (req->dev_id < 0) {
dev->id = 0;
- list_for_each(p, &rfcomm_dev_list) {
- if (list_entry(p, struct rfcomm_dev, list)->id != dev->id)
+ list_for_each_entry(entry, &rfcomm_dev_list, list) {
+ if (entry->id != dev->id)
break;
dev->id++;
@@ -222,9 +220,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
} else {
dev->id = req->dev_id;
- list_for_each(p, &rfcomm_dev_list) {
- struct rfcomm_dev *entry = list_entry(p, struct rfcomm_dev, list);
-
+ list_for_each_entry(entry, &rfcomm_dev_list, list) {
if (entry->id == dev->id) {
err = -EADDRINUSE;
goto out;
@@ -257,7 +253,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
atomic_set(&dev->opened, 0);
init_waitqueue_head(&dev->wait);
- tasklet_init(&dev->wakeup_task, rfcomm_tty_wakeup, (unsigned long) dev);
+ INIT_WORK(&dev->wakeup_task, rfcomm_tty_wakeup);
skb_queue_head_init(&dev->pending);
@@ -351,7 +347,7 @@ static void rfcomm_wfree(struct sk_buff *skb)
struct rfcomm_dev *dev = (void *) skb->sk;
atomic_sub(skb->truesize, &dev->wmem_alloc);
if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags))
- tasklet_schedule(&dev->wakeup_task);
+ queue_work(system_nrt_wq, &dev->wakeup_task);
rfcomm_dev_put(dev);
}
@@ -455,9 +451,9 @@ static int rfcomm_release_dev(void __user *arg)
static int rfcomm_get_dev_list(void __user *arg)
{
+ struct rfcomm_dev *dev;
struct rfcomm_dev_list_req *dl;
struct rfcomm_dev_info *di;
- struct list_head *p;
int n = 0, size, err;
u16 dev_num;
@@ -479,8 +475,7 @@ static int rfcomm_get_dev_list(void __user *arg)
read_lock_bh(&rfcomm_dev_lock);
- list_for_each(p, &rfcomm_dev_list) {
- struct rfcomm_dev *dev = list_entry(p, struct rfcomm_dev, list);
+ list_for_each_entry(dev, &rfcomm_dev_list, list) {
if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
continue;
(di + n)->id = dev->id;
@@ -635,9 +630,10 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
}
/* ---- TTY functions ---- */
-static void rfcomm_tty_wakeup(unsigned long arg)
+static void rfcomm_tty_wakeup(struct work_struct *work)
{
- struct rfcomm_dev *dev = (void *) arg;
+ struct rfcomm_dev *dev = container_of(work, struct rfcomm_dev,
+ wakeup_task);
struct tty_struct *tty = dev->tty;
if (!tty)
return;
@@ -762,7 +758,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
rfcomm_dlc_close(dev->dlc, 0);
clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
- tasklet_kill(&dev->wakeup_task);
+ cancel_work_sync(&dev->wakeup_task);
rfcomm_dlc_lock(dev->dlc);
tty->driver_data = NULL;
@@ -1155,9 +1151,11 @@ static const struct tty_operations rfcomm_ops = {
int __init rfcomm_init_ttys(void)
{
+ int error;
+
rfcomm_tty_driver = alloc_tty_driver(RFCOMM_TTY_PORTS);
if (!rfcomm_tty_driver)
- return -1;
+ return -ENOMEM;
rfcomm_tty_driver->owner = THIS_MODULE;
rfcomm_tty_driver->driver_name = "rfcomm";
@@ -1172,10 +1170,11 @@ int __init rfcomm_init_ttys(void)
rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON;
tty_set_operations(rfcomm_tty_driver, &rfcomm_ops);
- if (tty_register_driver(rfcomm_tty_driver)) {
+ error = tty_register_driver(rfcomm_tty_driver);
+ if (error) {
BT_ERR("Can't register RFCOMM TTY driver");
put_tty_driver(rfcomm_tty_driver);
- return -1;
+ return error;
}
BT_INFO("RFCOMM TTY layer initialized");
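The tty.c hunks move the wakeup handler from a tasklet onto the shared non-reentrant workqueue, so it runs in process context. A rough sketch of that conversion pattern with hypothetical my_dev/my_wakeup names (not the rfcomm code itself):

/* Hypothetical example of the tasklet -> work_struct conversion;
 * names are made up, the API calls mirror the hunks above. */
#include <linux/workqueue.h>

struct my_dev {
	struct work_struct wakeup_task;
	/* ... */
};

static void my_wakeup(struct work_struct *work)
{
	/* the old tasklet handler received the device as an unsigned
	 * long; a work handler recovers it with container_of() */
	struct my_dev *dev = container_of(work, struct my_dev, wakeup_task);

	/* ... wake up pending writers on dev ... */
}

static void my_dev_init(struct my_dev *dev)
{
	INIT_WORK(&dev->wakeup_task, my_wakeup);	/* was tasklet_init() */
}

static void my_dev_kick(struct my_dev *dev)
{
	/* system_nrt_wq keeps the non-reentrancy a tasklet guaranteed */
	queue_work(system_nrt_wq, &dev->wakeup_task);	/* was tasklet_schedule() */
}

static void my_dev_stop(struct my_dev *dev)
{
	cancel_work_sync(&dev->wakeup_task);		/* was tasklet_kill() */
}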
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 759b6357264..94e94ca3538 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -181,7 +181,8 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
if (!skb)
return;
- hci_send_acl(conn->hcon, skb, 0);
+ skb->priority = HCI_PRIO_MAX;
+ hci_send_acl(conn->hchan, skb, 0);
mod_timer(&conn->security_timer, jiffies +
msecs_to_jiffies(SMP_TIMEOUT));
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index feb77ea7b58..a3754ac262c 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -186,7 +186,8 @@ static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
strcpy(info->bus_info, "N/A");
}
-static u32 br_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t br_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct net_bridge *br = netdev_priv(dev);
@@ -341,10 +342,10 @@ void br_dev_setup(struct net_device *dev)
dev->priv_flags = IFF_EBRIDGE;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
- NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX |
+ NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | NETIF_F_LLTX |
NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_TX;
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
- NETIF_F_GSO_MASK | NETIF_F_NO_CSUM |
+ NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_TX;
br->dev = dev;
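The bridge setup drops NETIF_F_NO_CSUM in favour of NETIF_F_HW_CSUM, and the same substitution (plus removal of the NO_CSUM special cases) recurs in the net/core hunks below; the idea is that a software device which never needs a checksum can just as well claim it checksums everything. A hedged sketch of what a purely software netdev advertises after the change (generic example, not copied from a specific driver):

#include <linux/netdevice.h>

/* Sketch of a software device's feature set after NETIF_F_NO_CSUM is
 * retired; the exact flag mix is illustrative. */
static void sw_dev_setup(struct net_device *dev)
{
	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
			NETIF_F_HW_CSUM |	/* was NETIF_F_NO_CSUM */
			NETIF_F_LLTX;
}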
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index c8e7861b88b..973813e3442 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -556,7 +556,7 @@ skip:
return skb->len;
}
-/* Create new static fdb entry */
+/* Update (create or replace) forwarding database entry */
static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
__u16 state, __u16 flags)
{
@@ -575,16 +575,21 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
} else {
if (flags & NLM_F_EXCL)
return -EEXIST;
+ }
+
+ if (fdb_to_nud(fdb) != state) {
+ if (state & NUD_PERMANENT)
+ fdb->is_local = fdb->is_static = 1;
+ else if (state & NUD_NOARP) {
+ fdb->is_local = 0;
+ fdb->is_static = 1;
+ } else
+ fdb->is_local = fdb->is_static = 0;
- if (flags & NLM_F_REPLACE)
- fdb->updated = fdb->used = jiffies;
- fdb->is_local = fdb->is_static = 0;
+ fdb->updated = fdb->used = jiffies;
+ fdb_notify(fdb, RTM_NEWNEIGH);
}
- if (state & NUD_PERMANENT)
- fdb->is_local = fdb->is_static = 1;
- else if (state & NUD_NOARP)
- fdb->is_static = 1;
return 0;
}
@@ -627,6 +632,11 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
return -EINVAL;
}
+ if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
+ pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
+ return -EINVAL;
+ }
+
p = br_port_get_rtnl(dev);
if (p == NULL) {
pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
@@ -634,9 +644,15 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
return -EINVAL;
}
- spin_lock_bh(&p->br->hash_lock);
- err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags);
- spin_unlock_bh(&p->br->hash_lock);
+ if (ndm->ndm_flags & NTF_USE) {
+ rcu_read_lock();
+ br_fdb_update(p->br, p, addr);
+ rcu_read_unlock();
+ } else {
+ spin_lock_bh(&p->br->hash_lock);
+ err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags);
+ spin_unlock_bh(&p->br->hash_lock);
+ }
return err;
}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f603e5b0b93..0a942fbccc9 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -296,10 +296,11 @@ int br_min_mtu(const struct net_bridge *br)
/*
* Recomputes features using slave's features
*/
-u32 br_features_recompute(struct net_bridge *br, u32 features)
+netdev_features_t br_features_recompute(struct net_bridge *br,
+ netdev_features_t features)
{
struct net_bridge_port *p;
- u32 mask;
+ netdev_features_t mask;
if (list_empty(&br->port_list))
return features;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index a5f4e576980..7743e0d109e 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -127,7 +127,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get(
{
struct br_ip br_dst;
- ipv6_addr_copy(&br_dst.u.ip6, dst);
+ br_dst.u.ip6 = *dst;
br_dst.proto = htons(ETH_P_IPV6);
return br_mdb_ip_get(mdb, &br_dst);
@@ -154,7 +154,7 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case htons(ETH_P_IPV6):
- ipv6_addr_copy(&ip.u.ip6, &ipv6_hdr(skb)->daddr);
+ ip.u.ip6 = ipv6_hdr(skb)->daddr;
break;
#endif
default:
@@ -474,7 +474,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
mldq->mld_cksum = 0;
mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
mldq->mld_reserved = 0;
- ipv6_addr_copy(&mldq->mld_mca, group);
+ mldq->mld_mca = *group;
/* checksum */
mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
@@ -783,7 +783,7 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
if (!ipv6_is_transient_multicast(group))
return 0;
- ipv6_addr_copy(&br_group.u.ip6, group);
+ br_group.u.ip6 = *group;
br_group.proto = htons(ETH_P_IPV6);
return br_multicast_add_group(br, port, &br_group);
@@ -1344,7 +1344,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
if (!ipv6_is_transient_multicast(group))
return;
- ipv6_addr_copy(&br_group.u.ip6, group);
+ br_group.u.ip6 = *group;
br_group.proto = htons(ETH_P_IPV6);
br_multicast_leave_group(br, port, &br_group);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index d7d6fb05411..4027029aa5e 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -387,7 +387,8 @@ extern int br_add_if(struct net_bridge *br,
extern int br_del_if(struct net_bridge *br,
struct net_device *dev);
extern int br_min_mtu(const struct net_bridge *br);
-extern u32 br_features_recompute(struct net_bridge *br, u32 features);
+extern netdev_features_t br_features_recompute(struct net_bridge *br,
+ netdev_features_t features);
/* br_input.c */
extern int br_handle_frame_finish(struct sk_buff *skb);
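The u32 -> netdev_features_t switch in these bridge prototypes continues through net/core/dev.c and ethtool.c below: feature masks become a dedicated (potentially 64-bit) type and are printed via the %pNF extension rather than 0x%08x. A minimal sketch of a feature callback with the wider type, using a hypothetical foo_ driver name:

#include <linux/netdevice.h>

/* Hypothetical ndo_fix_features: the conversion only changes the
 * signature to netdev_features_t; the body still works on the same
 * NETIF_F_* bits as before. */
static netdev_features_t foo_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	/* example constraint: no TSO without scatter/gather */
	if (!(features & NETIF_F_SG))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

/* Masks wider than 32 bits cannot go through 0x%08x any more; the
 * %pNF format used in the dev.c hunks takes a pointer to the mask:
 *
 *	netdev_dbg(dev, "features %pNF\n", &dev->features);
 */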
diff --git a/net/core/Makefile b/net/core/Makefile
index 0d357b1c4e5..3606d40aae6 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -19,3 +19,4 @@ obj-$(CONFIG_FIB_RULES) += fib_rules.o
obj-$(CONFIG_TRACEPOINTS) += net-traces.o
obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o
obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += timestamping.o
+obj-$(CONFIG_NETPRIO_CGROUP) += netprio_cgroup.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 6ba50a1e404..8afb244b205 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -137,6 +137,7 @@
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/net_tstamp.h>
+#include <linux/jump_label.h>
#include "net-sysfs.h"
@@ -1320,8 +1321,6 @@ EXPORT_SYMBOL(dev_close);
*/
void dev_disable_lro(struct net_device *dev)
{
- u32 flags;
-
/*
* If we're trying to disable lro on a vlan device
* use the underlying physical device instead
@@ -1329,15 +1328,9 @@ void dev_disable_lro(struct net_device *dev)
if (is_vlan_dev(dev))
dev = vlan_dev_real_dev(dev);
- if (dev->ethtool_ops && dev->ethtool_ops->get_flags)
- flags = dev->ethtool_ops->get_flags(dev);
- else
- flags = ethtool_op_get_flags(dev);
+ dev->wanted_features &= ~NETIF_F_LRO;
+ netdev_update_features(dev);
- if (!(flags & ETH_FLAG_LRO))
- return;
-
- __ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO);
if (unlikely(dev->features & NETIF_F_LRO))
netdev_WARN(dev, "failed to disable LRO!\n");
}
@@ -1449,34 +1442,32 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
}
EXPORT_SYMBOL(call_netdevice_notifiers);
-/* When > 0 there are consumers of rx skb time stamps */
-static atomic_t netstamp_needed = ATOMIC_INIT(0);
+static struct jump_label_key netstamp_needed __read_mostly;
void net_enable_timestamp(void)
{
- atomic_inc(&netstamp_needed);
+ jump_label_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);
void net_disable_timestamp(void)
{
- atomic_dec(&netstamp_needed);
+ jump_label_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);
static inline void net_timestamp_set(struct sk_buff *skb)
{
- if (atomic_read(&netstamp_needed))
+ skb->tstamp.tv64 = 0;
+ if (static_branch(&netstamp_needed))
__net_timestamp(skb);
- else
- skb->tstamp.tv64 = 0;
}
-static inline void net_timestamp_check(struct sk_buff *skb)
-{
- if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
- __net_timestamp(skb);
-}
+#define net_timestamp_check(COND, SKB) \
+ if (static_branch(&netstamp_needed)) { \
+ if ((COND) && !(SKB)->tstamp.tv64) \
+ __net_timestamp(SKB); \
+ } \
static int net_hwtstamp_validate(struct ifreq *ifr)
{
@@ -1923,7 +1914,8 @@ EXPORT_SYMBOL(skb_checksum_help);
* It may return NULL if the skb requires no segmentation. This is
* only possible when GSO is used for verifying header integrity.
*/
-struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
+struct sk_buff *skb_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
struct packet_type *ptype;
@@ -1953,9 +1945,9 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
dev->ethtool_ops->get_drvinfo(dev, &info);
- WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
- info.driver, dev ? dev->features : 0L,
- skb->sk ? skb->sk->sk_route_caps : 0L,
+ WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d ip_summed=%d\n",
+ info.driver, dev ? &dev->features : NULL,
+ skb->sk ? &skb->sk->sk_route_caps : NULL,
skb->len, skb->data_len, skb->ip_summed);
if (skb_header_cloned(skb) &&
@@ -2064,7 +2056,7 @@ static void dev_gso_skb_destructor(struct sk_buff *skb)
* This function segments the given skb and stores the list of segments
* in skb->next.
*/
-static int dev_gso_segment(struct sk_buff *skb, int features)
+static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
struct sk_buff *segs;
@@ -2103,7 +2095,7 @@ static inline void skb_orphan_try(struct sk_buff *skb)
}
}
-static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
{
return ((features & NETIF_F_GEN_CSUM) ||
((features & NETIF_F_V4_CSUM) &&
@@ -2114,7 +2106,8 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
protocol == htons(ETH_P_FCOE)));
}
-static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features)
+static netdev_features_t harmonize_features(struct sk_buff *skb,
+ __be16 protocol, netdev_features_t features)
{
if (!can_checksum_protocol(features, protocol)) {
features &= ~NETIF_F_ALL_CSUM;
@@ -2126,10 +2119,10 @@ static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features
return features;
}
-u32 netif_skb_features(struct sk_buff *skb)
+netdev_features_t netif_skb_features(struct sk_buff *skb)
{
__be16 protocol = skb->protocol;
- u32 features = skb->dev->features;
+ netdev_features_t features = skb->dev->features;
if (protocol == htons(ETH_P_8021Q)) {
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
@@ -2175,7 +2168,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
unsigned int skb_len;
if (likely(!skb->next)) {
- u32 features;
+ netdev_features_t features;
/*
* If device doesn't need skb->dst, release it right now while
@@ -2456,6 +2449,18 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
return rc;
}
+#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+static void skb_update_prio(struct sk_buff *skb)
+{
+ struct netprio_map *map = rcu_dereference(skb->dev->priomap);
+
+ if ((!skb->priority) && (skb->sk) && map)
+ skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
+}
+#else
+#define skb_update_prio(skb)
+#endif
+
static DEFINE_PER_CPU(int, xmit_recursion);
#define RECURSION_LIMIT 10
@@ -2496,6 +2501,8 @@ int dev_queue_xmit(struct sk_buff *skb)
*/
rcu_read_lock_bh();
+ skb_update_prio(skb);
+
txq = dev_pick_tx(dev, skb);
q = rcu_dereference_bh(txq->qdisc);
@@ -2718,6 +2725,8 @@ EXPORT_SYMBOL(__skb_get_rxhash);
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
+struct jump_label_key rps_needed __read_mostly;
+
static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow *rflow, u16 next_cpu)
@@ -2997,12 +3006,11 @@ int netif_rx(struct sk_buff *skb)
if (netpoll_rx(skb))
return NET_RX_DROP;
- if (netdev_tstamp_prequeue)
- net_timestamp_check(skb);
+ net_timestamp_check(netdev_tstamp_prequeue, skb);
trace_netif_rx(skb);
#ifdef CONFIG_RPS
- {
+ if (static_branch(&rps_needed)) {
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -3017,14 +3025,13 @@ int netif_rx(struct sk_buff *skb)
rcu_read_unlock();
preempt_enable();
- }
-#else
+ } else
+#endif
{
unsigned int qtail;
ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
put_cpu();
}
-#endif
return ret;
}
EXPORT_SYMBOL(netif_rx);
@@ -3230,8 +3237,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
int ret = NET_RX_DROP;
__be16 type;
- if (!netdev_tstamp_prequeue)
- net_timestamp_check(skb);
+ net_timestamp_check(!netdev_tstamp_prequeue, skb);
trace_netif_receive_skb(skb);
@@ -3362,14 +3368,13 @@ out:
*/
int netif_receive_skb(struct sk_buff *skb)
{
- if (netdev_tstamp_prequeue)
- net_timestamp_check(skb);
+ net_timestamp_check(netdev_tstamp_prequeue, skb);
if (skb_defer_rx_timestamp(skb))
return NET_RX_SUCCESS;
#ifdef CONFIG_RPS
- {
+ if (static_branch(&rps_needed)) {
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu, ret;
@@ -3380,16 +3385,12 @@ int netif_receive_skb(struct sk_buff *skb)
if (cpu >= 0) {
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
- } else {
- rcu_read_unlock();
- ret = __netif_receive_skb(skb);
+ return ret;
}
-
- return ret;
+ rcu_read_unlock();
}
-#else
- return __netif_receive_skb(skb);
#endif
+ return __netif_receive_skb(skb);
}
EXPORT_SYMBOL(netif_receive_skb);
@@ -5362,7 +5363,8 @@ static void rollback_registered(struct net_device *dev)
list_del(&single);
}
-static u32 netdev_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t netdev_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
/* Fix illegal checksum combinations */
if ((features & NETIF_F_HW_CSUM) &&
@@ -5371,12 +5373,6 @@ static u32 netdev_fix_features(struct net_device *dev, u32 features)
features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
}
- if ((features & NETIF_F_NO_CSUM) &&
- (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
- netdev_warn(dev, "mixed no checksumming and other settings.\n");
- features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
- }
-
/* Fix illegal SG+CSUM combinations. */
if ((features & NETIF_F_SG) &&
!(features & NETIF_F_ALL_CSUM)) {
@@ -5424,7 +5420,7 @@ static u32 netdev_fix_features(struct net_device *dev, u32 features)
int __netdev_update_features(struct net_device *dev)
{
- u32 features;
+ netdev_features_t features;
int err = 0;
ASSERT_RTNL();
@@ -5440,16 +5436,16 @@ int __netdev_update_features(struct net_device *dev)
if (dev->features == features)
return 0;
- netdev_dbg(dev, "Features changed: 0x%08x -> 0x%08x\n",
- dev->features, features);
+ netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
+ &dev->features, &features);
if (dev->netdev_ops->ndo_set_features)
err = dev->netdev_ops->ndo_set_features(dev, features);
if (unlikely(err < 0)) {
netdev_err(dev,
- "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
- err, features, dev->features);
+ "set_features() failed (%d); wanted %pNF, left %pNF\n",
+ err, &features, &dev->features);
return -1;
}
@@ -5633,11 +5629,12 @@ int register_netdevice(struct net_device *dev)
dev->wanted_features = dev->features & dev->hw_features;
/* Turn on no cache copy if HW is doing checksum */
- dev->hw_features |= NETIF_F_NOCACHE_COPY;
- if ((dev->features & NETIF_F_ALL_CSUM) &&
- !(dev->features & NETIF_F_NO_CSUM)) {
- dev->wanted_features |= NETIF_F_NOCACHE_COPY;
- dev->features |= NETIF_F_NOCACHE_COPY;
+ if (!(dev->flags & IFF_LOOPBACK)) {
+ dev->hw_features |= NETIF_F_NOCACHE_COPY;
+ if (dev->features & NETIF_F_ALL_CSUM) {
+ dev->wanted_features |= NETIF_F_NOCACHE_COPY;
+ dev->features |= NETIF_F_NOCACHE_COPY;
+ }
}
/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
@@ -6373,7 +6370,8 @@ static int dev_cpu_callback(struct notifier_block *nfb,
* @one to the master device with current feature set @all. Will not
* enable anything that is off in @mask. Returns the new feature set.
*/
-u32 netdev_increment_features(u32 all, u32 one, u32 mask)
+netdev_features_t netdev_increment_features(netdev_features_t all,
+ netdev_features_t one, netdev_features_t mask)
{
if (mask & NETIF_F_GEN_CSUM)
mask |= NETIF_F_ALL_CSUM;
@@ -6382,10 +6380,6 @@ u32 netdev_increment_features(u32 all, u32 one, u32 mask)
all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
all &= one | ~NETIF_F_ALL_FOR_ALL;
- /* If device needs checksumming, downgrade to it. */
- if (all & (NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM))
- all &= ~NETIF_F_NO_CSUM;
-
/* If one device supports hw checksumming, set for all. */
if (all & NETIF_F_GEN_CSUM)
all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
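The timestamp and RPS checks in dev.c above are converted from atomic counters and unconditional tests to jump labels, so the disabled case costs only a patched-out branch on the hot path. A compact sketch of that pattern with the API names of this kernel generation (jump_label_inc/jump_label_dec/static_branch, later renamed to the static_key interface); feature_needed and the bookkeeping helper are made up:

#include <linux/jump_label.h>
#include <linux/skbuff.h>

/* Made-up example key mirroring netstamp_needed/rps_needed above. */
static struct jump_label_key feature_needed __read_mostly;

static void do_expensive_bookkeeping(struct sk_buff *skb)
{
	/* ... whatever the feature needs per packet ... */
}

void feature_enable(void)
{
	jump_label_inc(&feature_needed);	/* reference-counted enable */
}

void feature_disable(void)
{
	jump_label_dec(&feature_needed);
}

static inline void hot_path(struct sk_buff *skb)
{
	/* compiles to a straight-line no-op while the count is zero */
	if (static_branch(&feature_needed))
		do_expensive_bookkeeping(skb);
}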
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index f4448170712..31b0b7f5383 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -36,235 +36,44 @@ u32 ethtool_op_get_link(struct net_device *dev)
}
EXPORT_SYMBOL(ethtool_op_get_link);
-u32 ethtool_op_get_tx_csum(struct net_device *dev)
-{
- return (dev->features & NETIF_F_ALL_CSUM) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_tx_csum);
-
-int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
-{
- if (data)
- dev->features |= NETIF_F_IP_CSUM;
- else
- dev->features &= ~NETIF_F_IP_CSUM;
-
- return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_tx_csum);
-
-int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data)
-{
- if (data)
- dev->features |= NETIF_F_HW_CSUM;
- else
- dev->features &= ~NETIF_F_HW_CSUM;
-
- return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum);
-
-int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
-{
- if (data)
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- else
- dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-
- return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum);
-
-u32 ethtool_op_get_sg(struct net_device *dev)
-{
- return (dev->features & NETIF_F_SG) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_sg);
-
-int ethtool_op_set_sg(struct net_device *dev, u32 data)
-{
- if (data)
- dev->features |= NETIF_F_SG;
- else
- dev->features &= ~NETIF_F_SG;
-
- return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_sg);
-
-u32 ethtool_op_get_tso(struct net_device *dev)
-{
- return (dev->features & NETIF_F_TSO) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_tso);
-
-int ethtool_op_set_tso(struct net_device *dev, u32 data)
-{
- if (data)
- dev->features |= NETIF_F_TSO;
- else
- dev->features &= ~NETIF_F_TSO;
-
- return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_tso);
-
-u32 ethtool_op_get_ufo(struct net_device *dev)
-{
- return (dev->features & NETIF_F_UFO) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_ufo);
-
-int ethtool_op_set_ufo(struct net_device *dev, u32 data)
-{
- if (data)
- dev->features |= NETIF_F_UFO;
- else
- dev->features &= ~NETIF_F_UFO;
- return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_ufo);
-
-/* the following list of flags are the same as their associated
- * NETIF_F_xxx values in include/linux/netdevice.h
- */
-static const u32 flags_dup_features =
- (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | ETH_FLAG_NTUPLE |
- ETH_FLAG_RXHASH);
-
-u32 ethtool_op_get_flags(struct net_device *dev)
-{
- /* in the future, this function will probably contain additional
- * handling for flags which are not so easily handled
- * by a simple masking operation
- */
-
- return dev->features & flags_dup_features;
-}
-EXPORT_SYMBOL(ethtool_op_get_flags);
-
-/* Check if device can enable (or disable) particular feature coded in "data"
- * argument. Flags "supported" describe features that can be toggled by device.
- * If feature can not be toggled, it state (enabled or disabled) must match
- * hardcoded device features state, otherwise flags are marked as invalid.
- */
-bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported)
-{
- u32 features = dev->features & flags_dup_features;
- /* "data" can contain only flags_dup_features bits,
- * see __ethtool_set_flags */
-
- return (features & ~supported) != (data & ~supported);
-}
-EXPORT_SYMBOL(ethtool_invalid_flags);
-
-int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
-{
- if (ethtool_invalid_flags(dev, data, supported))
- return -EINVAL;
-
- dev->features = ((dev->features & ~flags_dup_features) |
- (data & flags_dup_features));
- return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_flags);
-
/* Handlers for each ethtool command */
-#define ETHTOOL_DEV_FEATURE_WORDS 1
-
-static void ethtool_get_features_compat(struct net_device *dev,
- struct ethtool_get_features_block *features)
-{
- if (!dev->ethtool_ops)
- return;
-
- /* getting RX checksum */
- if (dev->ethtool_ops->get_rx_csum)
- if (dev->ethtool_ops->get_rx_csum(dev))
- features[0].active |= NETIF_F_RXCSUM;
-
- /* mark legacy-changeable features */
- if (dev->ethtool_ops->set_sg)
- features[0].available |= NETIF_F_SG;
- if (dev->ethtool_ops->set_tx_csum)
- features[0].available |= NETIF_F_ALL_CSUM;
- if (dev->ethtool_ops->set_tso)
- features[0].available |= NETIF_F_ALL_TSO;
- if (dev->ethtool_ops->set_rx_csum)
- features[0].available |= NETIF_F_RXCSUM;
- if (dev->ethtool_ops->set_flags)
- features[0].available |= flags_dup_features;
-}
-
-static int ethtool_set_feature_compat(struct net_device *dev,
- int (*legacy_set)(struct net_device *, u32),
- struct ethtool_set_features_block *features, u32 mask)
-{
- u32 do_set;
-
- if (!legacy_set)
- return 0;
-
- if (!(features[0].valid & mask))
- return 0;
-
- features[0].valid &= ~mask;
-
- do_set = !!(features[0].requested & mask);
-
- if (legacy_set(dev, do_set) < 0)
- netdev_info(dev,
- "Legacy feature change (%s) failed for 0x%08x\n",
- do_set ? "set" : "clear", mask);
-
- return 1;
-}
-
-static int ethtool_set_flags_compat(struct net_device *dev,
- int (*legacy_set)(struct net_device *, u32),
- struct ethtool_set_features_block *features, u32 mask)
-{
- u32 value;
-
- if (!legacy_set)
- return 0;
-
- if (!(features[0].valid & mask))
- return 0;
-
- value = dev->features & ~features[0].valid;
- value |= features[0].requested;
-
- features[0].valid &= ~mask;
-
- if (legacy_set(dev, value & mask) < 0)
- netdev_info(dev, "Legacy flags change failed\n");
-
- return 1;
-}
-
-static int ethtool_set_features_compat(struct net_device *dev,
- struct ethtool_set_features_block *features)
-{
- int compat;
-
- if (!dev->ethtool_ops)
- return 0;
-
- compat = ethtool_set_feature_compat(dev, dev->ethtool_ops->set_sg,
- features, NETIF_F_SG);
- compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tx_csum,
- features, NETIF_F_ALL_CSUM);
- compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tso,
- features, NETIF_F_ALL_TSO);
- compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum,
- features, NETIF_F_RXCSUM);
- compat |= ethtool_set_flags_compat(dev, dev->ethtool_ops->set_flags,
- features, flags_dup_features);
-
- return compat;
-}
+#define ETHTOOL_DEV_FEATURE_WORDS ((NETDEV_FEATURE_COUNT + 31) / 32)
+
+static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
+ [NETIF_F_SG_BIT] = "tx-scatter-gather",
+ [NETIF_F_IP_CSUM_BIT] = "tx-checksum-ipv4",
+ [NETIF_F_HW_CSUM_BIT] = "tx-checksum-ip-generic",
+ [NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6",
+ [NETIF_F_HIGHDMA_BIT] = "highdma",
+ [NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist",
+ [NETIF_F_HW_VLAN_TX_BIT] = "tx-vlan-hw-insert",
+
+ [NETIF_F_HW_VLAN_RX_BIT] = "rx-vlan-hw-parse",
+ [NETIF_F_HW_VLAN_FILTER_BIT] = "rx-vlan-filter",
+ [NETIF_F_VLAN_CHALLENGED_BIT] = "vlan-challenged",
+ [NETIF_F_GSO_BIT] = "tx-generic-segmentation",
+ [NETIF_F_LLTX_BIT] = "tx-lockless",
+ [NETIF_F_NETNS_LOCAL_BIT] = "netns-local",
+ [NETIF_F_GRO_BIT] = "rx-gro",
+ [NETIF_F_LRO_BIT] = "rx-lro",
+
+ [NETIF_F_TSO_BIT] = "tx-tcp-segmentation",
+ [NETIF_F_UFO_BIT] = "tx-udp-fragmentation",
+ [NETIF_F_GSO_ROBUST_BIT] = "tx-gso-robust",
+ [NETIF_F_TSO_ECN_BIT] = "tx-tcp-ecn-segmentation",
+ [NETIF_F_TSO6_BIT] = "tx-tcp6-segmentation",
+ [NETIF_F_FSO_BIT] = "tx-fcoe-segmentation",
+
+ [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc",
+ [NETIF_F_SCTP_CSUM_BIT] = "tx-checksum-sctp",
+ [NETIF_F_FCOE_MTU_BIT] = "fcoe-mtu",
+ [NETIF_F_NTUPLE_BIT] = "rx-ntuple-filter",
+ [NETIF_F_RXHASH_BIT] = "rx-hashing",
+ [NETIF_F_RXCSUM_BIT] = "rx-checksum",
+ [NETIF_F_NOCACHE_COPY_BIT] = "tx-nocache-copy",
+ [NETIF_F_LOOPBACK_BIT] = "loopback",
+};
static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
{
@@ -272,18 +81,21 @@ static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
.cmd = ETHTOOL_GFEATURES,
.size = ETHTOOL_DEV_FEATURE_WORDS,
};
- struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS] = {
- {
- .available = dev->hw_features,
- .requested = dev->wanted_features,
- .active = dev->features,
- .never_changed = NETIF_F_NEVER_CHANGE,
- },
- };
+ struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS];
u32 __user *sizeaddr;
u32 copy_size;
+ int i;
- ethtool_get_features_compat(dev, features);
+ /* in case feature bits run out again */
+ BUILD_BUG_ON(ETHTOOL_DEV_FEATURE_WORDS * sizeof(u32) > sizeof(netdev_features_t));
+
+ for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) {
+ features[i].available = (u32)(dev->hw_features >> (32 * i));
+ features[i].requested = (u32)(dev->wanted_features >> (32 * i));
+ features[i].active = (u32)(dev->features >> (32 * i));
+ features[i].never_changed =
+ (u32)(NETIF_F_NEVER_CHANGE >> (32 * i));
+ }
sizeaddr = useraddr + offsetof(struct ethtool_gfeatures, size);
if (get_user(copy_size, sizeaddr))
@@ -305,7 +117,8 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
{
struct ethtool_sfeatures cmd;
struct ethtool_set_features_block features[ETHTOOL_DEV_FEATURE_WORDS];
- int ret = 0;
+ netdev_features_t wanted = 0, valid = 0;
+ int i, ret = 0;
if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
return -EFAULT;
@@ -317,65 +130,29 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
if (copy_from_user(features, useraddr, sizeof(features)))
return -EFAULT;
- if (features[0].valid & ~NETIF_F_ETHTOOL_BITS)
- return -EINVAL;
+ for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) {
+ valid |= (netdev_features_t)features[i].valid << (32 * i);
+ wanted |= (netdev_features_t)features[i].requested << (32 * i);
+ }
- if (ethtool_set_features_compat(dev, features))
- ret |= ETHTOOL_F_COMPAT;
+ if (valid & ~NETIF_F_ETHTOOL_BITS)
+ return -EINVAL;
- if (features[0].valid & ~dev->hw_features) {
- features[0].valid &= dev->hw_features;
+ if (valid & ~dev->hw_features) {
+ valid &= dev->hw_features;
ret |= ETHTOOL_F_UNSUPPORTED;
}
- dev->wanted_features &= ~features[0].valid;
- dev->wanted_features |= features[0].valid & features[0].requested;
+ dev->wanted_features &= ~valid;
+ dev->wanted_features |= wanted & valid;
__netdev_update_features(dev);
- if ((dev->wanted_features ^ dev->features) & features[0].valid)
+ if ((dev->wanted_features ^ dev->features) & valid)
ret |= ETHTOOL_F_WISH;
return ret;
}
-static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GSTRING_LEN] = {
- /* NETIF_F_SG */ "tx-scatter-gather",
- /* NETIF_F_IP_CSUM */ "tx-checksum-ipv4",
- /* NETIF_F_NO_CSUM */ "tx-checksum-unneeded",
- /* NETIF_F_HW_CSUM */ "tx-checksum-ip-generic",
- /* NETIF_F_IPV6_CSUM */ "tx-checksum-ipv6",
- /* NETIF_F_HIGHDMA */ "highdma",
- /* NETIF_F_FRAGLIST */ "tx-scatter-gather-fraglist",
- /* NETIF_F_HW_VLAN_TX */ "tx-vlan-hw-insert",
-
- /* NETIF_F_HW_VLAN_RX */ "rx-vlan-hw-parse",
- /* NETIF_F_HW_VLAN_FILTER */ "rx-vlan-filter",
- /* NETIF_F_VLAN_CHALLENGED */ "vlan-challenged",
- /* NETIF_F_GSO */ "tx-generic-segmentation",
- /* NETIF_F_LLTX */ "tx-lockless",
- /* NETIF_F_NETNS_LOCAL */ "netns-local",
- /* NETIF_F_GRO */ "rx-gro",
- /* NETIF_F_LRO */ "rx-lro",
-
- /* NETIF_F_TSO */ "tx-tcp-segmentation",
- /* NETIF_F_UFO */ "tx-udp-fragmentation",
- /* NETIF_F_GSO_ROBUST */ "tx-gso-robust",
- /* NETIF_F_TSO_ECN */ "tx-tcp-ecn-segmentation",
- /* NETIF_F_TSO6 */ "tx-tcp6-segmentation",
- /* NETIF_F_FSO */ "tx-fcoe-segmentation",
- "",
- "",
-
- /* NETIF_F_FCOE_CRC */ "tx-checksum-fcoe-crc",
- /* NETIF_F_SCTP_CSUM */ "tx-checksum-sctp",
- /* NETIF_F_FCOE_MTU */ "fcoe-mtu",
- /* NETIF_F_NTUPLE */ "rx-ntuple-filter",
- /* NETIF_F_RXHASH */ "rx-hashing",
- /* NETIF_F_RXCSUM */ "rx-checksum",
- /* NETIF_F_NOCACHE_COPY */ "tx-nocache-copy",
- /* NETIF_F_LOOPBACK */ "loopback",
-};
-
static int __ethtool_get_sset_count(struct net_device *dev, int sset)
{
const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -402,7 +179,7 @@ static void __ethtool_get_strings(struct net_device *dev,
ops->get_strings(dev, stringset, data);
}
-static u32 ethtool_get_feature_mask(u32 eth_cmd)
+static netdev_features_t ethtool_get_feature_mask(u32 eth_cmd)
{
/* feature masks of legacy discrete ethtool ops */
@@ -433,136 +210,82 @@ static u32 ethtool_get_feature_mask(u32 eth_cmd)
}
}
-static void *__ethtool_get_one_feature_actor(struct net_device *dev, u32 ethcmd)
-{
- const struct ethtool_ops *ops = dev->ethtool_ops;
-
- if (!ops)
- return NULL;
-
- switch (ethcmd) {
- case ETHTOOL_GTXCSUM:
- return ops->get_tx_csum;
- case ETHTOOL_GRXCSUM:
- return ops->get_rx_csum;
- case ETHTOOL_SSG:
- return ops->get_sg;
- case ETHTOOL_STSO:
- return ops->get_tso;
- case ETHTOOL_SUFO:
- return ops->get_ufo;
- default:
- return NULL;
- }
-}
-
-static u32 __ethtool_get_rx_csum_oldbug(struct net_device *dev)
-{
- return !!(dev->features & NETIF_F_ALL_CSUM);
-}
-
static int ethtool_get_one_feature(struct net_device *dev,
char __user *useraddr, u32 ethcmd)
{
- u32 mask = ethtool_get_feature_mask(ethcmd);
+ netdev_features_t mask = ethtool_get_feature_mask(ethcmd);
struct ethtool_value edata = {
.cmd = ethcmd,
.data = !!(dev->features & mask),
};
- /* compatibility with discrete get_ ops */
- if (!(dev->hw_features & mask)) {
- u32 (*actor)(struct net_device *);
-
- actor = __ethtool_get_one_feature_actor(dev, ethcmd);
-
- /* bug compatibility with old get_rx_csum */
- if (ethcmd == ETHTOOL_GRXCSUM && !actor)
- actor = __ethtool_get_rx_csum_oldbug;
-
- if (actor)
- edata.data = actor(dev);
- }
-
if (copy_to_user(useraddr, &edata, sizeof(edata)))
return -EFAULT;
return 0;
}
-static int __ethtool_set_tx_csum(struct net_device *dev, u32 data);
-static int __ethtool_set_rx_csum(struct net_device *dev, u32 data);
-static int __ethtool_set_sg(struct net_device *dev, u32 data);
-static int __ethtool_set_tso(struct net_device *dev, u32 data);
-static int __ethtool_set_ufo(struct net_device *dev, u32 data);
-
static int ethtool_set_one_feature(struct net_device *dev,
void __user *useraddr, u32 ethcmd)
{
struct ethtool_value edata;
- u32 mask;
+ netdev_features_t mask;
if (copy_from_user(&edata, useraddr, sizeof(edata)))
return -EFAULT;
mask = ethtool_get_feature_mask(ethcmd);
mask &= dev->hw_features;
- if (mask) {
- if (edata.data)
- dev->wanted_features |= mask;
- else
- dev->wanted_features &= ~mask;
+ if (!mask)
+ return -EOPNOTSUPP;
- __netdev_update_features(dev);
- return 0;
- }
+ if (edata.data)
+ dev->wanted_features |= mask;
+ else
+ dev->wanted_features &= ~mask;
- /* Driver is not converted to ndo_fix_features or does not
- * support changing this offload. In the latter case it won't
- * have corresponding ethtool_ops field set.
- *
- * Following part is to be removed after all drivers advertise
- * their changeable features in netdev->hw_features and stop
- * using discrete offload setting ops.
- */
+ __netdev_update_features(dev);
- switch (ethcmd) {
- case ETHTOOL_STXCSUM:
- return __ethtool_set_tx_csum(dev, edata.data);
- case ETHTOOL_SRXCSUM:
- return __ethtool_set_rx_csum(dev, edata.data);
- case ETHTOOL_SSG:
- return __ethtool_set_sg(dev, edata.data);
- case ETHTOOL_STSO:
- return __ethtool_set_tso(dev, edata.data);
- case ETHTOOL_SUFO:
- return __ethtool_set_ufo(dev, edata.data);
- default:
- return -EOPNOTSUPP;
- }
+ return 0;
+}
+
+#define ETH_ALL_FLAGS (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | \
+ ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH)
+#define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_RX | \
+ NETIF_F_HW_VLAN_TX | NETIF_F_NTUPLE | NETIF_F_RXHASH)
+
+static u32 __ethtool_get_flags(struct net_device *dev)
+{
+ u32 flags = 0;
+
+ if (dev->features & NETIF_F_LRO) flags |= ETH_FLAG_LRO;
+ if (dev->features & NETIF_F_HW_VLAN_RX) flags |= ETH_FLAG_RXVLAN;
+ if (dev->features & NETIF_F_HW_VLAN_TX) flags |= ETH_FLAG_TXVLAN;
+ if (dev->features & NETIF_F_NTUPLE) flags |= ETH_FLAG_NTUPLE;
+ if (dev->features & NETIF_F_RXHASH) flags |= ETH_FLAG_RXHASH;
+
+ return flags;
}
-int __ethtool_set_flags(struct net_device *dev, u32 data)
+static int __ethtool_set_flags(struct net_device *dev, u32 data)
{
- u32 changed;
+ netdev_features_t features = 0, changed;
- if (data & ~flags_dup_features)
+ if (data & ~ETH_ALL_FLAGS)
return -EINVAL;
- /* legacy set_flags() op */
- if (dev->ethtool_ops->set_flags) {
- if (unlikely(dev->hw_features & flags_dup_features))
- netdev_warn(dev,
- "driver BUG: mixed hw_features and set_flags()\n");
- return dev->ethtool_ops->set_flags(dev, data);
- }
+ if (data & ETH_FLAG_LRO) features |= NETIF_F_LRO;
+ if (data & ETH_FLAG_RXVLAN) features |= NETIF_F_HW_VLAN_RX;
+ if (data & ETH_FLAG_TXVLAN) features |= NETIF_F_HW_VLAN_TX;
+ if (data & ETH_FLAG_NTUPLE) features |= NETIF_F_NTUPLE;
+ if (data & ETH_FLAG_RXHASH) features |= NETIF_F_RXHASH;
/* allow changing only bits set in hw_features */
- changed = (data ^ dev->features) & flags_dup_features;
+ changed = (features ^ dev->features) & ETH_ALL_FEATURES;
if (changed & ~dev->hw_features)
return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP;
dev->wanted_features =
- (dev->wanted_features & ~changed) | (data & dev->hw_features);
+ (dev->wanted_features & ~changed) | (features & changed);
__netdev_update_features(dev);
@@ -1231,81 +954,6 @@ static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr)
return dev->ethtool_ops->set_pauseparam(dev, &pauseparam);
}
-static int __ethtool_set_sg(struct net_device *dev, u32 data)
-{
- int err;
-
- if (!dev->ethtool_ops->set_sg)
- return -EOPNOTSUPP;
-
- if (data && !(dev->features & NETIF_F_ALL_CSUM))
- return -EINVAL;
-
- if (!data && dev->ethtool_ops->set_tso) {
- err = dev->ethtool_ops->set_tso(dev, 0);
- if (err)
- return err;
- }
-
- if (!data && dev->ethtool_ops->set_ufo) {
- err = dev->ethtool_ops->set_ufo(dev, 0);
- if (err)
- return err;
- }
- return dev->ethtool_ops->set_sg(dev, data);
-}
-
-static int __ethtool_set_tx_csum(struct net_device *dev, u32 data)
-{
- int err;
-
- if (!dev->ethtool_ops->set_tx_csum)
- return -EOPNOTSUPP;
-
- if (!data && dev->ethtool_ops->set_sg) {
- err = __ethtool_set_sg(dev, 0);
- if (err)
- return err;
- }
-
- return dev->ethtool_ops->set_tx_csum(dev, data);
-}
-
-static int __ethtool_set_rx_csum(struct net_device *dev, u32 data)
-{
- if (!dev->ethtool_ops->set_rx_csum)
- return -EOPNOTSUPP;
-
- if (!data)
- dev->features &= ~NETIF_F_GRO;
-
- return dev->ethtool_ops->set_rx_csum(dev, data);
-}
-
-static int __ethtool_set_tso(struct net_device *dev, u32 data)
-{
- if (!dev->ethtool_ops->set_tso)
- return -EOPNOTSUPP;
-
- if (data && !(dev->features & NETIF_F_SG))
- return -EINVAL;
-
- return dev->ethtool_ops->set_tso(dev, data);
-}
-
-static int __ethtool_set_ufo(struct net_device *dev, u32 data)
-{
- if (!dev->ethtool_ops->set_ufo)
- return -EOPNOTSUPP;
- if (data && !(dev->features & NETIF_F_SG))
- return -EINVAL;
- if (data && !((dev->features & NETIF_F_GEN_CSUM) ||
- (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
- == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)))
- return -EINVAL;
- return dev->ethtool_ops->set_ufo(dev, data);
-}
-
static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
{
struct ethtool_test test;
@@ -1771,9 +1419,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
break;
case ETHTOOL_GFLAGS:
rc = ethtool_get_value(dev, useraddr, ethcmd,
- (dev->ethtool_ops->get_flags ?
- dev->ethtool_ops->get_flags :
- ethtool_op_get_flags));
+ __ethtool_get_flags);
break;
case ETHTOOL_SFLAGS:
rc = ethtool_set_value(dev, useraddr, __ethtool_set_flags);
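
For illustration, the loops above split the (up to 64-bit) netdev_features_t into ETHTOOL_DEV_FEATURE_WORDS 32-bit blocks for the user ABI and recombine them on the set path. A minimal, self-contained C sketch of that split/recombine logic (the word count and type names here are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

#define DEV_FEATURE_WORDS 2		/* illustrative: 2 x 32 bits = 64 feature bits */
typedef uint64_t features_t;		/* stands in for netdev_features_t */

/* split a 64-bit feature mask into 32-bit words, as ethtool_get_features() does */
static void split_features(features_t mask, uint32_t words[DEV_FEATURE_WORDS])
{
	int i;

	for (i = 0; i < DEV_FEATURE_WORDS; i++)
		words[i] = (uint32_t)(mask >> (32 * i));
}

/* recombine the words, as ethtool_set_features() does with ->valid/->requested */
static features_t join_features(const uint32_t words[DEV_FEATURE_WORDS])
{
	features_t mask = 0;
	int i;

	for (i = 0; i < DEV_FEATURE_WORDS; i++)
		mask |= (features_t)words[i] << (32 * i);
	return mask;
}

int main(void)
{
	uint32_t words[DEV_FEATURE_WORDS];
	features_t mask = 0x0000000180000001ULL;

	split_features(mask, words);
	printf("word0=%#x word1=%#x joined=%#llx\n",
	       (unsigned)words[0], (unsigned)words[1],
	       (unsigned long long)join_features(words));
	return 0;
}
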
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 5ac07d31fbc..27d3fefeaa1 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -238,6 +238,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
it to safe state.
*/
skb_queue_purge(&n->arp_queue);
+ n->arp_queue_len_bytes = 0;
n->output = neigh_blackhole;
if (n->nud_state & NUD_VALID)
n->nud_state = NUD_NOARP;
@@ -702,6 +703,7 @@ void neigh_destroy(struct neighbour *neigh)
printk(KERN_WARNING "Impossible event.\n");
skb_queue_purge(&neigh->arp_queue);
+ neigh->arp_queue_len_bytes = 0;
dev_put(neigh->dev);
neigh_parms_put(neigh->parms);
@@ -842,6 +844,7 @@ static void neigh_invalidate(struct neighbour *neigh)
write_lock(&neigh->lock);
}
skb_queue_purge(&neigh->arp_queue);
+ neigh->arp_queue_len_bytes = 0;
}
static void neigh_probe(struct neighbour *neigh)
@@ -980,15 +983,20 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
if (neigh->nud_state == NUD_INCOMPLETE) {
if (skb) {
- if (skb_queue_len(&neigh->arp_queue) >=
- neigh->parms->queue_len) {
+ while (neigh->arp_queue_len_bytes + skb->truesize >
+ neigh->parms->queue_len_bytes) {
struct sk_buff *buff;
+
buff = __skb_dequeue(&neigh->arp_queue);
+ if (!buff)
+ break;
+ neigh->arp_queue_len_bytes -= buff->truesize;
kfree_skb(buff);
NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
}
skb_dst_force(skb);
__skb_queue_tail(&neigh->arp_queue, skb);
+ neigh->arp_queue_len_bytes += skb->truesize;
}
rc = 1;
}
@@ -1175,6 +1183,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
write_lock_bh(&neigh->lock);
}
skb_queue_purge(&neigh->arp_queue);
+ neigh->arp_queue_len_bytes = 0;
}
out:
if (update_isrouter) {
@@ -1747,7 +1756,11 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
- NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
+ NLA_PUT_U32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes);
+ /* approximate value for deprecated QUEUE_LEN (in packets) */
+ NLA_PUT_U32(skb, NDTPA_QUEUE_LEN,
+ DIV_ROUND_UP(parms->queue_len_bytes,
+ SKB_TRUESIZE(ETH_FRAME_LEN)));
NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
@@ -1974,7 +1987,11 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
switch (i) {
case NDTPA_QUEUE_LEN:
- p->queue_len = nla_get_u32(tbp[i]);
+ p->queue_len_bytes = nla_get_u32(tbp[i]) *
+ SKB_TRUESIZE(ETH_FRAME_LEN);
+ break;
+ case NDTPA_QUEUE_LENBYTES:
+ p->queue_len_bytes = nla_get_u32(tbp[i]);
break;
case NDTPA_PROXY_QLEN:
p->proxy_qlen = nla_get_u32(tbp[i]);
@@ -2638,117 +2655,158 @@ EXPORT_SYMBOL(neigh_app_ns);
#ifdef CONFIG_SYSCTL
-#define NEIGH_VARS_MAX 19
+static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ int size, ret;
+ ctl_table tmp = *ctl;
+
+ tmp.data = &size;
+ size = DIV_ROUND_UP(*(int *)ctl->data, SKB_TRUESIZE(ETH_FRAME_LEN));
+ ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
+ if (write && !ret)
+ *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
+ return ret;
+}
+
+enum {
+ NEIGH_VAR_MCAST_PROBE,
+ NEIGH_VAR_UCAST_PROBE,
+ NEIGH_VAR_APP_PROBE,
+ NEIGH_VAR_RETRANS_TIME,
+ NEIGH_VAR_BASE_REACHABLE_TIME,
+ NEIGH_VAR_DELAY_PROBE_TIME,
+ NEIGH_VAR_GC_STALETIME,
+ NEIGH_VAR_QUEUE_LEN,
+ NEIGH_VAR_QUEUE_LEN_BYTES,
+ NEIGH_VAR_PROXY_QLEN,
+ NEIGH_VAR_ANYCAST_DELAY,
+ NEIGH_VAR_PROXY_DELAY,
+ NEIGH_VAR_LOCKTIME,
+ NEIGH_VAR_RETRANS_TIME_MS,
+ NEIGH_VAR_BASE_REACHABLE_TIME_MS,
+ NEIGH_VAR_GC_INTERVAL,
+ NEIGH_VAR_GC_THRESH1,
+ NEIGH_VAR_GC_THRESH2,
+ NEIGH_VAR_GC_THRESH3,
+ NEIGH_VAR_MAX
+};
static struct neigh_sysctl_table {
struct ctl_table_header *sysctl_header;
- struct ctl_table neigh_vars[NEIGH_VARS_MAX];
+ struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
char *dev_name;
} neigh_sysctl_template __read_mostly = {
.neigh_vars = {
- {
+ [NEIGH_VAR_MCAST_PROBE] = {
.procname = "mcast_solicit",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
+ [NEIGH_VAR_UCAST_PROBE] = {
.procname = "ucast_solicit",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
+ [NEIGH_VAR_APP_PROBE] = {
.procname = "app_solicit",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
+ [NEIGH_VAR_RETRANS_TIME] = {
.procname = "retrans_time",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_userhz_jiffies,
},
- {
+ [NEIGH_VAR_BASE_REACHABLE_TIME] = {
.procname = "base_reachable_time",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- {
+ [NEIGH_VAR_DELAY_PROBE_TIME] = {
.procname = "delay_first_probe_time",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- {
+ [NEIGH_VAR_GC_STALETIME] = {
.procname = "gc_stale_time",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- {
+ [NEIGH_VAR_QUEUE_LEN] = {
.procname = "unres_qlen",
.maxlen = sizeof(int),
.mode = 0644,
+ .proc_handler = proc_unres_qlen,
+ },
+ [NEIGH_VAR_QUEUE_LEN_BYTES] = {
+ .procname = "unres_qlen_bytes",
+ .maxlen = sizeof(int),
+ .mode = 0644,
.proc_handler = proc_dointvec,
},
- {
+ [NEIGH_VAR_PROXY_QLEN] = {
.procname = "proxy_qlen",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
+ [NEIGH_VAR_ANYCAST_DELAY] = {
.procname = "anycast_delay",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_userhz_jiffies,
},
- {
+ [NEIGH_VAR_PROXY_DELAY] = {
.procname = "proxy_delay",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_userhz_jiffies,
},
- {
+ [NEIGH_VAR_LOCKTIME] = {
.procname = "locktime",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_userhz_jiffies,
},
- {
+ [NEIGH_VAR_RETRANS_TIME_MS] = {
.procname = "retrans_time_ms",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies,
},
- {
+ [NEIGH_VAR_BASE_REACHABLE_TIME_MS] = {
.procname = "base_reachable_time_ms",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies,
},
- {
+ [NEIGH_VAR_GC_INTERVAL] = {
.procname = "gc_interval",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
- {
+ [NEIGH_VAR_GC_THRESH1] = {
.procname = "gc_thresh1",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
+ [NEIGH_VAR_GC_THRESH2] = {
.procname = "gc_thresh2",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
+ [NEIGH_VAR_GC_THRESH3] = {
.procname = "gc_thresh3",
.maxlen = sizeof(int),
.mode = 0644,
@@ -2781,47 +2839,49 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
if (!t)
goto err;
- t->neigh_vars[0].data = &p->mcast_probes;
- t->neigh_vars[1].data = &p->ucast_probes;
- t->neigh_vars[2].data = &p->app_probes;
- t->neigh_vars[3].data = &p->retrans_time;
- t->neigh_vars[4].data = &p->base_reachable_time;
- t->neigh_vars[5].data = &p->delay_probe_time;
- t->neigh_vars[6].data = &p->gc_staletime;
- t->neigh_vars[7].data = &p->queue_len;
- t->neigh_vars[8].data = &p->proxy_qlen;
- t->neigh_vars[9].data = &p->anycast_delay;
- t->neigh_vars[10].data = &p->proxy_delay;
- t->neigh_vars[11].data = &p->locktime;
- t->neigh_vars[12].data = &p->retrans_time;
- t->neigh_vars[13].data = &p->base_reachable_time;
+ t->neigh_vars[NEIGH_VAR_MCAST_PROBE].data = &p->mcast_probes;
+ t->neigh_vars[NEIGH_VAR_UCAST_PROBE].data = &p->ucast_probes;
+ t->neigh_vars[NEIGH_VAR_APP_PROBE].data = &p->app_probes;
+ t->neigh_vars[NEIGH_VAR_RETRANS_TIME].data = &p->retrans_time;
+ t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].data = &p->base_reachable_time;
+ t->neigh_vars[NEIGH_VAR_DELAY_PROBE_TIME].data = &p->delay_probe_time;
+ t->neigh_vars[NEIGH_VAR_GC_STALETIME].data = &p->gc_staletime;
+ t->neigh_vars[NEIGH_VAR_QUEUE_LEN].data = &p->queue_len_bytes;
+ t->neigh_vars[NEIGH_VAR_QUEUE_LEN_BYTES].data = &p->queue_len_bytes;
+ t->neigh_vars[NEIGH_VAR_PROXY_QLEN].data = &p->proxy_qlen;
+ t->neigh_vars[NEIGH_VAR_ANYCAST_DELAY].data = &p->anycast_delay;
+ t->neigh_vars[NEIGH_VAR_PROXY_DELAY].data = &p->proxy_delay;
+ t->neigh_vars[NEIGH_VAR_LOCKTIME].data = &p->locktime;
+ t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].data = &p->retrans_time;
+ t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].data = &p->base_reachable_time;
if (dev) {
dev_name_source = dev->name;
/* Terminate the table early */
- memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
+ memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
+ sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
} else {
dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
- t->neigh_vars[14].data = (int *)(p + 1);
- t->neigh_vars[15].data = (int *)(p + 1) + 1;
- t->neigh_vars[16].data = (int *)(p + 1) + 2;
- t->neigh_vars[17].data = (int *)(p + 1) + 3;
+ t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
+ t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
+ t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
+ t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
}
if (handler) {
/* RetransTime */
- t->neigh_vars[3].proc_handler = handler;
- t->neigh_vars[3].extra1 = dev;
+ t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
+ t->neigh_vars[NEIGH_VAR_RETRANS_TIME].extra1 = dev;
/* ReachableTime */
- t->neigh_vars[4].proc_handler = handler;
- t->neigh_vars[4].extra1 = dev;
+ t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
+ t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].extra1 = dev;
/* RetransTime (in milliseconds)*/
- t->neigh_vars[12].proc_handler = handler;
- t->neigh_vars[12].extra1 = dev;
+ t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
+ t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].extra1 = dev;
/* ReachableTime (in milliseconds) */
- t->neigh_vars[13].proc_handler = handler;
- t->neigh_vars[13].extra1 = dev;
+ t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
+ t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
}
t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
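
The deprecated packet-count interfaces above are emulated by converting to and from queue_len_bytes, with SKB_TRUESIZE(ETH_FRAME_LEN) as the assumed per-packet cost. A small standalone sketch of that round trip (the overhead constant below is a stand-in, not the kernel's exact SKB_TRUESIZE):

#include <stdio.h>

#define ETH_FRAME_LEN	1514
#define SKB_OVERHEAD	512			/* assumed truesize overhead per skb */
#define PKT_TRUESIZE	(ETH_FRAME_LEN + SKB_OVERHEAD)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* legacy unres_qlen (packets) -> unres_qlen_bytes, as neightbl_set() does */
static int qlen_packets_to_bytes(int packets)
{
	return packets * PKT_TRUESIZE;
}

/* unres_qlen_bytes -> approximate packet count, as neightbl_fill_parms() reports */
static int qlen_bytes_to_packets(int bytes)
{
	return DIV_ROUND_UP(bytes, PKT_TRUESIZE);
}

int main(void)
{
	int bytes = qlen_packets_to_bytes(3);	/* the old default of 3 packets */

	printf("3 packets -> %d bytes -> %d packets\n",
	       bytes, qlen_bytes_to_packets(bytes));
	return 0;
}
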
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index c71c434a4c0..db6c2f83633 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -606,9 +606,12 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
rcu_assign_pointer(queue->rps_map, map);
spin_unlock(&rps_map_lock);
- if (old_map)
+ if (map)
+ jump_label_inc(&rps_needed);
+ if (old_map) {
kfree_rcu(old_map, rcu);
-
+ jump_label_dec(&rps_needed);
+ }
free_cpumask_var(mask);
return len;
}
@@ -780,7 +783,7 @@ net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
#endif
}
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SYSFS
/*
* netdev_queue sysfs structures and functions.
*/
@@ -826,6 +829,23 @@ static const struct sysfs_ops netdev_queue_sysfs_ops = {
.store = netdev_queue_attr_store,
};
+static ssize_t show_trans_timeout(struct netdev_queue *queue,
+ struct netdev_queue_attribute *attribute,
+ char *buf)
+{
+ unsigned long trans_timeout;
+
+ spin_lock_irq(&queue->_xmit_lock);
+ trans_timeout = queue->trans_timeout;
+ spin_unlock_irq(&queue->_xmit_lock);
+
+ return sprintf(buf, "%lu", trans_timeout);
+}
+
+static struct netdev_queue_attribute queue_trans_timeout =
+ __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);
+
+#ifdef CONFIG_XPS
static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
struct net_device *dev = queue->dev;
@@ -901,7 +921,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
struct xps_map *map, *new_map;
struct xps_dev_maps *dev_maps, *new_dev_maps;
int nonempty = 0;
- int numa_node = -2;
+ int numa_node_id = -2;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -944,10 +964,10 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
#ifdef CONFIG_NUMA
if (need_set) {
- if (numa_node == -2)
- numa_node = cpu_to_node(cpu);
- else if (numa_node != cpu_to_node(cpu))
- numa_node = -1;
+ if (numa_node_id == -2)
+ numa_node_id = cpu_to_node(cpu);
+ else if (numa_node_id != cpu_to_node(cpu))
+ numa_node_id = -1;
}
#endif
if (need_set && pos >= map_len) {
@@ -997,7 +1017,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
if (dev_maps)
kfree_rcu(dev_maps, rcu);
- netdev_queue_numa_node_write(queue, (numa_node >= 0) ? numa_node :
+ netdev_queue_numa_node_write(queue, (numa_node_id >= 0) ? numa_node_id :
NUMA_NO_NODE);
mutex_unlock(&xps_map_mutex);
@@ -1020,12 +1040,17 @@ error:
static struct netdev_queue_attribute xps_cpus_attribute =
__ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
+#endif /* CONFIG_XPS */
static struct attribute *netdev_queue_default_attrs[] = {
+ &queue_trans_timeout.attr,
+#ifdef CONFIG_XPS
&xps_cpus_attribute.attr,
+#endif
NULL
};
+#ifdef CONFIG_XPS
static void netdev_queue_release(struct kobject *kobj)
{
struct netdev_queue *queue = to_netdev_queue(kobj);
@@ -1076,10 +1101,13 @@ static void netdev_queue_release(struct kobject *kobj)
memset(kobj, 0, sizeof(*kobj));
dev_put(queue->dev);
}
+#endif /* CONFIG_XPS */
static struct kobj_type netdev_queue_ktype = {
.sysfs_ops = &netdev_queue_sysfs_ops,
+#ifdef CONFIG_XPS
.release = netdev_queue_release,
+#endif
.default_attrs = netdev_queue_default_attrs,
};
@@ -1102,12 +1130,12 @@ static int netdev_queue_add_kobject(struct net_device *net, int index)
return error;
}
-#endif /* CONFIG_XPS */
+#endif /* CONFIG_SYSFS */
int
netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SYSFS
int i;
int error = 0;
@@ -1125,14 +1153,14 @@ netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
return error;
#else
return 0;
-#endif
+#endif /* CONFIG_SYSFS */
}
static int register_queue_kobjects(struct net_device *net)
{
int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
-#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
+#ifdef CONFIG_SYSFS
net->queues_kset = kset_create_and_add("queues",
NULL, &net->dev.kobj);
if (!net->queues_kset)
@@ -1173,7 +1201,7 @@ static void remove_queue_kobjects(struct net_device *net)
net_rx_queue_update_kobjects(net, real_rx, 0);
netdev_queue_update_kobjects(net, real_tx, 0);
-#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
+#ifdef CONFIG_SYSFS
kset_unregister(net->queues_kset);
#endif
}
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index cf64c1ffa4c..1a7d8e2c976 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -422,6 +422,7 @@ static void arp_reply(struct sk_buff *skb)
struct sk_buff *send_skb;
struct netpoll *np, *tmp;
unsigned long flags;
+ int hlen, tlen;
int hits = 0;
if (list_empty(&npinfo->rx_np))
@@ -479,8 +480,9 @@ static void arp_reply(struct sk_buff *skb)
if (tip != np->local_ip)
continue;
- send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
- LL_RESERVED_SPACE(np->dev));
+ hlen = LL_RESERVED_SPACE(np->dev);
+ tlen = np->dev->needed_tailroom;
+ send_skb = find_skb(np, size + hlen + tlen, hlen);
if (!send_skb)
continue;
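
The change above follows the pattern used elsewhere in this series once LL_ALLOCATED_SPACE() goes away: reserve LL_RESERVED_SPACE() of headroom explicitly and account for dev->needed_tailroom separately. A hedged sketch of the general allocation shape (kernel build context assumed; the helper name is made up):

/* Sketch only: how a sender reserves link-layer head/tail room without
 * LL_ALLOCATED_SPACE().  Not taken from the kernel sources.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *alloc_tx_skb(struct net_device *dev, unsigned int len)
{
	int hlen = LL_RESERVED_SPACE(dev);	/* link-layer header room */
	int tlen = dev->needed_tailroom;	/* e.g. room for a trailer/FCS */
	struct sk_buff *skb;

	skb = alloc_skb(len + hlen + tlen, GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, hlen);			/* leave the headroom unused for now */
	skb->dev = dev;
	return skb;
}
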
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
new file mode 100644
index 00000000000..3a9fd4826b7
--- /dev/null
+++ b/net/core/netprio_cgroup.c
@@ -0,0 +1,344 @@
+/*
+ * net/core/netprio_cgroup.c Priority Control Group
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: Neil Horman <nhorman@tuxdriver.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/cgroup.h>
+#include <linux/rcupdate.h>
+#include <linux/atomic.h>
+#include <net/rtnetlink.h>
+#include <net/pkt_cls.h>
+#include <net/sock.h>
+#include <net/netprio_cgroup.h>
+
+static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
+ struct cgroup *cgrp);
+static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
+static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);
+
+struct cgroup_subsys net_prio_subsys = {
+ .name = "net_prio",
+ .create = cgrp_create,
+ .destroy = cgrp_destroy,
+ .populate = cgrp_populate,
+#ifdef CONFIG_NETPRIO_CGROUP
+ .subsys_id = net_prio_subsys_id,
+#endif
+ .module = THIS_MODULE
+};
+
+#define PRIOIDX_SZ 128
+
+static unsigned long prioidx_map[PRIOIDX_SZ];
+static DEFINE_SPINLOCK(prioidx_map_lock);
+static atomic_t max_prioidx = ATOMIC_INIT(0);
+
+static inline struct cgroup_netprio_state *cgrp_netprio_state(struct cgroup *cgrp)
+{
+ return container_of(cgroup_subsys_state(cgrp, net_prio_subsys_id),
+ struct cgroup_netprio_state, css);
+}
+
+static int get_prioidx(u32 *prio)
+{
+ unsigned long flags;
+ u32 prioidx;
+
+ spin_lock_irqsave(&prioidx_map_lock, flags);
+ prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ);
+ set_bit(prioidx, prioidx_map);
+ spin_unlock_irqrestore(&prioidx_map_lock, flags);
+ if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ)
+ return -ENOSPC;
+
+ atomic_set(&max_prioidx, prioidx);
+ *prio = prioidx;
+ return 0;
+}
+
+static void put_prioidx(u32 idx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&prioidx_map_lock, flags);
+ clear_bit(idx, prioidx_map);
+ spin_unlock_irqrestore(&prioidx_map_lock, flags);
+}
+
+static void extend_netdev_table(struct net_device *dev, u32 new_len)
+{
+ size_t new_size = sizeof(struct netprio_map) +
+ ((sizeof(u32) * new_len));
+ struct netprio_map *new_priomap = kzalloc(new_size, GFP_KERNEL);
+ struct netprio_map *old_priomap;
+ int i;
+
+ old_priomap = rtnl_dereference(dev->priomap);
+
+ if (!new_priomap) {
+ printk(KERN_WARNING "Unable to alloc new priomap!\n");
+ return;
+ }
+
+ for (i = 0;
+ old_priomap && (i < old_priomap->priomap_len);
+ i++)
+ new_priomap->priomap[i] = old_priomap->priomap[i];
+
+ new_priomap->priomap_len = new_len;
+
+ rcu_assign_pointer(dev->priomap, new_priomap);
+ if (old_priomap)
+ kfree_rcu(old_priomap, rcu);
+}
+
+static void update_netdev_tables(void)
+{
+ struct net_device *dev;
+ u32 max_len = atomic_read(&max_prioidx);
+ struct netprio_map *map;
+
+ rtnl_lock();
+ for_each_netdev(&init_net, dev) {
+ map = rtnl_dereference(dev->priomap);
+ if ((!map) ||
+ (map->priomap_len < max_len))
+ extend_netdev_table(dev, max_len);
+ }
+ rtnl_unlock();
+}
+
+static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
+ struct cgroup *cgrp)
+{
+ struct cgroup_netprio_state *cs;
+ int ret;
+
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs)
+ return ERR_PTR(-ENOMEM);
+
+ if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx) {
+ kfree(cs);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = get_prioidx(&cs->prioidx);
+ if (ret != 0) {
+ printk(KERN_WARNING "No space in priority index array\n");
+ kfree(cs);
+ return ERR_PTR(ret);
+ }
+
+ return &cs->css;
+}
+
+static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
+{
+ struct cgroup_netprio_state *cs;
+ struct net_device *dev;
+ struct netprio_map *map;
+
+ cs = cgrp_netprio_state(cgrp);
+ rtnl_lock();
+ for_each_netdev(&init_net, dev) {
+ map = rtnl_dereference(dev->priomap);
+ if (map)
+ map->priomap[cs->prioidx] = 0;
+ }
+ rtnl_unlock();
+ put_prioidx(cs->prioidx);
+ kfree(cs);
+}
+
+static u64 read_prioidx(struct cgroup *cgrp, struct cftype *cft)
+{
+ return (u64)cgrp_netprio_state(cgrp)->prioidx;
+}
+
+static int read_priomap(struct cgroup *cont, struct cftype *cft,
+ struct cgroup_map_cb *cb)
+{
+ struct net_device *dev;
+ u32 prioidx = cgrp_netprio_state(cont)->prioidx;
+ u32 priority;
+ struct netprio_map *map;
+
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, dev) {
+ map = rcu_dereference(dev->priomap);
+ priority = map ? map->priomap[prioidx] : 0;
+ cb->fill(cb, dev->name, priority);
+ }
+ rcu_read_unlock();
+ return 0;
+}
+
+static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
+ const char *buffer)
+{
+ char *devname = kstrdup(buffer, GFP_KERNEL);
+ int ret = -EINVAL;
+ u32 prioidx = cgrp_netprio_state(cgrp)->prioidx;
+ unsigned long priority;
+ char *priostr;
+ struct net_device *dev;
+ struct netprio_map *map;
+
+ if (!devname)
+ return -ENOMEM;
+
+ /*
+ * Minimally sized valid priomap string
+ */
+ if (strlen(devname) < 3)
+ goto out_free_devname;
+
+ priostr = strstr(devname, " ");
+ if (!priostr)
+ goto out_free_devname;
+
+ /*
+ * Separate the devname from the associated priority
+ * and advance the priostr pointer to the priority value
+ */
+ *priostr = '\0';
+ priostr++;
+
+ /*
+ * If priostr now points at the terminating NUL, we're at the end of
+ * the passed-in string, and it's not a valid write
+ */
+ if (*priostr == '\0')
+ goto out_free_devname;
+
+ ret = kstrtoul(priostr, 10, &priority);
+ if (ret < 0)
+ goto out_free_devname;
+
+ ret = -ENODEV;
+
+ dev = dev_get_by_name(&init_net, devname);
+ if (!dev)
+ goto out_free_devname;
+
+ update_netdev_tables();
+ ret = 0;
+ rcu_read_lock();
+ map = rcu_dereference(dev->priomap);
+ if (map)
+ map->priomap[prioidx] = priority;
+ rcu_read_unlock();
+ dev_put(dev);
+
+out_free_devname:
+ kfree(devname);
+ return ret;
+}
+
+static struct cftype ss_files[] = {
+ {
+ .name = "prioidx",
+ .read_u64 = read_prioidx,
+ },
+ {
+ .name = "ifpriomap",
+ .read_map = read_priomap,
+ .write_string = write_priomap,
+ },
+};
+
+static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
+{
+ return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
+}
+
+static int netprio_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = ptr;
+ struct netprio_map *old;
+ u32 max_len = atomic_read(&max_prioidx);
+
+ /*
+ * Note: this is called with the rtnl_lock held, so we have update-side
+ * protection for our RCU assignments
+ */
+
+ switch (event) {
+
+ case NETDEV_REGISTER:
+ if (max_len)
+ extend_netdev_table(dev, max_len);
+ break;
+ case NETDEV_UNREGISTER:
+ old = rtnl_dereference(dev->priomap);
+ RCU_INIT_POINTER(dev->priomap, NULL);
+ if (old)
+ kfree_rcu(old, rcu);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block netprio_device_notifier = {
+ .notifier_call = netprio_device_event
+};
+
+static int __init init_cgroup_netprio(void)
+{
+ int ret;
+
+ ret = cgroup_load_subsys(&net_prio_subsys);
+ if (ret)
+ goto out;
+#ifndef CONFIG_NETPRIO_CGROUP
+ smp_wmb();
+ net_prio_subsys_id = net_prio_subsys.subsys_id;
+#endif
+
+ register_netdevice_notifier(&netprio_device_notifier);
+
+out:
+ return ret;
+}
+
+static void __exit exit_cgroup_netprio(void)
+{
+ struct netprio_map *old;
+ struct net_device *dev;
+
+ unregister_netdevice_notifier(&netprio_device_notifier);
+
+ cgroup_unload_subsys(&net_prio_subsys);
+
+#ifndef CONFIG_NETPRIO_CGROUP
+ net_prio_subsys_id = -1;
+ synchronize_rcu();
+#endif
+
+ rtnl_lock();
+ for_each_netdev(&init_net, dev) {
+ old = rtnl_dereference(dev->priomap);
+ RCU_INIT_POINTER(dev->priomap, NULL);
+ if (old)
+ kfree_rcu(old, rcu);
+ }
+ rtnl_unlock();
+}
+
+module_init(init_cgroup_netprio);
+module_exit(exit_cgroup_netprio);
+MODULE_LICENSE("GPL v2");
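
write_priomap() parses a single "<ifname> <priority>" pair per write. A minimal userspace sketch of driving it (the cgroup mount point and file name below are assumptions; adjust them to the local hierarchy):

#include <stdio.h>

/* Assumed location of the net_prio cgroup's per-interface priority map. */
#define IFPRIOMAP "/sys/fs/cgroup/net_prio/net_prio.ifpriomap"

int main(void)
{
	FILE *f = fopen(IFPRIOMAP, "w");

	if (!f) {
		perror("open ifpriomap");
		return 1;
	}
	/* one "<ifname> <priority>" pair per write, as write_priomap() expects */
	fprintf(f, "eth0 5");
	fclose(f);
	return 0;
}
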
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 0001c243b35..aa53a35a631 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1304,7 +1304,7 @@ static ssize_t pktgen_if_write(struct file *file,
scan_ip6(buf, pkt_dev->in6_daddr.s6_addr);
snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_daddr);
- ipv6_addr_copy(&pkt_dev->cur_in6_daddr, &pkt_dev->in6_daddr);
+ pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr;
if (debug)
printk(KERN_DEBUG "pktgen: dst6 set to: %s\n", buf);
@@ -1327,8 +1327,7 @@ static ssize_t pktgen_if_write(struct file *file,
scan_ip6(buf, pkt_dev->min_in6_daddr.s6_addr);
snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->min_in6_daddr);
- ipv6_addr_copy(&pkt_dev->cur_in6_daddr,
- &pkt_dev->min_in6_daddr);
+ pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr;
if (debug)
printk(KERN_DEBUG "pktgen: dst6_min set to: %s\n", buf);
@@ -1371,7 +1370,7 @@ static ssize_t pktgen_if_write(struct file *file,
scan_ip6(buf, pkt_dev->in6_saddr.s6_addr);
snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_saddr);
- ipv6_addr_copy(&pkt_dev->cur_in6_saddr, &pkt_dev->in6_saddr);
+ pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr;
if (debug)
printk(KERN_DEBUG "pktgen: src6 set to: %s\n", buf);
@@ -2079,9 +2078,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
ifp = ifp->if_next) {
if (ifp->scope == IFA_LINK &&
!(ifp->flags & IFA_F_TENTATIVE)) {
- ipv6_addr_copy(&pkt_dev->
- cur_in6_saddr,
- &ifp->addr);
+ pkt_dev->cur_in6_saddr = ifp->addr;
err = 0;
break;
}
@@ -2958,8 +2955,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
iph->payload_len = htons(sizeof(struct udphdr) + datalen);
iph->nexthdr = IPPROTO_UDP;
- ipv6_addr_copy(&iph->daddr, &pkt_dev->cur_in6_daddr);
- ipv6_addr_copy(&iph->saddr, &pkt_dev->cur_in6_saddr);
+ iph->daddr = pkt_dev->cur_in6_daddr;
+ iph->saddr = pkt_dev->cur_in6_saddr;
skb->mac_header = (skb->network_header - ETH_HLEN -
pkt_dev->pkt_overhead);
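
The ipv6_addr_copy() calls above (and in the dccp changes below) are replaced by plain structure assignment, which copies a struct in6_addr just as well. A trivial standalone illustration:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

int main(void)
{
	struct in6_addr src, dst;
	char buf[INET6_ADDRSTRLEN];

	if (inet_pton(AF_INET6, "2001:db8::1", &src) != 1)
		return 1;

	/* struct assignment copies all 16 bytes -- no helper needed */
	dst = src;

	printf("%s\n", inet_ntop(AF_INET6, &dst, buf, sizeof(buf)));
	return 0;
}
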
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3c30ee4a571..678ae4e783a 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -245,6 +245,55 @@ nodata:
EXPORT_SYMBOL(__alloc_skb);
/**
+ * build_skb - build a network buffer
+ * @data: data buffer provided by caller
+ *
+ * Allocate a new &sk_buff. The caller provides space holding the head and
+ * skb_shared_info. @data must have been allocated by kmalloc().
+ * The return value is the new skb buffer.
+ * On failure the return is %NULL, and @data is not freed.
+ * Notes:
+ * Before IO, the driver allocates only the data buffer into which the NIC
+ * puts the incoming frame.
+ * The driver should add room at the head (NET_SKB_PAD) and
+ * MUST add room at the tail (SKB_DATA_ALIGN(skb_shared_info)).
+ * After IO, the driver calls build_skb() to allocate the sk_buff and
+ * populate it before handing the packet to the stack.
+ * RX rings only contain data buffers, not full skbs.
+ */
+struct sk_buff *build_skb(void *data)
+{
+ struct skb_shared_info *shinfo;
+ struct sk_buff *skb;
+ unsigned int size;
+
+ skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ size = ksize(data) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ skb->truesize = SKB_TRUESIZE(size);
+ atomic_set(&skb->users, 1);
+ skb->head = data;
+ skb->data = data;
+ skb_reset_tail_pointer(skb);
+ skb->end = skb->tail + size;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+ skb->mac_header = ~0U;
+#endif
+
+ /* make sure we initialize shinfo sequentially */
+ shinfo = skb_shinfo(skb);
+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+ atomic_set(&shinfo->dataref, 1);
+ kmemcheck_annotate_variable(shinfo->destructor_arg);
+
+ return skb;
+}
+EXPORT_SYMBOL(build_skb);
+
+/**
* __netdev_alloc_skb - allocate an skbuff for rx on a specific device
* @dev: network device to receive on
* @length: length to allocate
@@ -2621,7 +2670,7 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
* a pointer to the first in a list of new skbs for the segments.
* In case of error it returns ERR_PTR(err).
*/
-struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
+struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
{
struct sk_buff *segs = NULL;
struct sk_buff *tail = NULL;
@@ -3169,6 +3218,26 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
}
EXPORT_SYMBOL_GPL(skb_tstamp_tx);
+void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
+{
+ struct sock *sk = skb->sk;
+ struct sock_exterr_skb *serr;
+ int err;
+
+ skb->wifi_acked_valid = 1;
+ skb->wifi_acked = acked;
+
+ serr = SKB_EXT_ERR(skb);
+ memset(serr, 0, sizeof(*serr));
+ serr->ee.ee_errno = ENOMSG;
+ serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
+
+ err = sock_queue_err_skb(sk, skb);
+ if (err)
+ kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
+
/**
* skb_partial_csum_set - set up and verify partial csum values for packet
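
build_skb() assumes the caller has already laid out headroom, the frame and room for struct skb_shared_info inside one kmalloc()'d buffer. A hedged sketch of the intended driver-side usage (sizing shown for illustration; a real driver would have the hardware DMA the frame into the buffer before calling build_skb()):

#include <linux/skbuff.h>
#include <linux/slab.h>

/* Sketch: allocate an RX buffer sized for build_skb(), then turn it into an skb. */
static struct sk_buff *rx_buffer_to_skb(unsigned int frame_len)
{
	unsigned int size = NET_SKB_PAD + frame_len +
			    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	void *data = kmalloc(size, GFP_ATOMIC);
	struct sk_buff *skb;

	if (!data)
		return NULL;

	/* ... hardware would DMA the received frame to data + NET_SKB_PAD ... */

	skb = build_skb(data);
	if (!skb) {
		kfree(data);		/* build_skb() does not free @data on failure */
		return NULL;
	}

	skb_reserve(skb, NET_SKB_PAD);	/* skip the headroom */
	skb_put(skb, frame_len);	/* frame bytes written by the device */
	return skb;
}
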
diff --git a/net/core/sock.c b/net/core/sock.c
index 4ed7b1d12f5..16069139797 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -125,6 +125,7 @@
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
+#include <net/netprio_cgroup.h>
#include <linux/filter.h>
@@ -221,10 +222,16 @@ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);
-#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP)
+#if defined(CONFIG_CGROUPS)
+#if !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_cls_subsys_id);
#endif
+#if !defined(CONFIG_NETPRIO_CGROUP)
+int net_prio_subsys_id = -1;
+EXPORT_SYMBOL_GPL(net_prio_subsys_id);
+#endif
+#endif
static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
@@ -740,6 +747,11 @@ set_rcvbuf:
case SO_RXQ_OVFL:
sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
break;
+
+ case SO_WIFI_STATUS:
+ sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
+ break;
+
default:
ret = -ENOPROTOOPT;
break;
@@ -961,6 +973,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
break;
+ case SO_WIFI_STATUS:
+ v.val = !!sock_flag(sk, SOCK_WIFI_STATUS);
+ break;
+
default:
return -ENOPROTOOPT;
}
@@ -1111,6 +1127,18 @@ void sock_update_classid(struct sock *sk)
sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);
+
+void sock_update_netprioidx(struct sock *sk)
+{
+ struct cgroup_netprio_state *state;
+ if (in_interrupt())
+ return;
+ rcu_read_lock();
+ state = task_netprio_state(current);
+ sk->sk_cgrp_prioidx = state ? state->prioidx : 0;
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif
/**
@@ -1138,6 +1166,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
atomic_set(&sk->sk_wmem_alloc, 1);
sock_update_classid(sk);
+ sock_update_netprioidx(sk);
}
return sk;
@@ -1204,7 +1233,14 @@ void sk_release_kernel(struct sock *sk)
}
EXPORT_SYMBOL(sk_release_kernel);
-struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
+/**
+ * sk_clone_lock - clone a socket, and lock its clone
+ * @sk: the socket to clone
+ * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
+ *
+ * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
+ */
+struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{
struct sock *newsk;
@@ -1297,7 +1333,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
out:
return newsk;
}
-EXPORT_SYMBOL_GPL(sk_clone);
+EXPORT_SYMBOL_GPL(sk_clone_lock);
void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
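
Besides the rename, sk_clone_lock() makes the locking contract explicit: the clone comes back locked and the caller must bh_unlock_sock() it, even on an error path. A hedged sketch of the calling pattern (protocol-specific setup elided):

#include <net/sock.h>

/* Sketch: the caller now owns the lock returned by sk_clone_lock(). */
static struct sock *clone_and_release(const struct sock *sk)
{
	struct sock *newsk = sk_clone_lock(sk, GFP_ATOMIC);

	if (!newsk)
		return NULL;

	/* ... protocol-specific initialisation of newsk goes here ... */

	bh_unlock_sock(newsk);		/* required even when setup fails */
	return newsk;
}
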
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 77a65f03148..d05559d4d9c 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -68,8 +68,13 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
if (sock_table != orig_sock_table) {
rcu_assign_pointer(rps_sock_flow_table, sock_table);
- synchronize_rcu();
- vfree(orig_sock_table);
+ if (sock_table)
+ jump_label_inc(&rps_needed);
+ if (orig_sock_table) {
+ jump_label_dec(&rps_needed);
+ synchronize_rcu();
+ vfree(orig_sock_table);
+ }
}
}
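
Both this hunk and the net-sysfs one above take and drop a reference on the rps_needed jump label, so the RX fast path can stay a never-taken static branch while no RPS table is installed. A sketch of that enable/disable discipline (type and helper names as used in this series; kernel build context assumed, purely illustrative):

#include <linux/jump_label.h>

/* Sketch: keep a static-branch key's reference count in step with how many
 * tables are installed, as the RPS sysfs/sysctl writers do with rps_needed.
 */
static void swap_table(struct jump_label_key *key, void *newtbl, void *oldtbl)
{
	if (newtbl)
		jump_label_inc(key);	/* a table now exists: take a reference */
	if (oldtbl)
		jump_label_dec(key);	/* drop one; the branch is disabled at zero */
}
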
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 17ee85ce148..ce903f747e6 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -150,8 +150,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
*/
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_DCCP;
- ipv6_addr_copy(&fl6.daddr, &np->daddr);
- ipv6_addr_copy(&fl6.saddr, &np->saddr);
+ fl6.daddr = np->daddr;
+ fl6.saddr = np->saddr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.fl6_dport = inet->inet_dport;
fl6.fl6_sport = inet->inet_sport;
@@ -244,8 +244,8 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_DCCP;
- ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
- ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
+ fl6.daddr = ireq6->rmt_addr;
+ fl6.saddr = ireq6->loc_addr;
fl6.flowlabel = 0;
fl6.flowi6_oif = ireq6->iif;
fl6.fl6_dport = inet_rsk(req)->rmt_port;
@@ -270,7 +270,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
dh->dccph_checksum = dccp_v6_csum_finish(skb,
&ireq6->loc_addr,
&ireq6->rmt_addr);
- ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
+ fl6.daddr = ireq6->rmt_addr;
err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
err = net_xmit_eval(err);
}
@@ -313,8 +313,8 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
&rxip6h->daddr);
memset(&fl6, 0, sizeof(fl6));
- ipv6_addr_copy(&fl6.daddr, &rxip6h->saddr);
- ipv6_addr_copy(&fl6.saddr, &rxip6h->daddr);
+ fl6.daddr = rxip6h->saddr;
+ fl6.saddr = rxip6h->daddr;
fl6.flowi6_proto = IPPROTO_DCCP;
fl6.flowi6_oif = inet6_iif(rxskb);
@@ -419,8 +419,8 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
goto drop_and_free;
ireq6 = inet6_rsk(req);
- ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
- ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);
+ ireq6->rmt_addr = ipv6_hdr(skb)->saddr;
+ ireq6->loc_addr = ipv6_hdr(skb)->daddr;
if (ipv6_opt_accepted(sk, skb) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
@@ -491,7 +491,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
- ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
+ newnp->rcv_saddr = newnp->saddr;
inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
newsk->sk_backlog_rcv = dccp_v4_do_rcv;
@@ -526,9 +526,9 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_DCCP;
- ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
+ fl6.daddr = ireq6->rmt_addr;
final_p = fl6_update_dst(&fl6, opt, &final);
- ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
+ fl6.saddr = ireq6->loc_addr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.fl6_dport = inet_rsk(req)->rmt_port;
fl6.fl6_sport = inet_rsk(req)->loc_port;
@@ -559,9 +559,9 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
- ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
- ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
- ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
+ newnp->daddr = ireq6->rmt_addr;
+ newnp->saddr = ireq6->loc_addr;
+ newnp->rcv_saddr = ireq6->loc_addr;
newsk->sk_bound_dev_if = ireq6->iif;
/* Now IPv6 options...
@@ -877,7 +877,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
- ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
+ usin->sin6_addr = flowlabel->dst;
fl6_sock_release(flowlabel);
}
}
@@ -910,7 +910,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
return -EINVAL;
}
- ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
+ np->daddr = usin->sin6_addr;
np->flow_label = fl6.flowlabel;
/*
@@ -949,8 +949,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
saddr = &np->rcv_saddr;
fl6.flowi6_proto = IPPROTO_DCCP;
- ipv6_addr_copy(&fl6.daddr, &np->daddr);
- ipv6_addr_copy(&fl6.saddr, saddr ? saddr : &np->saddr);
+ fl6.daddr = np->daddr;
+ fl6.saddr = saddr ? *saddr : np->saddr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.fl6_dport = usin->sin6_port;
fl6.fl6_sport = inet->inet_sport;
@@ -966,11 +966,11 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
if (saddr == NULL) {
saddr = &fl6.saddr;
- ipv6_addr_copy(&np->rcv_saddr, saddr);
+ np->rcv_saddr = *saddr;
}
/* set the source address */
- ipv6_addr_copy(&np->saddr, saddr);
+ np->saddr = *saddr;
inet->inet_rcv_saddr = LOOPBACK4_IPV6;
__ip6_dst_store(sk, dst, NULL, NULL);
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index d7041a0963a..b50d5fd3d69 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -60,8 +60,8 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
tw6 = inet6_twsk((struct sock *)tw);
- ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
- ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
+ tw6->tw_v6_daddr = np->daddr;
+ tw6->tw_v6_rcv_saddr = np->rcv_saddr;
tw->tw_ipv6only = np->ipv6only;
}
#endif
@@ -100,7 +100,7 @@ struct sock *dccp_create_openreq_child(struct sock *sk,
* (* Generate a new socket and switch to that socket *)
* Set S := new socket for this port pair
*/
- struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);
+ struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
if (newsk != NULL) {
struct dccp_request_sock *dreq = dccp_rsk(req);
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 7f0eb087dc1..3532ac64c82 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -107,7 +107,7 @@ struct neigh_table dn_neigh_table = {
.gc_staletime = 60 * HZ,
.reachable_time = 30 * HZ,
.delay_probe_time = 5 * HZ,
- .queue_len = 3,
+ .queue_len_bytes = 64*1024,
.ucast_probes = 0,
.app_probes = 0,
.mcast_probes = 0,
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 1c1f26c5d67..7e717cb35ad 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -322,6 +322,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
/* Real hardware Econet. We're not worthy etc. */
#ifdef CONFIG_ECONET_NATIVE
unsigned short proto = 0;
+ int hlen, tlen;
int res;
if (len + 15 > dev->mtu) {
@@ -331,12 +332,14 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
dev_hold(dev);
- skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev),
+ hlen = LL_RESERVED_SPACE(dev);
+ tlen = dev->needed_tailroom;
+ skb = sock_alloc_send_skb(sk, len + hlen + tlen,
msg->msg_flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto out_unlock;
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, hlen);
skb_reset_network_header(skb);
eb = (struct ec_cb *)&skb->cb;
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 19d6aefe97d..e4ecc1eef98 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -50,8 +50,6 @@
* SUCH DAMAGE.
*/
-#define DEBUG
-
#include <linux/bitops.h>
#include <linux/if_arp.h>
#include <linux/module.h>
@@ -113,6 +111,20 @@ struct lowpan_dev_record {
struct list_head list;
};
+struct lowpan_fragment {
+ struct sk_buff *skb; /* skb to be assembled */
+ spinlock_t lock; /* concurency lock */
+ u16 length; /* length to be assemled */
+ u32 bytes_rcv; /* bytes received */
+ u16 tag; /* current fragment tag */
+ struct timer_list timer; /* assembling timer */
+ struct list_head list; /* fragments list */
+};
+
+static unsigned short fragment_tag;
+static LIST_HEAD(lowpan_fragments);
+spinlock_t flist_lock;
+
static inline struct
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
{
@@ -234,6 +246,50 @@ lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
return 0;
}
+static void
+lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
+{
+ struct udphdr *uh = udp_hdr(skb);
+
+ pr_debug("(%s): UDP header compression\n", __func__);
+
+ if (((uh->source & LOWPAN_NHC_UDP_4BIT_MASK) ==
+ LOWPAN_NHC_UDP_4BIT_PORT) &&
+ ((uh->dest & LOWPAN_NHC_UDP_4BIT_MASK) ==
+ LOWPAN_NHC_UDP_4BIT_PORT)) {
+ pr_debug("(%s): both ports compression to 4 bits\n", __func__);
+ **hc06_ptr = LOWPAN_NHC_UDP_CS_P_11;
+ **(hc06_ptr + 1) = /* subtraction is faster */
+ (u8)((uh->dest - LOWPAN_NHC_UDP_4BIT_PORT) +
+ ((uh->source & LOWPAN_NHC_UDP_4BIT_PORT) << 4));
+ *hc06_ptr += 2;
+ } else if ((uh->dest & LOWPAN_NHC_UDP_8BIT_MASK) ==
+ LOWPAN_NHC_UDP_8BIT_PORT) {
+ pr_debug("(%s): remove 8 bits of dest\n", __func__);
+ **hc06_ptr = LOWPAN_NHC_UDP_CS_P_01;
+ memcpy(*hc06_ptr + 1, &uh->source, 2);
+ **(hc06_ptr + 3) = (u8)(uh->dest - LOWPAN_NHC_UDP_8BIT_PORT);
+ *hc06_ptr += 4;
+ } else if ((uh->source & LOWPAN_NHC_UDP_8BIT_MASK) ==
+ LOWPAN_NHC_UDP_8BIT_PORT) {
+ pr_debug("(%s): remove 8 bits of source\n", __func__);
+ **hc06_ptr = LOWPAN_NHC_UDP_CS_P_10;
+ memcpy(*hc06_ptr + 1, &uh->dest, 2);
+ **(hc06_ptr + 3) = (u8)(uh->source - LOWPAN_NHC_UDP_8BIT_PORT);
+ *hc06_ptr += 4;
+ } else {
+ pr_debug("(%s): can't compress header\n", __func__);
+ **hc06_ptr = LOWPAN_NHC_UDP_CS_P_00;
+ memcpy(*hc06_ptr + 1, &uh->source, 2);
+ memcpy(*hc06_ptr + 3, &uh->dest, 2);
+ *hc06_ptr += 5;
+ }
+
+ /* checksum is always inline */
+ memcpy(*hc06_ptr, &uh->check, 2);
+ *hc06_ptr += 2;
+}
+
static u8 lowpan_fetch_skb_u8(struct sk_buff *skb)
{
u8 ret;
@@ -244,6 +300,73 @@ static u8 lowpan_fetch_skb_u8(struct sk_buff *skb)
return ret;
}
+static u16 lowpan_fetch_skb_u16(struct sk_buff *skb)
+{
+ u16 ret;
+
+ BUG_ON(!pskb_may_pull(skb, 2));
+
+ ret = skb->data[0] | (skb->data[1] << 8);
+ skb_pull(skb, 2);
+ return ret;
+}
+
+static int
+lowpan_uncompress_udp_header(struct sk_buff *skb)
+{
+ struct udphdr *uh = udp_hdr(skb);
+ u8 tmp;
+
+ tmp = lowpan_fetch_skb_u8(skb);
+
+ if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
+ pr_debug("(%s): UDP header uncompression\n", __func__);
+ switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
+ case LOWPAN_NHC_UDP_CS_P_00:
+ memcpy(&uh->source, &skb->data[0], 2);
+ memcpy(&uh->dest, &skb->data[2], 2);
+ skb_pull(skb, 4);
+ break;
+ case LOWPAN_NHC_UDP_CS_P_01:
+ memcpy(&uh->source, &skb->data[0], 2);
+ uh->dest =
+ skb->data[2] + LOWPAN_NHC_UDP_8BIT_PORT;
+ skb_pull(skb, 3);
+ break;
+ case LOWPAN_NHC_UDP_CS_P_10:
+ uh->source = skb->data[0] + LOWPAN_NHC_UDP_8BIT_PORT;
+ memcpy(&uh->dest, &skb->data[1], 2);
+ skb_pull(skb, 3);
+ break;
+ case LOWPAN_NHC_UDP_CS_P_11:
+ uh->source =
+ LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] >> 4);
+ uh->dest =
+ LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] & 0x0f);
+ skb_pull(skb, 1);
+ break;
+ default:
+ pr_debug("(%s) ERROR: unknown UDP format\n", __func__);
+ goto err;
+ break;
+ }
+
+ pr_debug("(%s): uncompressed UDP ports: src = %d, dst = %d\n",
+ __func__, uh->source, uh->dest);
+
+ /* copy checksum */
+ memcpy(&uh->check, &skb->data[0], 2);
+ skb_pull(skb, 2);
+ } else {
+ pr_debug("(%s): ERROR: unsupported NH format\n", __func__);
+ goto err;
+ }
+
+ return 0;
+err:
+ return -EINVAL;
+}
+
static int lowpan_header_create(struct sk_buff *skb,
struct net_device *dev,
unsigned short type, const void *_daddr,
@@ -342,8 +465,6 @@ static int lowpan_header_create(struct sk_buff *skb,
if (hdr->nexthdr == UIP_PROTO_UDP)
iphc0 |= LOWPAN_IPHC_NH_C;
-/* TODO: next header compression */
-
if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
*hc06_ptr = hdr->nexthdr;
hc06_ptr += 1;
@@ -431,8 +552,9 @@ static int lowpan_header_create(struct sk_buff *skb,
}
}
- /* TODO: UDP header compression */
- /* TODO: Next Header compression */
+ /* UDP header compression */
+ if (hdr->nexthdr == UIP_PROTO_UDP)
+ lowpan_compress_udp_header(&hc06_ptr, skb);
head[0] = iphc0;
head[1] = iphc1;
@@ -467,6 +589,7 @@ static int lowpan_header_create(struct sk_buff *skb,
memcpy(&(sa.hwaddr), saddr, 8);
mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
+
return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
type, (void *)&da, (void *)&sa, skb->len);
}
@@ -511,6 +634,21 @@ static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr)
return stat;
}
+static void lowpan_fragment_timer_expired(unsigned long entry_addr)
+{
+ struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;
+
+ pr_debug("%s: timer expired for frame with tag %d\n", __func__,
+ entry->tag);
+
+ spin_lock(&flist_lock);
+ list_del(&entry->list);
+ spin_unlock(&flist_lock);
+
+ dev_kfree_skb(entry->skb);
+ kfree(entry);
+}
+
static int
lowpan_process_data(struct sk_buff *skb)
{
@@ -525,6 +663,107 @@ lowpan_process_data(struct sk_buff *skb)
if (skb->len < 2)
goto drop;
iphc0 = lowpan_fetch_skb_u8(skb);
+
+ /* fragments assembling */
+ switch (iphc0 & LOWPAN_DISPATCH_MASK) {
+ case LOWPAN_DISPATCH_FRAG1:
+ case LOWPAN_DISPATCH_FRAGN:
+ {
+ struct lowpan_fragment *frame;
+ u8 len, offset;
+ u16 tag;
+ bool found = false;
+
+ len = lowpan_fetch_skb_u8(skb); /* frame length */
+ tag = lowpan_fetch_skb_u16(skb);
+
+ /*
+ * check whether reassembly of a frame with the same tag
+ * is already in progress
+ */
+ spin_lock(&flist_lock);
+
+ list_for_each_entry(frame, &lowpan_fragments, list)
+ if (frame->tag == tag) {
+ found = true;
+ break;
+ }
+
+ /* alloc new frame structure */
+ if (!found) {
+ frame = kzalloc(sizeof(struct lowpan_fragment),
+ GFP_ATOMIC);
+ if (!frame)
+ goto unlock_and_drop;
+
+ INIT_LIST_HEAD(&frame->list);
+
+ frame->length = (iphc0 & 7) | (len << 3);
+ frame->tag = tag;
+
+ /* allocate buffer for frame assembling */
+ frame->skb = alloc_skb(frame->length +
+ sizeof(struct ipv6hdr), GFP_ATOMIC);
+
+ if (!frame->skb) {
+ kfree(frame);
+ goto unlock_and_drop;
+ }
+
+ frame->skb->priority = skb->priority;
+ frame->skb->dev = skb->dev;
+
+ /* reserve headroom for uncompressed ipv6 header */
+ skb_reserve(frame->skb, sizeof(struct ipv6hdr));
+ skb_put(frame->skb, frame->length);
+
+ init_timer(&frame->timer);
+ /* time out is the same as for ipv6 - 60 sec */
+ frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
+ frame->timer.data = (unsigned long)frame;
+ frame->timer.function = lowpan_fragment_timer_expired;
+
+ add_timer(&frame->timer);
+
+ list_add_tail(&frame->list, &lowpan_fragments);
+ }
+
+ if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1)
+ goto unlock_and_drop;
+
+ offset = lowpan_fetch_skb_u8(skb); /* fetch offset */
+
+ /* if payload fits buffer, copy it */
+ if (likely((offset * 8 + skb->len) <= frame->length))
+ skb_copy_to_linear_data_offset(frame->skb, offset * 8,
+ skb->data, skb->len);
+ else
+ goto unlock_and_drop;
+
+ frame->bytes_rcv += skb->len;
+
+ /* frame reassembly complete */
+ if ((frame->bytes_rcv == frame->length) &&
+ frame->timer.expires > jiffies) {
+ /* if the timer hasn't expired, delete it first */
+ del_timer(&frame->timer);
+ list_del(&frame->list);
+ spin_unlock(&flist_lock);
+
+ dev_kfree_skb(skb);
+ skb = frame->skb;
+ kfree(frame);
+ iphc0 = lowpan_fetch_skb_u8(skb);
+ break;
+ }
+ spin_unlock(&flist_lock);
+
+ return kfree_skb(skb), 0;
+ }
+ default:
+ break;
+ }
+
iphc1 = lowpan_fetch_skb_u8(skb);
_saddr = mac_cb(skb)->sa.hwaddr;
@@ -659,7 +898,10 @@ lowpan_process_data(struct sk_buff *skb)
goto drop;
}
- /* TODO: UDP header parse */
+ /* UDP data uncompression */
+ if (iphc0 & LOWPAN_IPHC_NH_C)
+ if (lowpan_uncompress_udp_header(skb))
+ goto drop;
/* Not fragmented package */
hdr.payload_len = htons(skb->len);
@@ -674,6 +916,9 @@ lowpan_process_data(struct sk_buff *skb)
lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
sizeof(hdr));
return lowpan_skb_deliver(skb, &hdr);
+
+unlock_and_drop:
+ spin_unlock(&flist_lock);
drop:
kfree_skb(skb);
return -EINVAL;
@@ -692,18 +937,115 @@ static int lowpan_set_address(struct net_device *dev, void *p)
return 0;
}
+static int lowpan_get_mac_header_length(struct sk_buff *skb)
+{
+ /*
+ * Currently only long addressing mode is supported, so the overall
+ * header size is 21:
+ * FC SeqNum DPAN DA SA Sec
+ * 2 + 1 + 2 + 8 + 8 + 0 = 21
+ */
+ return 21;
+}
+
+static int
+lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
+ int mlen, int plen, int offset)
+{
+ struct sk_buff *frag;
+ int hlen, ret;
+
+ /* if the payload length is zero, this is the first fragment */
+ hlen = (plen == 0 ? LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE);
+
+ lowpan_raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);
+
+ frag = dev_alloc_skb(hlen + mlen + plen + IEEE802154_MFR_SIZE);
+ if (!frag)
+ return -ENOMEM;
+
+ frag->priority = skb->priority;
+ frag->dev = skb->dev;
+
+ /* copy header, MFR and payload */
+ memcpy(skb_put(frag, mlen), skb->data, mlen);
+ memcpy(skb_put(frag, hlen), head, hlen);
+
+ if (plen)
+ skb_copy_from_linear_data_offset(skb, offset + mlen,
+ skb_put(frag, plen), plen);
+
+ lowpan_raw_dump_table(__func__, " raw fragment dump", frag->data,
+ frag->len);
+
+ ret = dev_queue_xmit(frag);
+
+ return ret;
+}
+
+static int
+lowpan_skb_fragmentation(struct sk_buff *skb)
+{
+ int err, header_length, payload_length, tag, offset = 0;
+ u8 head[5];
+
+ header_length = lowpan_get_mac_header_length(skb);
+ payload_length = skb->len - header_length;
+ tag = fragment_tag++;
+
+ /* first fragment header */
+ head[0] = LOWPAN_DISPATCH_FRAG1 | (payload_length & 0x7);
+ head[1] = (payload_length >> 3) & 0xff;
+ head[2] = tag & 0xff;
+ head[3] = tag >> 8;
+
+ err = lowpan_fragment_xmit(skb, head, header_length, 0, 0);
+
+ /* next fragment header */
+ head[0] &= ~LOWPAN_DISPATCH_FRAG1;
+ head[0] |= LOWPAN_DISPATCH_FRAGN;
+
+ while ((payload_length - offset > 0) && (err >= 0)) {
+ int len = LOWPAN_FRAG_SIZE;
+
+ head[4] = offset / 8;
+
+ if (payload_length - offset < len)
+ len = payload_length - offset;
+
+ err = lowpan_fragment_xmit(skb, head, header_length,
+ len, offset);
+ offset += len;
+ }
+
+ return err;
+}
+
static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
{
- int err = 0;
+ int err = -1;
pr_debug("(%s): package xmit\n", __func__);
skb->dev = lowpan_dev_info(dev)->real_dev;
if (skb->dev == NULL) {
pr_debug("(%s) ERROR: no real wpan device found\n", __func__);
- dev_kfree_skb(skb);
- } else
+ goto error;
+ }
+
+ if (skb->len <= IEEE802154_MTU) {
err = dev_queue_xmit(skb);
+ goto out;
+ }
+
+ pr_debug("(%s): frame is too big, fragmentation is needed\n",
+ __func__);
+ err = lowpan_skb_fragmentation(skb);
+error:
+ dev_kfree_skb(skb);
+out:
+ if (err < 0)
+ pr_debug("(%s): ERROR: xmit failed\n", __func__);
return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK);
}
@@ -730,13 +1072,12 @@ static void lowpan_setup(struct net_device *dev)
dev->addr_len = IEEE802154_ADDR_LEN;
memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
dev->type = ARPHRD_IEEE802154;
- dev->features = NETIF_F_NO_CSUM;
/* Frame Control + Sequence Number + Address fields + Security Header */
dev->hard_header_len = 2 + 1 + 20 + 14;
dev->needed_tailroom = 2; /* FCS */
dev->mtu = 1281;
dev->tx_queue_len = 0;
- dev->flags = IFF_NOARP | IFF_BROADCAST;
+ dev->flags = IFF_BROADCAST | IFF_MULTICAST;
dev->watchdog_timeo = 0;
dev->netdev_ops = &lowpan_netdev_ops;
@@ -765,8 +1106,15 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
goto drop;
/* check that it's our buffer */
- if ((skb->data[0] & 0xe0) == 0x60)
+ switch (skb->data[0] & 0xe0) {
+ case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
+ case LOWPAN_DISPATCH_FRAG1: /* first fragment header */
+ case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */
lowpan_process_data(skb);
+ break;
+ default:
+ break;
+ }
return NET_RX_SUCCESS;
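As a side note on the fragmentation hunks above: the short user-space sketch below is not the kernel code (the constant names, pack_frag_head() and the main() driver are invented for illustration); it only mirrors the byte layout that lowpan_skb_fragmentation() writes into head[] and that the reassembly path reads back: a dispatch byte carrying the low three bits of the datagram size, the remaining size bits in the second byte, a 16-bit tag stored low byte first, and, for FRAGN only, the offset in 8-octet units.

#include <stdint.h>
#include <stdio.h>

#define DISPATCH_FRAG1 0xc0     /* 11000xxx, first fragment */
#define DISPATCH_FRAGN 0xe0     /* 11100xxx, subsequent fragments */

/* Build a FRAG1 (first != 0) or FRAGN fragment header; returns its length. */
static int pack_frag_head(uint8_t head[5], int first, uint16_t dgram_size,
                          uint16_t tag, uint16_t offset)
{
        head[0] = (first ? DISPATCH_FRAG1 : DISPATCH_FRAGN) | (dgram_size & 0x7);
        head[1] = (dgram_size >> 3) & 0xff;
        head[2] = tag & 0xff;
        head[3] = tag >> 8;
        if (first)
                return 4;               /* FRAG1 carries no offset field */
        head[4] = offset / 8;           /* FRAGN: offset in 8-octet units */
        return 5;
}

/* Receive side: recover the datagram size the same way the patch does. */
static uint16_t unpack_dgram_size(const uint8_t *head)
{
        return (head[0] & 7) | (head[1] << 3);
}

int main(void)
{
        uint8_t head[5];
        int hlen;

        hlen = pack_frag_head(head, 1, 1280, 0x1234, 0);
        printf("FRAG1: hlen=%d size=%u\n", hlen, (unsigned)unpack_dgram_size(head));
        hlen = pack_frag_head(head, 0, 1280, 0x1234, 88);
        printf("FRAGN: hlen=%d size=%u offset=%d\n",
               hlen, (unsigned)unpack_dgram_size(head), head[4] * 8);
        return 0;
}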
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
index 5d8cf80b930..aeff3f31048 100644
--- a/net/ieee802154/6lowpan.h
+++ b/net/ieee802154/6lowpan.h
@@ -159,6 +159,24 @@
#define LOWPAN_DISPATCH_FRAG1 0xc0 /* 11000xxx */
#define LOWPAN_DISPATCH_FRAGN 0xe0 /* 11100xxx */
+#define LOWPAN_DISPATCH_MASK 0xf8 /* 11111000 */
+
+#define LOWPAN_FRAG_TIMEOUT (HZ * 60) /* time-out 60 sec */
+
+#define LOWPAN_FRAG1_HEAD_SIZE 0x4
+#define LOWPAN_FRAGN_HEAD_SIZE 0x5
+
+/*
+ * According to the IEEE 802.15.4 standard:
+ * - the MTU is 127 octets
+ * - the maximum MHR size is 37 octets
+ * - the MFR size is 2 octets
+ *
+ * so the minimal payload size that we can guarantee is:
+ * MTU - MHR - MFR = 88 octets
+ */
+#define LOWPAN_FRAG_SIZE 88
+
/*
* Values of fields within the IPHC encoding first byte
* (C stands for compressed and I for inline)
@@ -201,6 +219,11 @@
#define LOWPAN_NHC_UDP_CHECKSUMC 0x04
#define LOWPAN_NHC_UDP_CHECKSUMI 0x00
+#define LOWPAN_NHC_UDP_4BIT_PORT 0xF0B0
+#define LOWPAN_NHC_UDP_4BIT_MASK 0xFFF0
+#define LOWPAN_NHC_UDP_8BIT_PORT 0xF000
+#define LOWPAN_NHC_UDP_8BIT_MASK 0xFF00
+
/* values for port compression, _with checksum_ ie bit 5 set to 0 */
#define LOWPAN_NHC_UDP_CS_P_00 0xF0 /* all inline */
#define LOWPAN_NHC_UDP_CS_P_01 0xF1 /* source 16bit inline,
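The constants added above are easy to sanity-check in isolation. The throwaway user-space snippet below redefines them locally (it does not include the kernel header) to show the 88-octet arithmetic behind LOWPAN_FRAG_SIZE and how the 0xf8 dispatch mask matches a FRAG1/FRAGN byte regardless of the three size bits stored in it.

#include <assert.h>
#include <stdio.h>

#define MTU             127     /* IEEE 802.15.4 PHY payload */
#define MAX_MHR         37      /* worst-case MAC header */
#define MFR             2       /* MAC footer (FCS) */

#define DISPATCH_MASK   0xf8    /* 11111000 */
#define DISPATCH_FRAG1  0xc0    /* 11000xxx */
#define DISPATCH_FRAGN  0xe0    /* 11100xxx */

int main(void)
{
        unsigned char b = DISPATCH_FRAGN | 0x5; /* dispatch with size bits set */

        assert(MTU - MAX_MHR - MFR == 88);      /* == LOWPAN_FRAG_SIZE */
        printf("%s\n", (b & DISPATCH_MASK) == DISPATCH_FRAGN ? "FRAGN" : "other");
        return 0;
}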
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index faecf648123..1b09eaabaac 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -209,6 +209,7 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
unsigned mtu;
struct sk_buff *skb;
struct dgram_sock *ro = dgram_sk(sk);
+ int hlen, tlen;
int err;
if (msg->msg_flags & MSG_OOB) {
@@ -229,13 +230,15 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
mtu = dev->mtu;
pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
- skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + size,
+ hlen = LL_RESERVED_SPACE(dev);
+ tlen = dev->needed_tailroom;
+ skb = sock_alloc_send_skb(sk, hlen + tlen + size,
msg->msg_flags & MSG_DONTWAIT,
&err);
if (!skb)
goto out_dev;
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, hlen);
skb_reset_network_header(skb);
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
index 10970ca8574..f96bae8fd33 100644
--- a/net/ieee802154/raw.c
+++ b/net/ieee802154/raw.c
@@ -108,6 +108,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
struct net_device *dev;
unsigned mtu;
struct sk_buff *skb;
+ int hlen, tlen;
int err;
if (msg->msg_flags & MSG_OOB) {
@@ -137,12 +138,14 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
goto out_dev;
}
- skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + size,
+ hlen = LL_RESERVED_SPACE(dev);
+ tlen = dev->needed_tailroom;
+ skb = sock_alloc_send_skb(sk, hlen + tlen + size,
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
goto out_dev;
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, hlen);
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
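The dgram.c and raw.c hunks above, like many of the IPv4/IPv6 hunks further down, replace LL_ALLOCATED_SPACE(dev) with an explicit hlen = LL_RESERVED_SPACE(dev) plus tlen = dev->needed_tailroom, and then reserve only hlen. The point is that tailroom (for example the FCS that 6lowpan devices declare via needed_tailroom) must stay at the end of the buffer rather than being reserved at the head. The toy buffer below is only an illustration of that budgeting: struct buf, buf_init() and buf_reserve() are made up, with hlen/tlen standing in for the two kernel values.

#include <stdio.h>
#include <stdlib.h>

struct buf {
        unsigned char *head, *data, *tail, *end;
};

static int buf_init(struct buf *b, size_t size)
{
        b->head = malloc(size);
        if (!b->head)
                return -1;
        b->data = b->tail = b->head;
        b->end = b->head + size;
        return 0;
}

static void buf_reserve(struct buf *b, size_t len)      /* cf. skb_reserve() */
{
        b->data += len;
        b->tail += len;
}

int main(void)
{
        size_t hlen = 16;       /* stands in for LL_RESERVED_SPACE(dev) */
        size_t tlen = 2;        /* stands in for dev->needed_tailroom (FCS) */
        size_t payload = 100;
        struct buf b;

        if (buf_init(&b, hlen + tlen + payload))
                return 1;
        buf_reserve(&b, hlen);  /* reserve the headroom only */
        printf("headroom=%td room after data=%td\n",
               b.data - b.head, b.end - b.tail);
        free(b.head);
        return 0;
}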
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1b5096a9875..15dc4c4828d 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1250,7 +1250,8 @@ out:
return err;
}
-static struct sk_buff *inet_gso_segment(struct sk_buff *skb, u32 features)
+static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct iphdr *iph;
@@ -1572,9 +1573,9 @@ static __net_init int ipv4_mib_init_net(struct net *net)
sizeof(struct icmp_mib),
__alignof__(struct icmp_mib)) < 0)
goto err_icmp_mib;
- if (snmp_mib_init((void __percpu **)net->mib.icmpmsg_statistics,
- sizeof(struct icmpmsg_mib),
- __alignof__(struct icmpmsg_mib)) < 0)
+ net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
+ GFP_KERNEL);
+ if (!net->mib.icmpmsg_statistics)
goto err_icmpmsg_mib;
tcp_mib_init(net);
@@ -1598,7 +1599,7 @@ err_tcp_mib:
static __net_exit void ipv4_mib_exit_net(struct net *net)
{
- snmp_mib_free((void __percpu **)net->mib.icmpmsg_statistics);
+ kfree(net->mib.icmpmsg_statistics);
snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
snmp_mib_free((void __percpu **)net->mib.udp_statistics);
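The af_inet.c hunk above (mirrored later in net/ipv4/proc.c and on the IPv6 side) replaces the per-CPU ICMP message MIB with one shared, zero-filled table of counters, presumably because these counters are sparse and rarely updated, so per-CPU copies mostly cost memory. A minimal plain-C11 sketch of the resulting scheme follows; ICMPMSG_MIB_MAX, the struct layout and the static allocation are illustrative stand-ins, not the kernel definitions.

#include <stdatomic.h>
#include <stdio.h>

#define ICMPMSG_MIB_MAX 512             /* illustrative size only */

struct icmpmsg_mib {
        atomic_long mibs[ICMPMSG_MIB_MAX];
};

/* Static storage plays the role of kzalloc(): everything starts at zero. */
static struct icmpmsg_mib icmpmsg_stats;

int main(void)
{
        /* hot path: one atomic increment, no per-CPU fold needed later */
        atomic_fetch_add(&icmpmsg_stats.mibs[8], 1);

        /* /proc reader: a single atomic load per counter */
        printf("type 8: %ld\n", atomic_load(&icmpmsg_stats.mibs[8]));
        return 0;
}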
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 96a164aa136..5c29ac5b0c3 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -177,7 +177,7 @@ struct neigh_table arp_tbl = {
.gc_staletime = 60 * HZ,
.reachable_time = 30 * HZ,
.delay_probe_time = 5 * HZ,
- .queue_len = 3,
+ .queue_len_bytes = 64*1024,
.ucast_probes = 3,
.mcast_probes = 3,
.anycast_delay = 1 * HZ,
@@ -592,16 +592,18 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
struct sk_buff *skb;
struct arphdr *arp;
unsigned char *arp_ptr;
+ int hlen = LL_RESERVED_SPACE(dev);
+ int tlen = dev->needed_tailroom;
/*
* Allocate a buffer
*/
- skb = alloc_skb(arp_hdr_len(dev) + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
+ skb = alloc_skb(arp_hdr_len(dev) + hlen + tlen, GFP_ATOMIC);
if (skb == NULL)
return NULL;
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, hlen);
skb_reset_network_header(skb);
arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev));
skb->dev = dev;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index b2ca095cb9d..313ad93d2f7 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -304,9 +304,11 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
struct igmpv3_report *pig;
struct net *net = dev_net(dev);
struct flowi4 fl4;
+ int hlen = LL_RESERVED_SPACE(dev);
+ int tlen = dev->needed_tailroom;
while (1) {
- skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev),
+ skb = alloc_skb(size + hlen + tlen,
GFP_ATOMIC | __GFP_NOWARN);
if (skb)
break;
@@ -327,7 +329,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
skb_dst_set(skb, &rt->dst);
skb->dev = dev;
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, hlen);
skb_reset_network_header(skb);
pip = ip_hdr(skb);
@@ -647,6 +649,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
__be32 group = pmc ? pmc->multiaddr : 0;
struct flowi4 fl4;
__be32 dst;
+ int hlen, tlen;
if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
return igmpv3_send_report(in_dev, pmc);
@@ -661,7 +664,9 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
if (IS_ERR(rt))
return -1;
- skb = alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
+ hlen = LL_RESERVED_SPACE(dev);
+ tlen = dev->needed_tailroom;
+ skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC);
if (skb == NULL) {
ip_rt_put(rt);
return -1;
@@ -669,7 +674,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
skb_dst_set(skb, &rt->dst);
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, hlen);
skb_reset_network_header(skb);
iph = ip_hdr(skb);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index c14d88ad348..a598768c616 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -588,10 +588,19 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);
-struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
- const gfp_t priority)
+/**
+ * inet_csk_clone_lock - clone an inet socket, and lock its clone
+ * @sk: the socket to clone
+ * @req: request_sock
+ * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
+ *
+ * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
+ */
+struct sock *inet_csk_clone_lock(const struct sock *sk,
+ const struct request_sock *req,
+ const gfp_t priority)
{
- struct sock *newsk = sk_clone(sk, priority);
+ struct sock *newsk = sk_clone_lock(sk, priority);
if (newsk != NULL) {
struct inet_connection_sock *newicsk = inet_csk(newsk);
@@ -615,7 +624,7 @@ struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
}
return newsk;
}
-EXPORT_SYMBOL_GPL(inet_csk_clone);
+EXPORT_SYMBOL_GPL(inet_csk_clone_lock);
/*
* At this point, there should be no process reference to this
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index ccee270a9b6..0a46c541b47 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -132,13 +132,10 @@ static int inet_csk_diag_fill(struct sock *sk,
if (r->idiag_family == AF_INET6) {
const struct ipv6_pinfo *np = inet6_sk(sk);
+ *(struct in6_addr *)r->id.idiag_src = np->rcv_saddr;
+ *(struct in6_addr *)r->id.idiag_dst = np->daddr;
if (ext & (1 << (INET_DIAG_TCLASS - 1)))
RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
-
- ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
- &np->rcv_saddr);
- ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
- &np->daddr);
}
#endif
@@ -228,10 +225,8 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
const struct inet6_timewait_sock *tw6 =
inet6_twsk((struct sock *)tw);
- ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
- &tw6->tw_v6_rcv_saddr);
- ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
- &tw6->tw_v6_daddr);
+ *(struct in6_addr *)r->id.idiag_src = tw6->tw_v6_rcv_saddr;
+ *(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr;
}
#endif
nlh->nlmsg_len = skb_tail_pointer(skb) - previous_tail;
@@ -607,10 +602,8 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
r->idiag_inode = 0;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
if (r->idiag_family == AF_INET6) {
- ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
- &inet6_rsk(req)->loc_addr);
- ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
- &inet6_rsk(req)->rmt_addr);
+ *(struct in6_addr *)r->id.idiag_src = inet6_rsk(req)->loc_addr;
+ *(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr;
}
#endif
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
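A large share of this series is the mechanical ipv6_addr_copy() to plain-assignment conversion seen in the hunks above and throughout the IPv6 files below. As far as I can tell the old helper was just a memcpy() of the 16-byte struct in6_addr, so a structure assignment is an exact replacement; the throwaway snippet below only demonstrates that equivalence.

#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        struct in6_addr a, b;

        memset(&a, 0xab, sizeof(a));
        b = a;                  /* what used to be ipv6_addr_copy(&b, &a) */
        printf("identical: %d\n", memcmp(&a, &b, sizeof(a)) == 0);
        return 0;
}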
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d55110e9312..2b32296b795 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -171,7 +171,7 @@ struct pcpu_tstats {
unsigned long rx_bytes;
unsigned long tx_packets;
unsigned long tx_bytes;
-};
+} __attribute__((aligned(4*sizeof(unsigned long))));
static struct net_device_stats *ipgre_get_stats(struct net_device *dev)
{
@@ -835,6 +835,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
(skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
+ if (max_headroom > dev->needed_headroom)
+ dev->needed_headroom = max_headroom;
if (!new_skb) {
ip_rt_put(rt);
dev->stats.tx_dropped++;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 09ff51bf16a..80d5fa45021 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -55,20 +55,13 @@
/*
* SOL_IP control messages.
*/
+#define PKTINFO_SKB_CB(__skb) ((struct in_pktinfo *)((__skb)->cb))
static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
- struct in_pktinfo info;
- struct rtable *rt = skb_rtable(skb);
+ struct in_pktinfo info = *PKTINFO_SKB_CB(skb);
info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
- if (rt) {
- info.ipi_ifindex = rt->rt_iif;
- info.ipi_spec_dst.s_addr = rt->rt_spec_dst;
- } else {
- info.ipi_ifindex = 0;
- info.ipi_spec_dst.s_addr = 0;
- }
put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
}
@@ -992,20 +985,28 @@ e_inval:
}
/**
- * ip_queue_rcv_skb - Queue an skb into sock receive queue
+ * ipv4_pktinfo_prepare - transfer route information from rtable to skb
* @sk: socket
* @skb: buffer
*
- * Queues an skb into socket receive queue. If IP_CMSG_PKTINFO option
- * is not set, we drop skb dst entry now, while dst cache line is hot.
+ * To support the IP_CMSG_PKTINFO option, we store rt_iif and rt_spec_dst
+ * in skb->cb[] before the dst is dropped.
+ * This way, the receiver doesn't take cache line misses to read the rtable.
*/
-int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+void ipv4_pktinfo_prepare(struct sk_buff *skb)
{
- if (!(inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO))
- skb_dst_drop(skb);
- return sock_queue_rcv_skb(sk, skb);
+ struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
+ const struct rtable *rt = skb_rtable(skb);
+
+ if (rt) {
+ pktinfo->ipi_ifindex = rt->rt_iif;
+ pktinfo->ipi_spec_dst.s_addr = rt->rt_spec_dst;
+ } else {
+ pktinfo->ipi_ifindex = 0;
+ pktinfo->ipi_spec_dst.s_addr = 0;
+ }
+ skb_dst_drop(skb);
}
-EXPORT_SYMBOL(ip_queue_rcv_skb);
int ip_setsockopt(struct sock *sk, int level,
int optname, char __user *optval, unsigned int optlen)
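The PKTINFO_SKB_CB()/ipv4_pktinfo_prepare() change above splits the IP_PKTINFO work in two: the route-derived fields are parked in the packet's control block at queue time, while the rtable is still cache-hot, and the receive path later reads them from there instead of touching a dst that has already been dropped. The user-space sketch below shows that producer/consumer split; struct pkt, struct route and every field name in it are invented for the example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct pktinfo {
        int ifindex;
        uint32_t spec_dst;
};

struct pkt {
        char cb[48] __attribute__((aligned(8)));  /* scratch area, like skb->cb[] */
        /* ... payload would follow ... */
};

#define PKTINFO_CB(p)   ((struct pktinfo *)(p)->cb)

struct route {
        int iif;
        uint32_t spec_dst;
};

/* queue side: runs while the route is still around (cf. ipv4_pktinfo_prepare) */
static void prepare_pktinfo(struct pkt *p, const struct route *rt)
{
        if (rt) {
                PKTINFO_CB(p)->ifindex = rt->iif;
                PKTINFO_CB(p)->spec_dst = rt->spec_dst;
        } else {
                PKTINFO_CB(p)->ifindex = 0;
                PKTINFO_CB(p)->spec_dst = 0;
        }
        /* the route reference would be released here */
}

/* recvmsg side: no route needed any more (cf. ip_cmsg_recv_pktinfo) */
static void report_pktinfo(struct pkt *p)
{
        printf("ifindex=%d spec_dst=%#x\n",
               PKTINFO_CB(p)->ifindex, (unsigned)PKTINFO_CB(p)->spec_dst);
}

int main(void)
{
        struct route rt = { .iif = 2, .spec_dst = 0x0100007fu };
        struct pkt p;

        memset(&p, 0, sizeof(p));
        prepare_pktinfo(&p, &rt);
        report_pktinfo(&p);
        return 0;
}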
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 0da2afc97f3..915eb5265b2 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -763,13 +763,15 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
struct sk_buff *skb;
struct bootp_pkt *b;
struct iphdr *h;
+ int hlen = LL_RESERVED_SPACE(dev);
+ int tlen = dev->needed_tailroom;
/* Allocate packet */
- skb = alloc_skb(sizeof(struct bootp_pkt) + LL_ALLOCATED_SPACE(dev) + 15,
+ skb = alloc_skb(sizeof(struct bootp_pkt) + hlen + tlen + 15,
GFP_KERNEL);
if (!skb)
return;
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, hlen);
b = (struct bootp_pkt *) skb_put(skb, sizeof(struct bootp_pkt));
memset(b, 0, sizeof(struct bootp_pkt));
@@ -822,8 +824,13 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
skb->dev = dev;
skb->protocol = htons(ETH_P_IP);
if (dev_hard_header(skb, dev, ntohs(skb->protocol),
- dev->broadcast, dev->dev_addr, skb->len) < 0 ||
- dev_queue_xmit(skb) < 0)
+ dev->broadcast, dev->dev_addr, skb->len) < 0) {
+ kfree_skb(skb);
+ printk("E");
+ return;
+ }
+
+ if (dev_queue_xmit(skb) < 0)
printk("E");
}
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 065effd8349..94906908a41 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -148,7 +148,7 @@ struct pcpu_tstats {
unsigned long rx_bytes;
unsigned long tx_packets;
unsigned long tx_bytes;
-};
+} __attribute__((aligned(4*sizeof(unsigned long))));
static struct net_device_stats *ipip_get_stats(struct net_device *dev)
{
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 466ea8bb7a4..961eed4f510 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -288,7 +288,7 @@ static void icmpmsg_put(struct seq_file *seq)
count = 0;
for (i = 0; i < ICMPMSG_MIB_MAX; i++) {
- val = snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics, i);
+ val = atomic_long_read(&net->mib.icmpmsg_statistics->mibs[i]);
if (val) {
type[count] = i;
vals[count++] = val;
@@ -307,6 +307,7 @@ static void icmp_put(struct seq_file *seq)
{
int i;
struct net *net = seq->private;
+ atomic_long_t *ptr = net->mib.icmpmsg_statistics->mibs;
seq_puts(seq, "\nIcmp: InMsgs InErrors");
for (i=0; icmpmibmap[i].name != NULL; i++)
@@ -319,15 +320,13 @@ static void icmp_put(struct seq_file *seq)
snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INERRORS));
for (i=0; icmpmibmap[i].name != NULL; i++)
seq_printf(seq, " %lu",
- snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
- icmpmibmap[i].index));
+ atomic_long_read(ptr + icmpmibmap[i].index));
seq_printf(seq, " %lu %lu",
snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
for (i=0; icmpmibmap[i].name != NULL; i++)
seq_printf(seq, " %lu",
- snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
- icmpmibmap[i].index | 0x100));
+ atomic_long_read(ptr + (icmpmibmap[i].index | 0x100)));
}
/*
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 007e2eb769d..3ccda5ae8a2 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -292,7 +292,8 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
{
/* Charge it to the socket. */
- if (ip_queue_rcv_skb(sk, skb) < 0) {
+ ipv4_pktinfo_prepare(skb);
+ if (sock_queue_rcv_skb(sk, skb) < 0) {
kfree_skb(skb);
return NET_RX_DROP;
}
@@ -327,6 +328,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
unsigned int iphlen;
int err;
struct rtable *rt = *rtp;
+ int hlen, tlen;
if (length > rt->dst.dev->mtu) {
ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
@@ -336,12 +338,14 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
if (flags&MSG_PROBE)
goto out;
+ hlen = LL_RESERVED_SPACE(rt->dst.dev);
+ tlen = rt->dst.dev->needed_tailroom;
skb = sock_alloc_send_skb(sk,
- length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15,
+ length + hlen + tlen + 15,
flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto error;
- skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev));
+ skb_reserve(skb, hlen);
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 34f5db1e1c8..50c35964566 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2653,7 +2653,8 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif
-struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features)
+struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
+ netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct tcphdr *th;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 66363b689ad..945efffdd92 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -343,8 +343,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
tw6 = inet6_twsk((struct sock *)tw);
- ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
- ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
+ tw6->tw_v6_daddr = np->daddr;
+ tw6->tw_v6_rcv_saddr = np->rcv_saddr;
tw->tw_tclass = np->tclass;
tw->tw_ipv6only = np->ipv6only;
}
@@ -425,7 +425,7 @@ static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
*/
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
- struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);
+ struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
if (newsk != NULL) {
const struct inet_request_sock *ireq = inet_rsk(req);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ab0966df1e2..b867ea23ece 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1357,7 +1357,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
if (inet_sk(sk)->inet_daddr)
sock_rps_save_rxhash(sk, skb);
- rc = ip_queue_rcv_skb(sk, skb);
+ rc = sock_queue_rcv_skb(sk, skb);
if (rc < 0) {
int is_udplite = IS_UDPLITE(sk);
@@ -1473,6 +1473,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
rc = 0;
+ ipv4_pktinfo_prepare(skb);
bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
rc = __udp_queue_rcv_skb(sk, skb);
@@ -2246,7 +2247,8 @@ int udp4_ufo_send_check(struct sk_buff *skb)
return 0;
}
-struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features)
+struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
+ netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index cf88df82e2c..58605172634 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -636,7 +636,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
goto out;
}
- ipv6_addr_copy(&ifa->addr, addr);
+ ifa->addr = *addr;
spin_lock_init(&ifa->lock);
spin_lock_init(&ifa->state_lock);
@@ -1228,7 +1228,7 @@ try_nextdev:
if (!hiscore->ifa)
return -EADDRNOTAVAIL;
- ipv6_addr_copy(saddr, &hiscore->ifa->addr);
+ *saddr = hiscore->ifa->addr;
in6_ifa_put(hiscore->ifa);
return 0;
}
@@ -1249,7 +1249,7 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
list_for_each_entry(ifp, &idev->addr_list, if_list) {
if (ifp->scope == IFA_LINK &&
!(ifp->flags & banned_flags)) {
- ipv6_addr_copy(addr, &ifp->addr);
+ *addr = ifp->addr;
err = 0;
break;
}
@@ -1700,7 +1700,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
.fc_protocol = RTPROT_KERNEL,
};
- ipv6_addr_copy(&cfg.fc_dst, pfx);
+ cfg.fc_dst = *pfx;
/* Prevent useless cloning on PtP SIT.
This thing is done here expecting that the whole
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index d27c797f9f0..7694c82e629 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -347,7 +347,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
*/
v4addr = LOOPBACK4_IPV6;
if (!(addr_type & IPV6_ADDR_MULTICAST)) {
- if (!inet->transparent &&
+ if (!(inet->freebind || inet->transparent) &&
!ipv6_chk_addr(net, &addr->sin6_addr,
dev, 0)) {
err = -EADDRNOTAVAIL;
@@ -361,10 +361,10 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
inet->inet_rcv_saddr = v4addr;
inet->inet_saddr = v4addr;
- ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
+ np->rcv_saddr = addr->sin6_addr;
if (!(addr_type & IPV6_ADDR_MULTICAST))
- ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
+ np->saddr = addr->sin6_addr;
/* Make sure we are allowed to bind here. */
if (sk->sk_prot->get_port(sk, snum)) {
@@ -458,14 +458,14 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
peer == 1)
return -ENOTCONN;
sin->sin6_port = inet->inet_dport;
- ipv6_addr_copy(&sin->sin6_addr, &np->daddr);
+ sin->sin6_addr = np->daddr;
if (np->sndflow)
sin->sin6_flowinfo = np->flow_label;
} else {
if (ipv6_addr_any(&np->rcv_saddr))
- ipv6_addr_copy(&sin->sin6_addr, &np->saddr);
+ sin->sin6_addr = np->saddr;
else
- ipv6_addr_copy(&sin->sin6_addr, &np->rcv_saddr);
+ sin->sin6_addr = np->rcv_saddr;
sin->sin6_port = inet->inet_sport;
}
@@ -660,8 +660,8 @@ int inet6_sk_rebuild_header(struct sock *sk)
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = sk->sk_protocol;
- ipv6_addr_copy(&fl6.daddr, &np->daddr);
- ipv6_addr_copy(&fl6.saddr, &np->saddr);
+ fl6.daddr = np->daddr;
+ fl6.saddr = np->saddr;
fl6.flowlabel = np->flow_label;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = sk->sk_mark;
@@ -769,7 +769,8 @@ out:
return err;
}
-static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, u32 features)
+static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct ipv6hdr *ipv6h;
@@ -985,9 +986,9 @@ static int __net_init ipv6_init_mibs(struct net *net)
sizeof(struct icmpv6_mib),
__alignof__(struct icmpv6_mib)) < 0)
goto err_icmp_mib;
- if (snmp_mib_init((void __percpu **)net->mib.icmpv6msg_statistics,
- sizeof(struct icmpv6msg_mib),
- __alignof__(struct icmpv6msg_mib)) < 0)
+ net->mib.icmpv6msg_statistics = kzalloc(sizeof(struct icmpv6msg_mib),
+ GFP_KERNEL);
+ if (!net->mib.icmpv6msg_statistics)
goto err_icmpmsg_mib;
return 0;
@@ -1008,7 +1009,7 @@ static void ipv6_cleanup_mibs(struct net *net)
snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6);
snmp_mib_free((void __percpu **)net->mib.ipv6_statistics);
snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics);
- snmp_mib_free((void __percpu **)net->mib.icmpv6msg_statistics);
+ kfree(net->mib.icmpv6msg_statistics);
}
static int __net_init inet6_net_init(struct net *net)
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 4c0f894d084..2ae79dbeec2 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -193,9 +193,9 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des
printk(KERN_WARNING "destopt hao: invalid header length: %u\n", hao->length);
goto bad;
}
- ipv6_addr_copy(&final_addr, &hao->addr);
- ipv6_addr_copy(&hao->addr, &iph->saddr);
- ipv6_addr_copy(&iph->saddr, &final_addr);
+ final_addr = hao->addr;
+ hao->addr = iph->saddr;
+ iph->saddr = final_addr;
}
break;
}
@@ -241,13 +241,13 @@ static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr)
segments = rthdr->hdrlen >> 1;
addrs = ((struct rt0_hdr *)rthdr)->addr;
- ipv6_addr_copy(&final_addr, addrs + segments - 1);
+ final_addr = addrs[segments - 1];
addrs += segments - segments_left;
memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs));
- ipv6_addr_copy(addrs, &iph->daddr);
- ipv6_addr_copy(&iph->daddr, &final_addr);
+ addrs[0] = iph->daddr;
+ iph->daddr = final_addr;
}
static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir)
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 674255f5e6b..fc1cdcd7041 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -75,7 +75,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
if (pac == NULL)
return -ENOMEM;
pac->acl_next = NULL;
- ipv6_addr_copy(&pac->acl_addr, addr);
+ pac->acl_addr = *addr;
rcu_read_lock();
if (ifindex == 0) {
@@ -296,7 +296,7 @@ int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr)
goto out;
}
- ipv6_addr_copy(&aca->aca_addr, addr);
+ aca->aca_addr = *addr;
aca->aca_idev = idev;
aca->aca_rt = rt;
aca->aca_users = 1;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index e2480691c22..ae08aee1773 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -71,7 +71,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
- ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
+ usin->sin6_addr = flowlabel->dst;
}
}
@@ -143,7 +143,7 @@ ipv4_connected:
}
}
- ipv6_addr_copy(&np->daddr, daddr);
+ np->daddr = *daddr;
np->flow_label = fl6.flowlabel;
inet->inet_dport = usin->sin6_port;
@@ -154,8 +154,8 @@ ipv4_connected:
*/
fl6.flowi6_proto = sk->sk_protocol;
- ipv6_addr_copy(&fl6.daddr, &np->daddr);
- ipv6_addr_copy(&fl6.saddr, &np->saddr);
+ fl6.daddr = np->daddr;
+ fl6.saddr = np->saddr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = inet->inet_dport;
@@ -179,10 +179,10 @@ ipv4_connected:
/* source address lookup done in ip6_dst_lookup */
if (ipv6_addr_any(&np->saddr))
- ipv6_addr_copy(&np->saddr, &fl6.saddr);
+ np->saddr = fl6.saddr;
if (ipv6_addr_any(&np->rcv_saddr)) {
- ipv6_addr_copy(&np->rcv_saddr, &fl6.saddr);
+ np->rcv_saddr = fl6.saddr;
inet->inet_rcv_saddr = LOOPBACK4_IPV6;
if (sk->sk_prot->rehash)
sk->sk_prot->rehash(sk);
@@ -257,7 +257,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
skb_put(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
iph = ipv6_hdr(skb);
- ipv6_addr_copy(&iph->daddr, &fl6->daddr);
+ iph->daddr = fl6->daddr;
serr = SKB_EXT_ERR(skb);
serr->ee.ee_errno = err;
@@ -294,7 +294,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
skb_put(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
iph = ipv6_hdr(skb);
- ipv6_addr_copy(&iph->daddr, &fl6->daddr);
+ iph->daddr = fl6->daddr;
mtu_info = IP6CBMTU(skb);
@@ -303,7 +303,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
mtu_info->ip6m_addr.sin6_port = 0;
mtu_info->ip6m_addr.sin6_flowinfo = 0;
mtu_info->ip6m_addr.sin6_scope_id = fl6->flowi6_oif;
- ipv6_addr_copy(&mtu_info->ip6m_addr.sin6_addr, &ipv6_hdr(skb)->daddr);
+ mtu_info->ip6m_addr.sin6_addr = ipv6_hdr(skb)->daddr;
__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
skb_reset_transport_header(skb);
@@ -354,8 +354,8 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
sin->sin6_port = serr->port;
sin->sin6_scope_id = 0;
if (skb->protocol == htons(ETH_P_IPV6)) {
- ipv6_addr_copy(&sin->sin6_addr,
- (struct in6_addr *)(nh + serr->addr_offset));
+ sin->sin6_addr =
+ *(struct in6_addr *)(nh + serr->addr_offset);
if (np->sndflow)
sin->sin6_flowinfo =
(*(__be32 *)(nh + serr->addr_offset - 24) &
@@ -376,7 +376,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
sin->sin6_flowinfo = 0;
sin->sin6_scope_id = 0;
if (skb->protocol == htons(ETH_P_IPV6)) {
- ipv6_addr_copy(&sin->sin6_addr, &ipv6_hdr(skb)->saddr);
+ sin->sin6_addr = ipv6_hdr(skb)->saddr;
if (np->rxopt.all)
datagram_recv_ctl(sk, msg, skb);
if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
@@ -451,7 +451,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
sin->sin6_flowinfo = 0;
sin->sin6_port = 0;
sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id;
- ipv6_addr_copy(&sin->sin6_addr, &mtu_info.ip6m_addr.sin6_addr);
+ sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr;
}
put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info);
@@ -475,7 +475,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
struct in6_pktinfo src_info;
src_info.ipi6_ifindex = opt->iif;
- ipv6_addr_copy(&src_info.ipi6_addr, &ipv6_hdr(skb)->daddr);
+ src_info.ipi6_addr = ipv6_hdr(skb)->daddr;
put_cmsg(msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info);
}
@@ -550,7 +550,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
struct in6_pktinfo src_info;
src_info.ipi6_ifindex = opt->iif;
- ipv6_addr_copy(&src_info.ipi6_addr, &ipv6_hdr(skb)->daddr);
+ src_info.ipi6_addr = ipv6_hdr(skb)->daddr;
put_cmsg(msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
}
if (np->rxopt.bits.rxohlim) {
@@ -584,7 +584,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
*/
sin6.sin6_family = AF_INET6;
- ipv6_addr_copy(&sin6.sin6_addr, &ipv6_hdr(skb)->daddr);
+ sin6.sin6_addr = ipv6_hdr(skb)->daddr;
sin6.sin6_port = ports[1];
sin6.sin6_flowinfo = 0;
sin6.sin6_scope_id = 0;
@@ -654,12 +654,12 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
if (addr_type != IPV6_ADDR_ANY) {
int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
- if (!inet_sk(sk)->transparent &&
+ if (!(inet_sk(sk)->freebind || inet_sk(sk)->transparent) &&
!ipv6_chk_addr(net, &src_info->ipi6_addr,
strict ? dev : NULL, 0))
err = -EINVAL;
else
- ipv6_addr_copy(&fl6->saddr, &src_info->ipi6_addr);
+ fl6->saddr = src_info->ipi6_addr;
}
rcu_read_unlock();
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index bf22a225f42..3d641b6e9b0 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -243,9 +243,9 @@ static int ipv6_dest_hao(struct sk_buff *skb, int optoff)
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->ip_summed = CHECKSUM_NONE;
- ipv6_addr_copy(&tmp_addr, &ipv6h->saddr);
- ipv6_addr_copy(&ipv6h->saddr, &hao->addr);
- ipv6_addr_copy(&hao->addr, &tmp_addr);
+ tmp_addr = ipv6h->saddr;
+ ipv6h->saddr = hao->addr;
+ hao->addr = tmp_addr;
if (skb->tstamp.tv64 == 0)
__net_timestamp(skb);
@@ -461,9 +461,9 @@ looped_back:
return -1;
}
- ipv6_addr_copy(&daddr, addr);
- ipv6_addr_copy(addr, &ipv6_hdr(skb)->daddr);
- ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &daddr);
+ daddr = *addr;
+ *addr = ipv6_hdr(skb)->daddr;
+ ipv6_hdr(skb)->daddr = daddr;
skb_dst_drop(skb);
ip6_route_input(skb);
@@ -690,7 +690,7 @@ static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
memcpy(phdr->addr, ihdr->addr + 1,
(hops - 1) * sizeof(struct in6_addr));
- ipv6_addr_copy(phdr->addr + (hops - 1), *addr_p);
+ phdr->addr[hops - 1] = **addr_p;
*addr_p = ihdr->addr;
phdr->rt_hdr.nexthdr = *proto;
@@ -888,8 +888,8 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
if (!opt || !opt->srcrt)
return NULL;
- ipv6_addr_copy(orig, &fl6->daddr);
- ipv6_addr_copy(&fl6->daddr, ((struct rt0_hdr *)opt->srcrt)->addr);
+ *orig = fl6->daddr;
+ fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
return orig;
}
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 295571576f8..b6c57315206 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -96,7 +96,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
if (!ipv6_prefix_equal(&saddr, &r->src.addr,
r->src.plen))
goto again;
- ipv6_addr_copy(&flp6->saddr, &saddr);
+ flp6->saddr = saddr;
}
goto out;
}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 90868fb4275..9e2bdccf914 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -290,9 +290,9 @@ static void mip6_addr_swap(struct sk_buff *skb)
if (likely(off >= 0)) {
hao = (struct ipv6_destopt_hao *)
(skb_network_header(skb) + off);
- ipv6_addr_copy(&tmp, &iph->saddr);
- ipv6_addr_copy(&iph->saddr, &hao->addr);
- ipv6_addr_copy(&hao->addr, &tmp);
+ tmp = iph->saddr;
+ iph->saddr = hao->addr;
+ hao->addr = tmp;
}
}
}
@@ -444,9 +444,9 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_ICMPV6;
- ipv6_addr_copy(&fl6.daddr, &hdr->saddr);
+ fl6.daddr = hdr->saddr;
if (saddr)
- ipv6_addr_copy(&fl6.saddr, saddr);
+ fl6.saddr = *saddr;
fl6.flowi6_oif = iif;
fl6.fl6_icmp_type = type;
fl6.fl6_icmp_code = code;
@@ -538,9 +538,9 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_ICMPV6;
- ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
+ fl6.daddr = ipv6_hdr(skb)->saddr;
if (saddr)
- ipv6_addr_copy(&fl6.saddr, saddr);
+ fl6.saddr = *saddr;
fl6.flowi6_oif = skb->dev->ifindex;
fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
@@ -786,8 +786,8 @@ void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6,
int oif)
{
memset(fl6, 0, sizeof(*fl6));
- ipv6_addr_copy(&fl6->saddr, saddr);
- ipv6_addr_copy(&fl6->daddr, daddr);
+ fl6->saddr = *saddr;
+ fl6->daddr = *daddr;
fl6->flowi6_proto = IPPROTO_ICMPV6;
fl6->fl6_icmp_type = type;
fl6->fl6_icmp_code = 0;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 1567fb12039..02dd203d9ea 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -65,9 +65,9 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_TCP;
- ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
+ fl6.daddr = treq->rmt_addr;
final_p = fl6_update_dst(&fl6, np->opt, &final);
- ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
+ fl6.saddr = treq->loc_addr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = inet_rsk(req)->rmt_port;
@@ -157,7 +157,7 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;
sin6->sin6_family = AF_INET6;
- ipv6_addr_copy(&sin6->sin6_addr, &np->daddr);
+ sin6->sin6_addr = np->daddr;
sin6->sin6_port = inet_sk(sk)->inet_dport;
/* We do not store received flowlabel for TCP */
sin6->sin6_flowinfo = 0;
@@ -215,8 +215,8 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = sk->sk_protocol;
- ipv6_addr_copy(&fl6.daddr, &np->daddr);
- ipv6_addr_copy(&fl6.saddr, &np->saddr);
+ fl6.daddr = np->daddr;
+ fl6.saddr = np->saddr;
fl6.flowlabel = np->flow_label;
IP6_ECN_flow_xmit(sk, fl6.flowlabel);
fl6.flowi6_oif = sk->sk_bound_dev_if;
@@ -246,7 +246,7 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
skb_dst_set_noref(skb, dst);
/* Restore final destination back after routing done */
- ipv6_addr_copy(&fl6.daddr, &np->daddr);
+ fl6.daddr = np->daddr;
res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
rcu_read_unlock();
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 93718f3db79..424f063fb22 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -425,7 +425,8 @@ out:
static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
int addrlen, int plen,
- int offset)
+ int offset, int allow_create,
+ int replace_required)
{
struct fib6_node *fn, *in, *ln;
struct fib6_node *pn = NULL;
@@ -447,8 +448,18 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
* Prefix match
*/
if (plen < fn->fn_bit ||
- !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit))
+ !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) {
+ if (!allow_create) {
+ if (replace_required) {
+ pr_warn("IPv6: Can't replace route, "
+ "no match found\n");
+ return ERR_PTR(-ENOENT);
+ }
+ pr_warn("IPv6: NLM_F_CREATE should be set "
+ "when creating new route\n");
+ }
goto insert_above;
+ }
/*
* Exact match ?
@@ -477,6 +488,23 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
fn = dir ? fn->right: fn->left;
} while (fn);
+ if (!allow_create) {
+ /* We should not create a new node because
+ * NLM_F_REPLACE was specified without NLM_F_CREATE.
+ * I assume it is safe to require NLM_F_CREATE when the
+ * REPLACE flag is used! Later we may want to remove the
+ * check for replace_required, because, according
+ * to the netlink specification, NLM_F_CREATE
+ * MUST be specified if a new route is created.
+ * That would keep IPv6 consistent with IPv4.
+ */
+ if (replace_required) {
+ pr_warn("IPv6: Can't replace route, no match found\n");
+ return ERR_PTR(-ENOENT);
+ }
+ pr_warn("IPv6: NLM_F_CREATE should be set "
+ "when creating new route\n");
+ }
/*
* We walked to the bottom of tree.
* Create new leaf node without children.
@@ -614,6 +642,11 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
{
struct rt6_info *iter = NULL;
struct rt6_info **ins;
+ int replace = (NULL != info->nlh &&
+ (info->nlh->nlmsg_flags&NLM_F_REPLACE));
+ int add = (NULL == info->nlh ||
+ (info->nlh->nlmsg_flags&NLM_F_CREATE));
+ int found = 0;
ins = &fn->leaf;
@@ -626,6 +659,13 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
/*
* Same priority level
*/
+ if (NULL != info->nlh &&
+ (info->nlh->nlmsg_flags&NLM_F_EXCL))
+ return -EEXIST;
+ if (replace) {
+ found++;
+ break;
+ }
if (iter->rt6i_dev == rt->rt6i_dev &&
iter->rt6i_idev == rt->rt6i_idev &&
@@ -655,17 +695,40 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
/*
* insert node
*/
+ if (!replace) {
+ if (!add)
+ pr_warn("IPv6: NLM_F_CREATE should be set when creating new route\n");
+
+add:
+ rt->dst.rt6_next = iter;
+ *ins = rt;
+ rt->rt6i_node = fn;
+ atomic_inc(&rt->rt6i_ref);
+ inet6_rt_notify(RTM_NEWROUTE, rt, info);
+ info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
+
+ if ((fn->fn_flags & RTN_RTINFO) == 0) {
+ info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
+ fn->fn_flags |= RTN_RTINFO;
+ }
- rt->dst.rt6_next = iter;
- *ins = rt;
- rt->rt6i_node = fn;
- atomic_inc(&rt->rt6i_ref);
- inet6_rt_notify(RTM_NEWROUTE, rt, info);
- info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
-
- if ((fn->fn_flags & RTN_RTINFO) == 0) {
- info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
- fn->fn_flags |= RTN_RTINFO;
+ } else {
+ if (!found) {
+ if (add)
+ goto add;
+ pr_warn("IPv6: NLM_F_REPLACE set, but no existing node found!\n");
+ return -ENOENT;
+ }
+ *ins = rt;
+ rt->rt6i_node = fn;
+ rt->dst.rt6_next = iter->dst.rt6_next;
+ atomic_inc(&rt->rt6i_ref);
+ inet6_rt_notify(RTM_NEWROUTE, rt, info);
+ rt6_release(iter);
+ if ((fn->fn_flags & RTN_RTINFO) == 0) {
+ info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
+ fn->fn_flags |= RTN_RTINFO;
+ }
}
return 0;
@@ -696,9 +759,25 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
{
struct fib6_node *fn, *pn = NULL;
int err = -ENOMEM;
+ int allow_create = 1;
+ int replace_required = 0;
+ if (NULL != info->nlh) {
+ if (!(info->nlh->nlmsg_flags&NLM_F_CREATE))
+ allow_create = 0;
+ if ((info->nlh->nlmsg_flags&NLM_F_REPLACE))
+ replace_required = 1;
+ }
+ if (!allow_create && !replace_required)
+ pr_warn("IPv6: RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");
fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr),
- rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst));
+ rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst),
+ allow_create, replace_required);
+
+ if (IS_ERR(fn)) {
+ err = PTR_ERR(fn);
+ fn = NULL;
+ }
if (fn == NULL)
goto out;
@@ -736,7 +815,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
sizeof(struct in6_addr), rt->rt6i_src.plen,
- offsetof(struct rt6_info, rt6i_src));
+ offsetof(struct rt6_info, rt6i_src),
+ allow_create, replace_required);
if (sn == NULL) {
/* If it is failed, discard just allocated
@@ -753,8 +833,13 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
} else {
sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
sizeof(struct in6_addr), rt->rt6i_src.plen,
- offsetof(struct rt6_info, rt6i_src));
+ offsetof(struct rt6_info, rt6i_src),
+ allow_create, replace_required);
+ if (IS_ERR(sn)) {
+ err = PTR_ERR(sn);
+ sn = NULL;
+ }
if (sn == NULL)
goto st_failure;
}
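The ip6_fib.c changes above teach the IPv6 FIB about the netlink NLM_F_* flags: NLM_F_EXCL refuses to touch an existing route, NLM_F_REPLACE swaps the matching entry in place and fails with -ENOENT when there is nothing to replace and NLM_F_CREATE was not given, and adding a route without NLM_F_CREATE currently only triggers a warning. The fragment below is a rough user-space restatement of that decision table, not the kernel function; the flag values are the standard netlink ones, everything else is invented.

#include <errno.h>
#include <stdio.h>

#define NLM_F_REPLACE   0x100
#define NLM_F_EXCL      0x200
#define NLM_F_CREATE    0x400

static int route_add_or_replace(unsigned int flags, int route_exists)
{
        if (route_exists) {
                if (flags & NLM_F_EXCL)
                        return -EEXIST;         /* caller said "must not exist" */
                if (flags & NLM_F_REPLACE)
                        return 0;               /* swap the existing entry */
        } else if ((flags & NLM_F_REPLACE) && !(flags & NLM_F_CREATE)) {
                return -ENOENT;                 /* nothing to replace */
        }
        if (!route_exists && !(flags & NLM_F_CREATE))
                fprintf(stderr, "NLM_F_CREATE should be set when creating new route\n");
        return 0;                               /* insert a new entry */
}

int main(void)
{
        printf("%d\n", route_add_or_replace(NLM_F_EXCL, 1));    /* -EEXIST */
        printf("%d\n", route_add_or_replace(NLM_F_REPLACE, 0)); /* -ENOENT */
        printf("%d\n", route_add_or_replace(NLM_F_CREATE, 0));  /* 0: plain add */
        return 0;
}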
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 4566dbd916d..b7867a1215b 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -386,7 +386,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
err = -EINVAL;
goto done;
}
- ipv6_addr_copy(&fl->dst, &freq->flr_dst);
+ fl->dst = freq->flr_dst;
atomic_set(&fl->users, 1);
switch (fl->share) {
case IPV6_FL_S_EXCL:
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 84d0bd5cac9..a24e1555784 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -238,8 +238,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
hdr->nexthdr = proto;
hdr->hop_limit = hlimit;
- ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
- ipv6_addr_copy(&hdr->daddr, first_hop);
+ hdr->saddr = fl6->saddr;
+ hdr->daddr = *first_hop;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
@@ -290,8 +290,8 @@ int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
hdr->nexthdr = proto;
hdr->hop_limit = np->hop_limit;
- ipv6_addr_copy(&hdr->saddr, saddr);
- ipv6_addr_copy(&hdr->daddr, daddr);
+ hdr->saddr = *saddr;
+ hdr->daddr = *daddr;
return 0;
}
@@ -631,6 +631,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
struct ipv6hdr *tmp_hdr;
struct frag_hdr *fh;
unsigned int mtu, hlen, left, len;
+ int hroom, troom;
__be32 frag_id = 0;
int ptr, offset = 0, err=0;
u8 *prevhdr, nexthdr = 0;
@@ -797,6 +798,8 @@ slow_path:
*/
*prevhdr = NEXTHDR_FRAGMENT;
+ hroom = LL_RESERVED_SPACE(rt->dst.dev);
+ troom = rt->dst.dev->needed_tailroom;
/*
* Keep copying data until we run out.
@@ -815,7 +818,8 @@ slow_path:
* Allocate buffer.
*/
- if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->dst.dev), GFP_ATOMIC)) == NULL) {
+ if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
+ hroom + troom, GFP_ATOMIC)) == NULL) {
NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_FRAGFAILS);
@@ -828,7 +832,7 @@ slow_path:
*/
ip6_copy_metadata(frag, skb);
- skb_reserve(frag, LL_RESERVED_SPACE(rt->dst.dev));
+ skb_reserve(frag, hroom);
skb_put(frag, len + hlen + sizeof(struct frag_hdr));
skb_reset_network_header(frag);
fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
@@ -1059,7 +1063,7 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
if (err)
return ERR_PTR(err);
if (final_dst)
- ipv6_addr_copy(&fl6->daddr, final_dst);
+ fl6->daddr = *final_dst;
if (can_sleep)
fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
@@ -1095,7 +1099,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
if (err)
return ERR_PTR(err);
if (final_dst)
- ipv6_addr_copy(&fl6->daddr, final_dst);
+ fl6->daddr = *final_dst;
if (can_sleep)
fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
@@ -1588,7 +1592,7 @@ int ip6_push_pending_frames(struct sock *sk)
if (np->pmtudisc < IPV6_PMTUDISC_DO)
skb->local_df = 1;
- ipv6_addr_copy(final_dst, &fl6->daddr);
+ *final_dst = fl6->daddr;
__skb_pull(skb, skb_network_header_len(skb));
if (opt && opt->opt_flen)
ipv6_push_frag_opts(skb, opt, &proto);
@@ -1604,8 +1608,8 @@ int ip6_push_pending_frames(struct sock *sk)
hdr->hop_limit = np->cork.hop_limit;
hdr->nexthdr = proto;
- ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
- ipv6_addr_copy(&hdr->daddr, final_dst);
+ hdr->saddr = fl6->saddr;
+ hdr->daddr = *final_dst;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 4e2e9ff67ef..f5f98f558ac 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -93,7 +93,7 @@ struct pcpu_tstats {
unsigned long rx_bytes;
unsigned long tx_packets;
unsigned long tx_bytes;
-};
+} __attribute__((aligned(4*sizeof(unsigned long))));
static struct net_device_stats *ip6_get_stats(struct net_device *dev)
{
@@ -979,8 +979,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
ipv6h->hop_limit = t->parms.hop_limit;
ipv6h->nexthdr = proto;
- ipv6_addr_copy(&ipv6h->saddr, &fl6->saddr);
- ipv6_addr_copy(&ipv6h->daddr, &fl6->daddr);
+ ipv6h->saddr = fl6->saddr;
+ ipv6h->daddr = fl6->daddr;
nf_reset(skb);
pkt_len = skb->len;
err = ip6_local_out(skb);
@@ -1155,8 +1155,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
/* Set up flowi template */
- ipv6_addr_copy(&fl6->saddr, &p->laddr);
- ipv6_addr_copy(&fl6->daddr, &p->raddr);
+ fl6->saddr = p->laddr;
+ fl6->daddr = p->raddr;
fl6->flowi6_oif = p->link;
fl6->flowlabel = 0;
@@ -1212,8 +1212,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
static int
ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
{
- ipv6_addr_copy(&t->parms.laddr, &p->laddr);
- ipv6_addr_copy(&t->parms.raddr, &p->raddr);
+ t->parms.laddr = p->laddr;
+ t->parms.raddr = p->raddr;
t->parms.flags = p->flags;
t->parms.hop_limit = p->hop_limit;
t->parms.encap_limit = p->encap_limit;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 449a9185b8f..c7e95c8c579 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1105,8 +1105,8 @@ static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
msg->im6_msgtype = MRT6MSG_WHOLEPKT;
msg->im6_mif = mrt->mroute_reg_vif_num;
msg->im6_pad = 0;
- ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
- ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
+ msg->im6_src = ipv6_hdr(pkt)->saddr;
+ msg->im6_dst = ipv6_hdr(pkt)->daddr;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
@@ -1131,8 +1131,8 @@ static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
msg->im6_msgtype = assert;
msg->im6_mif = mifi;
msg->im6_pad = 0;
- ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
- ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
+ msg->im6_src = ipv6_hdr(pkt)->saddr;
+ msg->im6_dst = ipv6_hdr(pkt)->daddr;
skb_dst_set(skb, dst_clone(skb_dst(pkt)));
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2181,8 +2181,8 @@ int ip6mr_get_route(struct net *net,
iph->payload_len = 0;
iph->nexthdr = IPPROTO_NONE;
iph->hop_limit = 0;
- ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
- ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);
+ iph->saddr = rt->rt6i_src.addr;
+ iph->daddr = rt->rt6i_dst.addr;
err = ip6mr_cache_unresolved(mrt, vif, skb2);
read_unlock(&mrt_lock);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index c99e3ee9781..29993b7079a 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -435,7 +435,7 @@ sticky_done:
goto e_inval;
np->sticky_pktinfo.ipi6_ifindex = pkt.ipi6_ifindex;
- ipv6_addr_copy(&np->sticky_pktinfo.ipi6_addr, &pkt.ipi6_addr);
+ np->sticky_pktinfo.ipi6_addr = pkt.ipi6_addr;
retv = 0;
break;
}
@@ -980,8 +980,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
struct in6_pktinfo src_info;
src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
np->sticky_pktinfo.ipi6_ifindex;
- np->mcast_oif? ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr) :
- ipv6_addr_copy(&src_info.ipi6_addr, &(np->sticky_pktinfo.ipi6_addr));
+ src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr;
put_cmsg(&msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info);
}
if (np->rxopt.bits.rxhlim) {
@@ -992,8 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
struct in6_pktinfo src_info;
src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
np->sticky_pktinfo.ipi6_ifindex;
- np->mcast_oif? ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr) :
- ipv6_addr_copy(&src_info.ipi6_addr, &(np->sticky_pktinfo.ipi6_addr));
+ src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr;
put_cmsg(&msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
}
if (np->rxopt.bits.rxohlim) {
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index ee7839f4d6e..6cc4d1fb8c1 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -155,7 +155,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
return -ENOMEM;
mc_lst->next = NULL;
- ipv6_addr_copy(&mc_lst->addr, addr);
+ mc_lst->addr = *addr;
rcu_read_lock();
if (ifindex == 0) {
@@ -858,7 +858,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);
- ipv6_addr_copy(&mc->mca_addr, addr);
+ mc->mca_addr = *addr;
mc->idev = idev; /* (reference taken) */
mc->mca_users = 1;
/* mca_stamp should be updated upon changes */
@@ -1343,13 +1343,15 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
struct mld2_report *pmr;
struct in6_addr addr_buf;
const struct in6_addr *saddr;
+ int hlen = LL_RESERVED_SPACE(dev);
+ int tlen = dev->needed_tailroom;
int err;
u8 ra[8] = { IPPROTO_ICMPV6, 0,
IPV6_TLV_ROUTERALERT, 2, 0, 0,
IPV6_TLV_PADN, 0 };
/* we assume size > sizeof(ra) here */
- size += LL_ALLOCATED_SPACE(dev);
+ size += hlen + tlen;
/* limit our allocations to order-0 page */
size = min_t(int, size, SKB_MAX_ORDER(0, 0));
skb = sock_alloc_send_skb(sk, size, 1, &err);
@@ -1357,7 +1359,7 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
if (!skb)
return NULL;
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, hlen);
if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
/* <draft-ietf-magma-mld-source-05.txt>:
@@ -1723,6 +1725,8 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
struct mld_msg *hdr;
const struct in6_addr *snd_addr, *saddr;
struct in6_addr addr_buf;
+ int hlen = LL_RESERVED_SPACE(dev);
+ int tlen = dev->needed_tailroom;
int err, len, payload_len, full_len;
u8 ra[8] = { IPPROTO_ICMPV6, 0,
IPV6_TLV_ROUTERALERT, 2, 0, 0,
@@ -1744,7 +1748,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
IPSTATS_MIB_OUT, full_len);
rcu_read_unlock();
- skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + full_len, 1, &err);
+ skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
if (skb == NULL) {
rcu_read_lock();
@@ -1754,7 +1758,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
return;
}
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, hlen);
if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
/* <draft-ietf-magma-mld-source-05.txt>:
@@ -1772,7 +1776,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg));
memset(hdr, 0, sizeof(struct mld_msg));
hdr->mld_type = type;
- ipv6_addr_copy(&hdr->mld_mca, addr);
+ hdr->mld_mca = *addr;
hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
IPPROTO_ICMPV6,
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 43242e6e610..7e1e0fbfef2 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -195,8 +195,8 @@ static inline int mip6_report_rl_allow(struct timeval *stamp,
mip6_report_rl.stamp.tv_sec = stamp->tv_sec;
mip6_report_rl.stamp.tv_usec = stamp->tv_usec;
mip6_report_rl.iif = iif;
- ipv6_addr_copy(&mip6_report_rl.src, src);
- ipv6_addr_copy(&mip6_report_rl.dst, dst);
+ mip6_report_rl.src = *src;
+ mip6_report_rl.dst = *dst;
allow = 1;
}
spin_unlock_bh(&mip6_report_rl.lock);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0cb78d7ddaf..2854705b15e 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -141,7 +141,7 @@ struct neigh_table nd_tbl = {
.gc_staletime = 60 * HZ,
.reachable_time = ND_REACHABLE_TIME,
.delay_probe_time = 5 * HZ,
- .queue_len = 3,
+ .queue_len_bytes = 64*1024,
.ucast_probes = 3,
.mcast_probes = 3,
.anycast_delay = 1 * HZ,
@@ -446,6 +446,8 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
struct sock *sk = net->ipv6.ndisc_sk;
struct sk_buff *skb;
struct icmp6hdr *hdr;
+ int hlen = LL_RESERVED_SPACE(dev);
+ int tlen = dev->needed_tailroom;
int len;
int err;
u8 *opt;
@@ -459,7 +461,7 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
skb = sock_alloc_send_skb(sk,
(MAX_HEADER + sizeof(struct ipv6hdr) +
- len + LL_ALLOCATED_SPACE(dev)),
+ len + hlen + tlen),
1, &err);
if (!skb) {
ND_PRINTK0(KERN_ERR
@@ -468,7 +470,7 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
return NULL;
}
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, hlen);
ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len);
skb->transport_header = skb->tail;
@@ -479,7 +481,7 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
opt = skb_transport_header(skb) + sizeof(struct icmp6hdr);
if (target) {
- ipv6_addr_copy((struct in6_addr *)opt, target);
+ *(struct in6_addr *)opt = *target;
opt += sizeof(*target);
}
@@ -1533,6 +1535,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
struct inet6_dev *idev;
struct flowi6 fl6;
u8 *opt;
+ int hlen, tlen;
int rd_len;
int err;
u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;
@@ -1590,9 +1593,11 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
rd_len &= ~0x7;
len += rd_len;
+ hlen = LL_RESERVED_SPACE(dev);
+ tlen = dev->needed_tailroom;
buff = sock_alloc_send_skb(sk,
(MAX_HEADER + sizeof(struct ipv6hdr) +
- len + LL_ALLOCATED_SPACE(dev)),
+ len + hlen + tlen),
1, &err);
if (buff == NULL) {
ND_PRINTK0(KERN_ERR
@@ -1601,7 +1606,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
goto release;
}
- skb_reserve(buff, LL_RESERVED_SPACE(dev));
+ skb_reserve(buff, hlen);
ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr,
IPPROTO_ICMPV6, len);
@@ -1617,9 +1622,9 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
*/
addrp = (struct in6_addr *)(icmph + 1);
- ipv6_addr_copy(addrp, target);
+ *addrp = *target;
addrp++;
- ipv6_addr_copy(addrp, &ipv6_hdr(skb)->daddr);
+ *addrp = ipv6_hdr(skb)->daddr;
opt = (u8*) (addrp + 1);
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index a5a4c5dd539..b5a2aa58a03 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -93,8 +93,8 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_TCP;
- ipv6_addr_copy(&fl6.saddr, &oip6h->daddr);
- ipv6_addr_copy(&fl6.daddr, &oip6h->saddr);
+ fl6.saddr = oip6h->daddr;
+ fl6.daddr = oip6h->saddr;
fl6.fl6_sport = otcph.dest;
fl6.fl6_dport = otcph.source;
security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
@@ -129,8 +129,8 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
*(__be32 *)ip6h = htonl(0x60000000 | (tclass << 20));
ip6h->hop_limit = ip6_dst_hoplimit(dst);
ip6h->nexthdr = IPPROTO_TCP;
- ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr);
- ipv6_addr_copy(&ip6h->daddr, &oip6h->saddr);
+ ip6h->saddr = oip6h->daddr;
+ ip6h->daddr = oip6h->saddr;
tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
/* Truncate to length (no data) */
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 1008ce94bc3..fdeb6d03da8 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -142,11 +142,7 @@ static const struct snmp_mib snmp6_udplite6_list[] = {
SNMP_MIB_SENTINEL
};
-/* can be called either with percpu mib (pcpumib != NULL),
- * or shared one (smib != NULL)
- */
-static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **pcpumib,
- atomic_long_t *smib)
+static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib)
{
char name[32];
int i;
@@ -163,14 +159,14 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **pcpum
snprintf(name, sizeof(name), "Icmp6%s%s",
i & 0x100 ? "Out" : "In", p);
seq_printf(seq, "%-32s\t%lu\n", name,
- pcpumib ? snmp_fold_field(pcpumib, i) : atomic_long_read(smib + i));
+ atomic_long_read(smib + i));
}
/* print by number (nonzero only) - ICMPMsgStat format */
for (i = 0; i < ICMP6MSG_MIB_MAX; i++) {
unsigned long val;
- val = pcpumib ? snmp_fold_field(pcpumib, i) : atomic_long_read(smib + i);
+ val = atomic_long_read(smib + i);
if (!val)
continue;
snprintf(name, sizeof(name), "Icmp6%sType%u",
@@ -215,8 +211,7 @@ static int snmp6_seq_show(struct seq_file *seq, void *v)
snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics,
NULL, snmp6_icmp6_list);
- snmp6_seq_show_icmpv6msg(seq,
- (void __percpu **)net->mib.icmpv6msg_statistics, NULL);
+ snmp6_seq_show_icmpv6msg(seq, net->mib.icmpv6msg_statistics->mibs);
snmp6_seq_show_item(seq, (void __percpu **)net->mib.udp_stats_in6,
NULL, snmp6_udp6_list);
snmp6_seq_show_item(seq, (void __percpu **)net->mib.udplite_stats_in6,
@@ -246,7 +241,7 @@ static int snmp6_dev_seq_show(struct seq_file *seq, void *v)
snmp6_ipstats_list);
snmp6_seq_show_item(seq, NULL, idev->stats.icmpv6dev->mibs,
snmp6_icmp6_list);
- snmp6_seq_show_icmpv6msg(seq, NULL, idev->stats.icmpv6msgdev->mibs);
+ snmp6_seq_show_icmpv6msg(seq, idev->stats.icmpv6msgdev->mibs);
return 0;
}
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 331af3b882a..a4894f4f194 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -299,9 +299,9 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
}
inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
- ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
+ np->rcv_saddr = addr->sin6_addr;
if (!(addr_type & IPV6_ADDR_MULTICAST))
- ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
+ np->saddr = addr->sin6_addr;
err = 0;
out_unlock:
rcu_read_unlock();
@@ -383,7 +383,8 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
/* Charge it to the socket. */
- if (ip_queue_rcv_skb(sk, skb) < 0) {
+ skb_dst_drop(skb);
+ if (sock_queue_rcv_skb(sk, skb) < 0) {
kfree_skb(skb);
return NET_RX_DROP;
}
@@ -494,7 +495,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
if (sin6) {
sin6->sin6_family = AF_INET6;
sin6->sin6_port = 0;
- ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr);
+ sin6->sin6_addr = ipv6_hdr(skb)->saddr;
sin6->sin6_flowinfo = 0;
sin6->sin6_scope_id = 0;
if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
@@ -610,6 +611,8 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
struct sk_buff *skb;
int err;
struct rt6_info *rt = (struct rt6_info *)*dstp;
+ int hlen = LL_RESERVED_SPACE(rt->dst.dev);
+ int tlen = rt->dst.dev->needed_tailroom;
if (length > rt->dst.dev->mtu) {
ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
@@ -619,11 +622,11 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
goto out;
skb = sock_alloc_send_skb(sk,
- length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15,
+ length + hlen + tlen + 15,
flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto error;
- skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev));
+ skb_reserve(skb, hlen);
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
@@ -843,11 +846,11 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
goto out;
if (!ipv6_addr_any(daddr))
- ipv6_addr_copy(&fl6.daddr, daddr);
+ fl6.daddr = *daddr;
else
fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
- ipv6_addr_copy(&fl6.saddr, &np->saddr);
+ fl6.saddr = np->saddr;
final_p = fl6_update_dst(&fl6, opt, &final);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index dfb164e9051..b69fae76a6f 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -153,8 +153,8 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a)
fq->id = arg->id;
fq->user = arg->user;
- ipv6_addr_copy(&fq->saddr, arg->src);
- ipv6_addr_copy(&fq->daddr, arg->dst);
+ fq->saddr = *arg->src;
+ fq->daddr = *arg->dst;
}
EXPORT_SYMBOL(ip6_frag_init);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 3399dd32628..0e381bb9468 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -731,14 +731,14 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
if (rt->rt6i_dst.plen != 128 &&
ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
rt->rt6i_flags |= RTF_ANYCAST;
- ipv6_addr_copy(&rt->rt6i_gateway, daddr);
+ rt->rt6i_gateway = *daddr;
}
rt->rt6i_flags |= RTF_CACHE;
#ifdef CONFIG_IPV6_SUBTREES
if (rt->rt6i_src.plen && saddr) {
- ipv6_addr_copy(&rt->rt6i_src.addr, saddr);
+ rt->rt6i_src.addr = *saddr;
rt->rt6i_src.plen = 128;
}
#endif
@@ -934,7 +934,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
in6_dev_hold(rt->rt6i_idev);
rt->rt6i_expires = 0;
- ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
+ rt->rt6i_gateway = ort->rt6i_gateway;
rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
rt->rt6i_metric = 0;
@@ -1094,7 +1094,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
rt->dst.output = ip6_output;
dst_set_neighbour(&rt->dst, neigh);
atomic_set(&rt->dst.__refcnt, 1);
- ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
+ rt->rt6i_dst.addr = *addr;
rt->rt6i_dst.plen = 128;
rt->rt6i_idev = idev;
dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
@@ -1237,9 +1237,18 @@ int ip6_route_add(struct fib6_config *cfg)
if (cfg->fc_metric == 0)
cfg->fc_metric = IP6_RT_PRIO_USER;
- table = fib6_new_table(net, cfg->fc_table);
+ err = -ENOBUFS;
+ if (NULL != cfg->fc_nlinfo.nlh &&
+ !(cfg->fc_nlinfo.nlh->nlmsg_flags&NLM_F_CREATE)) {
+ table = fib6_get_table(net, cfg->fc_table);
+ if (table == NULL) {
+ printk(KERN_WARNING "IPv6: NLM_F_CREATE should be specified when creating new route\n");
+ table = fib6_new_table(net, cfg->fc_table);
+ }
+ } else {
+ table = fib6_new_table(net, cfg->fc_table);
+ }
if (table == NULL) {
- err = -ENOBUFS;
goto out;
}
@@ -1322,7 +1331,7 @@ int ip6_route_add(struct fib6_config *cfg)
int gwa_type;
gw_addr = &cfg->fc_gateway;
- ipv6_addr_copy(&rt->rt6i_gateway, gw_addr);
+ rt->rt6i_gateway = *gw_addr;
gwa_type = ipv6_addr_type(gw_addr);
if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
@@ -1376,7 +1385,7 @@ int ip6_route_add(struct fib6_config *cfg)
err = -EINVAL;
goto out;
}
- ipv6_addr_copy(&rt->rt6i_prefsrc.addr, &cfg->fc_prefsrc);
+ rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
rt->rt6i_prefsrc.plen = 128;
} else
rt->rt6i_prefsrc.plen = 0;
@@ -1573,7 +1582,7 @@ static struct rt6_info *ip6_route_redirect(const struct in6_addr *dest,
},
};
- ipv6_addr_copy(&rdfl.gateway, gateway);
+ rdfl.gateway = *gateway;
if (rt6_need_strict(dest))
flags |= RT6_LOOKUP_F_IFACE;
@@ -1629,7 +1638,7 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
if (on_link)
nrt->rt6i_flags &= ~RTF_GATEWAY;
- ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
+ nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
dst_set_neighbour(&nrt->dst, neigh_clone(neigh));
if (ip6_ins_rt(nrt))
@@ -1775,7 +1784,7 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
rt->dst.output = ort->dst.output;
rt->dst.flags |= DST_HOST;
- ipv6_addr_copy(&rt->rt6i_dst.addr, dest);
+ rt->rt6i_dst.addr = *dest;
rt->rt6i_dst.plen = 128;
dst_copy_metrics(&rt->dst, &ort->dst);
rt->dst.error = ort->dst.error;
@@ -1785,7 +1794,7 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
rt->dst.lastuse = jiffies;
rt->rt6i_expires = 0;
- ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
+ rt->rt6i_gateway = ort->rt6i_gateway;
rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
rt->rt6i_metric = 0;
@@ -1848,8 +1857,8 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
.fc_nlinfo.nl_net = net,
};
- ipv6_addr_copy(&cfg.fc_dst, prefix);
- ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
+ cfg.fc_dst = *prefix;
+ cfg.fc_gateway = *gwaddr;
/* We should treat it as a default route if prefix length is 0. */
if (!prefixlen)
@@ -1898,7 +1907,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
.fc_nlinfo.nl_net = dev_net(dev),
};
- ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
+ cfg.fc_gateway = *gwaddr;
ip6_route_add(&cfg);
@@ -1944,9 +1953,9 @@ static void rtmsg_to_fib6_config(struct net *net,
cfg->fc_nlinfo.nl_net = net;
- ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst);
- ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src);
- ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway);
+ cfg->fc_dst = rtmsg->rtmsg_dst;
+ cfg->fc_src = rtmsg->rtmsg_src;
+ cfg->fc_gateway = rtmsg->rtmsg_gateway;
}
int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
@@ -2080,7 +2089,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
}
dst_set_neighbour(&rt->dst, neigh);
- ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
+ rt->rt6i_dst.addr = *addr;
rt->rt6i_dst.plen = 128;
rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
@@ -2098,7 +2107,7 @@ int ip6_route_get_saddr(struct net *net,
struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt);
int err = 0;
if (rt->rt6i_prefsrc.plen)
- ipv6_addr_copy(saddr, &rt->rt6i_prefsrc.addr);
+ *saddr = rt->rt6i_prefsrc.addr;
else
err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
daddr, prefs, saddr);
@@ -2437,7 +2446,7 @@ static int rt6_fill_node(struct net *net,
if (rt->rt6i_prefsrc.plen) {
struct in6_addr saddr_buf;
- ipv6_addr_copy(&saddr_buf, &rt->rt6i_prefsrc.addr);
+ saddr_buf = rt->rt6i_prefsrc.addr;
NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
}
@@ -2511,14 +2520,14 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
goto errout;
- ipv6_addr_copy(&fl6.saddr, nla_data(tb[RTA_SRC]));
+ fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
}
if (tb[RTA_DST]) {
if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
goto errout;
- ipv6_addr_copy(&fl6.daddr, nla_data(tb[RTA_DST]));
+ fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
}
if (tb[RTA_IIF])
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index a7a18602a04..50968f226e7 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -91,7 +91,7 @@ struct pcpu_tstats {
unsigned long rx_bytes;
unsigned long tx_packets;
unsigned long tx_bytes;
-};
+} __attribute__((aligned(4*sizeof(unsigned long))));
static struct net_device_stats *ipip6_get_stats(struct net_device *dev)
{
@@ -914,7 +914,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
goto done;
#ifdef CONFIG_IPV6_SIT_6RD
} else {
- ipv6_addr_copy(&ip6rd.prefix, &t->ip6rd.prefix);
+ ip6rd.prefix = t->ip6rd.prefix;
ip6rd.relay_prefix = t->ip6rd.relay_prefix;
ip6rd.prefixlen = t->ip6rd.prefixlen;
ip6rd.relay_prefixlen = t->ip6rd.relay_prefixlen;
@@ -1082,7 +1082,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
if (relay_prefix != ip6rd.relay_prefix)
goto done;
- ipv6_addr_copy(&t->ip6rd.prefix, &prefix);
+ t->ip6rd.prefix = prefix;
t->ip6rd.relay_prefix = relay_prefix;
t->ip6rd.prefixlen = ip6rd.prefixlen;
t->ip6rd.relay_prefixlen = ip6rd.relay_prefixlen;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 5a0d6648bbb..8e951d8d3b8 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -200,8 +200,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
req->mss = mss;
ireq->rmt_port = th->source;
ireq->loc_port = th->dest;
- ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
- ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);
+ ireq6->rmt_addr = ipv6_hdr(skb)->saddr;
+ ireq6->loc_addr = ipv6_hdr(skb)->daddr;
if (ipv6_opt_accepted(sk, skb) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
@@ -237,9 +237,9 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
struct flowi6 fl6;
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_TCP;
- ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
+ fl6.daddr = ireq6->rmt_addr;
final_p = fl6_update_dst(&fl6, np->opt, &final);
- ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
+ fl6.saddr = ireq6->loc_addr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = inet_rsk(req)->rmt_port;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 2dea4bb7b54..9d74eee334d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -153,7 +153,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
- ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
+ usin->sin6_addr = flowlabel->dst;
fl6_sock_release(flowlabel);
}
}
@@ -195,7 +195,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
tp->write_seq = 0;
}
- ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
+ np->daddr = usin->sin6_addr;
np->flow_label = fl6.flowlabel;
/*
@@ -244,9 +244,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
saddr = &np->rcv_saddr;
fl6.flowi6_proto = IPPROTO_TCP;
- ipv6_addr_copy(&fl6.daddr, &np->daddr);
- ipv6_addr_copy(&fl6.saddr,
- (saddr ? saddr : &np->saddr));
+ fl6.daddr = np->daddr;
+ fl6.saddr = saddr ? *saddr : np->saddr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = usin->sin6_port;
@@ -264,11 +263,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
if (saddr == NULL) {
saddr = &fl6.saddr;
- ipv6_addr_copy(&np->rcv_saddr, saddr);
+ np->rcv_saddr = *saddr;
}
/* set the source address */
- ipv6_addr_copy(&np->saddr, saddr);
+ np->saddr = *saddr;
inet->inet_rcv_saddr = LOOPBACK4_IPV6;
sk->sk_gso_type = SKB_GSO_TCPV6;
@@ -398,8 +397,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
*/
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_TCP;
- ipv6_addr_copy(&fl6.daddr, &np->daddr);
- ipv6_addr_copy(&fl6.saddr, &np->saddr);
+ fl6.daddr = np->daddr;
+ fl6.saddr = np->saddr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = inet->inet_dport;
@@ -489,8 +488,8 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_TCP;
- ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
- ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
+ fl6.daddr = treq->rmt_addr;
+ fl6.saddr = treq->loc_addr;
fl6.flowlabel = 0;
fl6.flowi6_oif = treq->iif;
fl6.flowi6_mark = sk->sk_mark;
@@ -512,7 +511,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
if (skb) {
__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
- ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
+ fl6.daddr = treq->rmt_addr;
err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
err = net_xmit_eval(err);
}
@@ -617,8 +616,7 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
tp->md5sig_info->alloced6++;
}
- ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
- peer);
+ tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr = *peer;
tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
@@ -750,8 +748,8 @@ static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
bp = &hp->md5_blk.ip6;
/* 1. TCP pseudo-header (RFC2460) */
- ipv6_addr_copy(&bp->saddr, saddr);
- ipv6_addr_copy(&bp->daddr, daddr);
+ bp->saddr = *saddr;
+ bp->daddr = *daddr;
bp->protocol = cpu_to_be32(IPPROTO_TCP);
bp->len = cpu_to_be32(nbytes);
@@ -1039,8 +1037,8 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
#endif
memset(&fl6, 0, sizeof(fl6));
- ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
- ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr);
+ fl6.daddr = ipv6_hdr(skb)->saddr;
+ fl6.saddr = ipv6_hdr(skb)->daddr;
buff->ip_summed = CHECKSUM_PARTIAL;
buff->csum = 0;
@@ -1250,8 +1248,8 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_openreq_init(req, &tmp_opt, skb);
treq = inet6_rsk(req);
- ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
- ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
+ treq->rmt_addr = ipv6_hdr(skb)->saddr;
+ treq->loc_addr = ipv6_hdr(skb)->daddr;
if (!want_cookie || tmp_opt.tstamp_ok)
TCP_ECN_create_request(req, tcp_hdr(skb));
@@ -1381,7 +1379,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
- ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
+ newnp->rcv_saddr = newnp->saddr;
inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
newsk->sk_backlog_rcv = tcp_v4_do_rcv;
@@ -1445,9 +1443,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
- ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
- ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
- ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
+ newnp->daddr = treq->rmt_addr;
+ newnp->saddr = treq->loc_addr;
+ newnp->rcv_saddr = treq->loc_addr;
newsk->sk_bound_dev_if = treq->iif;
/* Now IPv6 options...
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 846f4757eb8..84ec9db86ee 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -417,8 +417,7 @@ try_again:
ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
&sin6->sin6_addr);
else {
- ipv6_addr_copy(&sin6->sin6_addr,
- &ipv6_hdr(skb)->saddr);
+ sin6->sin6_addr = ipv6_hdr(skb)->saddr;
if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
sin6->sin6_scope_id = IP6CB(skb)->iif;
}
@@ -538,7 +537,9 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
goto drop;
}
- if ((rc = ip_queue_rcv_skb(sk, skb)) < 0) {
+ skb_dst_drop(skb);
+ rc = sock_queue_rcv_skb(sk, skb);
+ if (rc < 0) {
/* Note that an ENOMEM error is charged twice */
if (rc == -ENOMEM)
UDP6_INC_STATS_BH(sock_net(sk),
@@ -1113,11 +1114,11 @@ do_udp_sendmsg:
fl6.flowi6_proto = sk->sk_protocol;
if (!ipv6_addr_any(daddr))
- ipv6_addr_copy(&fl6.daddr, daddr);
+ fl6.daddr = *daddr;
else
fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
- ipv6_addr_copy(&fl6.saddr, &np->saddr);
+ fl6.saddr = np->saddr;
fl6.fl6_sport = inet->inet_sport;
final_p = fl6_update_dst(&fl6, opt, &final);
@@ -1298,7 +1299,8 @@ static int udp6_ufo_send_check(struct sk_buff *skb)
return 0;
}
-static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
+static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
+ netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c
index 3437d7d4eed..a81ce945075 100644
--- a/net/ipv6/xfrm6_mode_beet.c
+++ b/net/ipv6/xfrm6_mode_beet.c
@@ -72,8 +72,8 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb)
top_iph->nexthdr = IPPROTO_BEETPH;
}
- ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr);
- ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr);
+ top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
+ top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
return 0;
}
@@ -99,8 +99,8 @@ static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
ip6h = ipv6_hdr(skb);
ip6h->payload_len = htons(skb->len - size);
- ipv6_addr_copy(&ip6h->daddr, (struct in6_addr *) &x->sel.daddr.a6);
- ipv6_addr_copy(&ip6h->saddr, (struct in6_addr *) &x->sel.saddr.a6);
+ ip6h->daddr = *(struct in6_addr *)&x->sel.daddr.a6;
+ ip6h->saddr = *(struct in6_addr *)&x->sel.saddr.a6;
err = 0;
out:
return err;
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 4d6edff0498..261e6e6f487 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -55,8 +55,8 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
dsfield &= ~INET_ECN_MASK;
ipv6_change_dsfield(top_iph, 0, dsfield);
top_iph->hop_limit = ip6_dst_hoplimit(dst->child);
- ipv6_addr_copy(&top_iph->saddr, (const struct in6_addr *)&x->props.saddr);
- ipv6_addr_copy(&top_iph->daddr, (const struct in6_addr *)&x->id.daddr);
+ top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
+ top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
return 0;
}
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index faae41737fc..4eeff89c1aa 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -49,7 +49,7 @@ static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
struct sock *sk = skb->sk;
fl6.flowi6_oif = sk->sk_bound_dev_if;
- ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->daddr);
+ fl6.daddr = ipv6_hdr(skb)->daddr;
ipv6_local_rxpmtu(sk, &fl6, mtu);
}
@@ -60,7 +60,7 @@ static void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
struct sock *sk = skb->sk;
fl6.fl6_dport = inet_sk(sk)->inet_dport;
- ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->daddr);
+ fl6.daddr = ipv6_hdr(skb)->daddr;
ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);
}
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index d879f7efbd1..8ea65e03273 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -132,8 +132,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
memset(fl6, 0, sizeof(struct flowi6));
fl6->flowi6_mark = skb->mark;
- ipv6_addr_copy(&fl6->daddr, reverse ? &hdr->saddr : &hdr->daddr);
- ipv6_addr_copy(&fl6->saddr, reverse ? &hdr->daddr : &hdr->saddr);
+ fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
+ fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
while (nh + offset + 1 < skb->data ||
pskb_may_pull(skb, nh + offset + 1 - skb->data)) {
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index f2d72b8a3fa..3f2f7c4ab72 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -27,8 +27,8 @@ __xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
/* Initialize temporary selector matching only
* to current session. */
- ipv6_addr_copy((struct in6_addr *)&sel->daddr, &fl6->daddr);
- ipv6_addr_copy((struct in6_addr *)&sel->saddr, &fl6->saddr);
+ *(struct in6_addr *)&sel->daddr = fl6->daddr;
+ *(struct in6_addr *)&sel->saddr = fl6->saddr;
sel->dport = xfrm_flowi_dport(fl, &fl6->uli);
sel->dport_mask = htons(0xffff);
sel->sport = xfrm_flowi_sport(fl, &fl6->uli);
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 32e3bb02611..5c93f2952b0 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -1461,14 +1461,12 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
}
/* Allocate a new instance */
- new = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
+ new = kmemdup(orig, sizeof(struct tsap_cb), GFP_ATOMIC);
if (!new) {
IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__);
spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
return NULL;
}
- /* Dup */
- memcpy(new, orig, sizeof(struct tsap_cb));
spin_lock_init(&new->lock);
/* We don't need the old instance any more */
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 1e733e9073d..bfc0bef170c 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -712,7 +712,7 @@ static unsigned int pfkey_sockaddr_fill(const xfrm_address_t *xaddr, __be16 port
sin6->sin6_family = AF_INET6;
sin6->sin6_port = port;
sin6->sin6_flowinfo = 0;
- ipv6_addr_copy(&sin6->sin6_addr, (const struct in6_addr *)xaddr->a6);
+ sin6->sin6_addr = *(struct in6_addr *)xaddr->a6;
sin6->sin6_scope_id = 0;
return 128;
}
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 93b24342265..476b106c0b1 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -177,7 +177,8 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
if (sdata->vif.type == NL80211_IFTYPE_AP ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_STATION)
memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index b3f65520e7a..39d72ccaffb 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -78,7 +78,8 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
if (sdata->vif.type == NL80211_IFTYPE_AP ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_STATION)
memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
@@ -372,13 +373,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
pubsta->addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
- /*
- * The aggregation code is not prepared to handle
- * anything but STA/AP due to the BSSID handling.
- * IBSS could work in the code but isn't supported
- * by drivers or the standard.
- */
if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+ sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
sdata->vif.type != NL80211_IFTYPE_AP)
return -EINVAL;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d06c65fa552..1063a7e57d6 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -411,7 +411,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
BIT(NL80211_STA_FLAG_WME) |
BIT(NL80211_STA_FLAG_MFP) |
- BIT(NL80211_STA_FLAG_AUTHENTICATED);
+ BIT(NL80211_STA_FLAG_AUTHENTICATED) |
+ BIT(NL80211_STA_FLAG_TDLS_PEER);
if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED);
if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE))
@@ -422,6 +423,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP);
if (test_sta_flag(sta, WLAN_STA_AUTH))
sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED);
+ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
+ sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER);
}
@@ -488,6 +491,31 @@ static void ieee80211_config_ap_ssid(struct ieee80211_sub_if_data *sdata,
(params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE);
}
+static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
+ u8 *resp, size_t resp_len)
+{
+ struct sk_buff *new, *old;
+
+ if (!resp || !resp_len)
+ return -EINVAL;
+
+ old = sdata->u.ap.probe_resp;
+
+ new = dev_alloc_skb(resp_len);
+ if (!new)
+ return -ENOMEM;
+
+ memcpy(skb_put(new, resp_len), resp, resp_len);
+
+ rcu_assign_pointer(sdata->u.ap.probe_resp, new);
+ synchronize_rcu();
+
+ if (old)
+ dev_kfree_skb(old);
+
+ return 0;
+}
+
/*
* This handles both adding a beacon and setting new beacon info
*/
@@ -498,6 +526,7 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
int new_head_len, new_tail_len;
int size;
int err = -EINVAL;
+ u32 changed = 0;
old = rtnl_dereference(sdata->u.ap.beacon);
@@ -581,11 +610,17 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
kfree(old);
+ err = ieee80211_set_probe_resp(sdata, params->probe_resp,
+ params->probe_resp_len);
+ if (!err)
+ changed |= BSS_CHANGED_AP_PROBE_RESP;
+
ieee80211_config_ap_ssid(sdata, params);
+ changed |= BSS_CHANGED_BEACON_ENABLED |
+ BSS_CHANGED_BEACON |
+ BSS_CHANGED_SSID;
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
- BSS_CHANGED_BEACON |
- BSS_CHANGED_SSID);
+ ieee80211_bss_info_change_notify(sdata, changed);
return 0;
}
@@ -594,6 +629,8 @@ static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev,
{
struct ieee80211_sub_if_data *sdata;
struct beacon_data *old;
+ struct ieee80211_sub_if_data *vlan;
+ int ret;
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -601,7 +638,24 @@ static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev,
if (old)
return -EALREADY;
- return ieee80211_config_beacon(sdata, params);
+ ret = ieee80211_config_beacon(sdata, params);
+ if (ret)
+ return ret;
+
+ /*
+	 * Apply the control port protocol; this allows us to
+	 * not encrypt dynamic WEP control frames.
+ */
+ sdata->control_port_protocol = params->crypto.control_port_ethertype;
+ sdata->control_port_no_encrypt = params->crypto.control_port_no_encrypt;
+ list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) {
+ vlan->control_port_protocol =
+ params->crypto.control_port_ethertype;
+ vlan->control_port_no_encrypt =
+ params->crypto.control_port_no_encrypt;
+ }
+
+ return 0;
}
static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev,
@@ -847,7 +901,12 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
sta_apply_parameters(local, sta, params);
- rate_control_rate_init(sta);
+ /*
+ * for TDLS, rate control should be initialized only when supported
+ * rates are known.
+ */
+ if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER))
+ rate_control_rate_init(sta);
layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
sdata->vif.type == NL80211_IFTYPE_AP;
@@ -931,6 +990,9 @@ static int ieee80211_change_station(struct wiphy *wiphy,
sta_apply_parameters(local, sta, params);
+ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && params->supported_rates)
+ rate_control_rate_init(sta);
+
rcu_read_unlock();
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
@@ -1394,7 +1456,7 @@ static int ieee80211_set_channel(struct wiphy *wiphy,
(old_oper_type != local->_oper_channel_type))
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- if ((sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR) &&
+ if (sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR &&
old_vif_oper_type != sdata->vif.bss_conf.channel_type)
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
@@ -1917,7 +1979,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
const u8 *buf, size_t len, bool no_cck,
- u64 *cookie)
+ bool dont_wait_for_ack, u64 *cookie)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
@@ -1925,10 +1987,15 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
struct sta_info *sta;
struct ieee80211_work *wk;
const struct ieee80211_mgmt *mgmt = (void *)buf;
- u32 flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
- IEEE80211_TX_CTL_REQ_TX_STATUS;
+ u32 flags;
bool is_offchan = false;
+ if (dont_wait_for_ack)
+ flags = IEEE80211_TX_CTL_NO_ACK;
+ else
+ flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
+ IEEE80211_TX_CTL_REQ_TX_STATUS;
+
/* Check that we are on the requested channel for transmission */
if (chan != local->tmp_channel &&
chan != local->oper_channel)
@@ -2488,6 +2555,82 @@ static int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
+static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u64 *cookie)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_qos_hdr *nullfunc;
+ struct sk_buff *skb;
+ int size = sizeof(*nullfunc);
+ __le16 fc;
+ bool qos;
+ struct ieee80211_tx_info *info;
+ struct sta_info *sta;
+
+ rcu_read_lock();
+ sta = sta_info_get(sdata, peer);
+ if (sta) {
+ qos = test_sta_flag(sta, WLAN_STA_WME);
+ rcu_read_unlock();
+ } else {
+ rcu_read_unlock();
+ return -ENOLINK;
+ }
+
+ if (qos) {
+ fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_NULLFUNC |
+ IEEE80211_FCTL_FROMDS);
+ } else {
+ size -= 2;
+ fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_NULLFUNC |
+ IEEE80211_FCTL_FROMDS);
+ }
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + size);
+ if (!skb)
+ return -ENOMEM;
+
+ skb->dev = dev;
+
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ nullfunc = (void *) skb_put(skb, size);
+ nullfunc->frame_control = fc;
+ nullfunc->duration_id = 0;
+ memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
+ memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
+ memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
+ nullfunc->seq_ctrl = 0;
+
+ info = IEEE80211_SKB_CB(skb);
+
+ info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS |
+ IEEE80211_TX_INTFL_NL80211_FRAME_TX;
+
+ skb_set_queue_mapping(skb, IEEE80211_AC_VO);
+ skb->priority = 7;
+ if (qos)
+ nullfunc->qos_ctrl = cpu_to_le16(7);
+
+ local_bh_disable();
+ ieee80211_xmit(sdata, skb);
+ local_bh_enable();
+
+ *cookie = (unsigned long) skb;
+ return 0;
+}
+
+static struct ieee80211_channel *
+ieee80211_wiphy_get_channel(struct wiphy *wiphy)
+{
+ struct ieee80211_local *local = wiphy_priv(wiphy);
+
+ return local->oper_channel;
+}
+
struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -2553,4 +2696,6 @@ struct cfg80211_ops mac80211_config_ops = {
.set_rekey_data = ieee80211_set_rekey_data,
.tdls_oper = ieee80211_tdls_oper,
.tdls_mgmt = ieee80211_tdls_mgmt,
+ .probe_client = ieee80211_probe_client,
+ .get_channel = ieee80211_wiphy_get_channel,
};
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 883996b2f99..00cefcb493e 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -190,7 +190,7 @@ static ssize_t uapsd_max_sp_len_write(struct file *file,
return -EFAULT;
buf[len] = '\0';
- ret = strict_strtoul(buf, 0, &val);
+ ret = kstrtoul(buf, 0, &val);
if (ret)
return -EINVAL;
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 5f165d7eb2d..b12ed52732c 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -5,6 +5,11 @@
#include "ieee80211_i.h"
#include "driver-trace.h"
+static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
+{
+ WARN_ON(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER));
+}
+
static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
{
local->ops->tx(&local->hw, skb);
@@ -69,15 +74,23 @@ static inline int drv_resume(struct ieee80211_local *local)
#endif
static inline int drv_add_interface(struct ieee80211_local *local,
- struct ieee80211_vif *vif)
+ struct ieee80211_sub_if_data *sdata)
{
int ret;
might_sleep();
- trace_drv_add_interface(local, vif_to_sdata(vif));
- ret = local->ops->add_interface(&local->hw, vif);
+ if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ sdata->vif.type == NL80211_IFTYPE_MONITOR))
+ return -EINVAL;
+
+ trace_drv_add_interface(local, sdata);
+ ret = local->ops->add_interface(&local->hw, &sdata->vif);
trace_drv_return_int(local, ret);
+
+ if (ret == 0)
+ sdata->flags |= IEEE80211_SDATA_IN_DRIVER;
+
return ret;
}
@@ -89,6 +102,8 @@ static inline int drv_change_interface(struct ieee80211_local *local,
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_change_interface(local, sdata, type, p2p);
ret = local->ops->change_interface(&local->hw, &sdata->vif, type, p2p);
trace_drv_return_int(local, ret);
@@ -96,12 +111,15 @@ static inline int drv_change_interface(struct ieee80211_local *local,
}
static inline void drv_remove_interface(struct ieee80211_local *local,
- struct ieee80211_vif *vif)
+ struct ieee80211_sub_if_data *sdata)
{
might_sleep();
- trace_drv_remove_interface(local, vif_to_sdata(vif));
- local->ops->remove_interface(&local->hw, vif);
+ check_sdata_in_driver(sdata);
+
+ trace_drv_remove_interface(local, sdata);
+ local->ops->remove_interface(&local->hw, &sdata->vif);
+ sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER;
trace_drv_return_void(local);
}
@@ -124,6 +142,8 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
{
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_bss_info_changed(local, sdata, info, changed);
if (local->ops->bss_info_changed)
local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed);
@@ -139,6 +159,8 @@ static inline int drv_tx_sync(struct ieee80211_local *local,
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_tx_sync(local, sdata, bssid, type);
if (local->ops->tx_sync)
ret = local->ops->tx_sync(&local->hw, &sdata->vif,
@@ -154,6 +176,8 @@ static inline void drv_finish_tx_sync(struct ieee80211_local *local,
{
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_finish_tx_sync(local, sdata, bssid, type);
if (local->ops->finish_tx_sync)
local->ops->finish_tx_sync(&local->hw, &sdata->vif,
@@ -211,6 +235,8 @@ static inline int drv_set_key(struct ieee80211_local *local,
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_set_key(local, cmd, sdata, sta, key);
ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key);
trace_drv_return_int(local, ret);
@@ -228,6 +254,8 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local,
if (sta)
ista = &sta->sta;
+ check_sdata_in_driver(sdata);
+
trace_drv_update_tkip_key(local, sdata, conf, ista, iv32);
if (local->ops->update_tkip_key)
local->ops->update_tkip_key(&local->hw, &sdata->vif, conf,
@@ -243,6 +271,8 @@ static inline int drv_hw_scan(struct ieee80211_local *local,
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_hw_scan(local, sdata);
ret = local->ops->hw_scan(&local->hw, &sdata->vif, req);
trace_drv_return_int(local, ret);
@@ -254,6 +284,8 @@ static inline void drv_cancel_hw_scan(struct ieee80211_local *local,
{
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_cancel_hw_scan(local, sdata);
local->ops->cancel_hw_scan(&local->hw, &sdata->vif);
trace_drv_return_void(local);
@@ -269,6 +301,8 @@ drv_sched_scan_start(struct ieee80211_local *local,
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_sched_scan_start(local, sdata);
ret = local->ops->sched_scan_start(&local->hw, &sdata->vif,
req, ies);
@@ -281,6 +315,8 @@ static inline void drv_sched_scan_stop(struct ieee80211_local *local,
{
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_sched_scan_stop(local, sdata);
local->ops->sched_scan_stop(&local->hw, &sdata->vif);
trace_drv_return_void(local);
@@ -377,6 +413,8 @@ static inline void drv_sta_notify(struct ieee80211_local *local,
enum sta_notify_cmd cmd,
struct ieee80211_sta *sta)
{
+ check_sdata_in_driver(sdata);
+
trace_drv_sta_notify(local, sdata, cmd, sta);
if (local->ops->sta_notify)
local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta);
@@ -391,6 +429,8 @@ static inline int drv_sta_add(struct ieee80211_local *local,
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_sta_add(local, sdata, sta);
if (local->ops->sta_add)
ret = local->ops->sta_add(&local->hw, &sdata->vif, sta);
@@ -406,6 +446,8 @@ static inline void drv_sta_remove(struct ieee80211_local *local,
{
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_sta_remove(local, sdata, sta);
if (local->ops->sta_remove)
local->ops->sta_remove(&local->hw, &sdata->vif, sta);
@@ -421,6 +463,8 @@ static inline int drv_conf_tx(struct ieee80211_local *local,
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_conf_tx(local, sdata, queue, params);
if (local->ops->conf_tx)
ret = local->ops->conf_tx(&local->hw, &sdata->vif,
@@ -436,6 +480,8 @@ static inline u64 drv_get_tsf(struct ieee80211_local *local,
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_get_tsf(local, sdata);
if (local->ops->get_tsf)
ret = local->ops->get_tsf(&local->hw, &sdata->vif);
@@ -449,6 +495,8 @@ static inline void drv_set_tsf(struct ieee80211_local *local,
{
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_set_tsf(local, sdata, tsf);
if (local->ops->set_tsf)
local->ops->set_tsf(&local->hw, &sdata->vif, tsf);
@@ -460,6 +508,8 @@ static inline void drv_reset_tsf(struct ieee80211_local *local,
{
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_reset_tsf(local, sdata);
if (local->ops->reset_tsf)
local->ops->reset_tsf(&local->hw, &sdata->vif);
@@ -489,6 +539,8 @@ static inline int drv_ampdu_action(struct ieee80211_local *local,
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, buf_size);
if (local->ops->ampdu_action)
@@ -644,6 +696,8 @@ static inline int drv_set_bitrate_mask(struct ieee80211_local *local,
might_sleep();
+ check_sdata_in_driver(sdata);
+
trace_drv_set_bitrate_mask(local, sdata, mask);
if (local->ops->set_bitrate_mask)
ret = local->ops->set_bitrate_mask(&local->hw,
@@ -657,6 +711,8 @@ static inline void drv_set_rekey_data(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct cfg80211_gtk_rekey_data *data)
{
+ check_sdata_in_driver(sdata);
+
trace_drv_set_rekey_data(local, sdata, data);
if (local->ops->set_rekey_data)
local->ops->set_rekey_data(&local->hw, &sdata->vif, data);
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index f0fb737efa8..d06975098aa 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -196,7 +196,8 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
if (sdata->vif.type == NL80211_IFTYPE_AP ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_STATION)
memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index ede9a8b341a..7d84af70132 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -97,6 +97,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
/* if merging, indicate to driver that we leave the old IBSS */
if (sdata->vif.bss_conf.ibss_joined) {
sdata->vif.bss_conf.ibss_joined = false;
+ netif_carrier_off(sdata->dev);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IBSS);
}
@@ -207,6 +208,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
bss = cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel,
mgmt, skb->len, 0, GFP_KERNEL);
cfg80211_put_bss(bss);
+ netif_carrier_on(sdata->dev);
cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
}
@@ -990,6 +992,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
}
sta_info_flush(sdata->local, sdata);
+ netif_carrier_off(sdata->dev);
/* remove beacon */
kfree(sdata->u.ibss.ie);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index ea10a51babd..068cc92d16a 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -24,6 +24,7 @@
#include <linux/spinlock.h>
#include <linux/etherdevice.h>
#include <linux/leds.h>
+#include <linux/idr.h>
#include <net/ieee80211_radiotap.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
@@ -184,12 +185,15 @@ enum ieee80211_packet_rx_flags {
* enum ieee80211_rx_flags - RX data flags
*
* @IEEE80211_RX_CMNTR: received on cooked monitor already
+ * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
+ * to cfg80211_report_obss_beacon().
*
* These flags are used across handling multiple interfaces
* for a single frame.
*/
enum ieee80211_rx_flags {
IEEE80211_RX_CMNTR = BIT(0),
+ IEEE80211_RX_BEACON_REPORTED = BIT(1),
};
struct ieee80211_rx_data {
@@ -228,6 +232,7 @@ struct beacon_data {
struct ieee80211_if_ap {
struct beacon_data __rcu *beacon;
+ struct sk_buff __rcu *probe_resp;
struct list_head vlans;
@@ -543,6 +548,7 @@ struct ieee80211_if_mesh {
* associated stations and deliver multicast frames both
* back to wireless media and to the local net stack.
* @IEEE80211_SDATA_DISCONNECT_RESUME: Disconnect after resume.
+ * @IEEE80211_SDATA_IN_DRIVER: indicates interface was added to driver
*/
enum ieee80211_sub_if_data_flags {
IEEE80211_SDATA_ALLMULTI = BIT(0),
@@ -550,6 +556,7 @@ enum ieee80211_sub_if_data_flags {
IEEE80211_SDATA_OPERATING_GMODE = BIT(2),
IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(3),
IEEE80211_SDATA_DISCONNECT_RESUME = BIT(4),
+ IEEE80211_SDATA_IN_DRIVER = BIT(5),
};
/**
@@ -722,17 +729,16 @@ enum {
* operating channel
* @SCAN_SET_CHANNEL: Set the next channel to be scanned
* @SCAN_SEND_PROBE: Send probe requests and wait for probe responses
- * @SCAN_LEAVE_OPER_CHANNEL: Leave the operating channel, notify the AP
- * about us leaving the channel and stop all associated STA interfaces
- * @SCAN_ENTER_OPER_CHANNEL: Enter the operating channel again, notify the
- * AP about us being back and restart all associated STA interfaces
+ * @SCAN_SUSPEND: Suspend the scan and go back to the operating channel to
+ *	send out data
+ * @SCAN_RESUME: Resume the scan and scan the next channel
*/
enum mac80211_scan_state {
SCAN_DECISION,
SCAN_SET_CHANNEL,
SCAN_SEND_PROBE,
- SCAN_LEAVE_OPER_CHANNEL,
- SCAN_ENTER_OPER_CHANNEL,
+ SCAN_SUSPEND,
+ SCAN_RESUME,
};
struct ieee80211_local {
@@ -1012,6 +1018,9 @@ struct ieee80211_local {
u32 hw_roc_cookie;
bool hw_roc_for_tx;
+ struct idr ack_status_frames;
+ spinlock_t ack_status_lock;
+
/* dummy netdev for use w/ NAPI */
struct net_device napi_dev;
@@ -1334,6 +1343,12 @@ void ieee80211_recalc_smps(struct ieee80211_local *local);
size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
const u8 *ids, int n_ids, size_t offset);
size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset);
+u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_supported_band *sband,
+ u16 cap);
+u8 *ieee80211_ie_build_ht_info(u8 *pos,
+ struct ieee80211_sta_ht_cap *ht_cap,
+ struct ieee80211_channel *channel,
+ enum nl80211_channel_type channel_type);
/* internal work items */
void ieee80211_work_init(struct ieee80211_local *local);
@@ -1362,6 +1377,8 @@ ieee80211_get_channel_mode(struct ieee80211_local *local,
bool ieee80211_set_channel_type(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
enum nl80211_channel_type chantype);
+enum nl80211_channel_type
+ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info);
#ifdef CONFIG_MAC80211_NOINLINE
#define debug_noinline noinline
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 30d73552e9a..12a6d4bb5d3 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -188,11 +188,22 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
return -ENOLINK;
break;
- case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_AP_VLAN: {
+ struct ieee80211_sub_if_data *master;
+
if (!sdata->bss)
return -ENOLINK;
+
list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
+
+ master = container_of(sdata->bss,
+ struct ieee80211_sub_if_data, u.ap);
+ sdata->control_port_protocol =
+ master->control_port_protocol;
+ sdata->control_port_no_encrypt =
+ master->control_port_no_encrypt;
break;
+ }
case NL80211_IFTYPE_AP:
sdata->bss = &sdata->u.ap;
break;
@@ -265,7 +276,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
break;
default:
if (coming_up) {
- res = drv_add_interface(local, &sdata->vif);
+ res = drv_add_interface(local, sdata);
if (res)
goto err_stop;
}
@@ -282,10 +293,18 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
changed |= ieee80211_reset_erp_info(sdata);
ieee80211_bss_info_change_notify(sdata, changed);
- if (sdata->vif.type == NL80211_IFTYPE_STATION)
+ if (sdata->vif.type == NL80211_IFTYPE_STATION ||
+ sdata->vif.type == NL80211_IFTYPE_ADHOC)
netif_carrier_off(dev);
else
netif_carrier_on(dev);
+
+ /*
+ * set default queue parameters so drivers don't
+ * need to initialise the hardware if the hardware
+ * doesn't start up with sane defaults
+ */
+ ieee80211_set_wmm_default(sdata);
}
set_bit(SDATA_STATE_RUNNING, &sdata->state);
@@ -329,15 +348,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
if (coming_up)
local->open_count++;
- if (hw_reconf_flags) {
+ if (hw_reconf_flags)
ieee80211_hw_config(local, hw_reconf_flags);
- /*
- * set default queue parameters so drivers don't
- * need to initialise the hardware if the hardware
- * doesn't start up with sane defaults
- */
- ieee80211_set_wmm_default(sdata);
- }
ieee80211_recalc_ps(local, -1);
@@ -345,7 +357,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
return 0;
err_del_interface:
- drv_remove_interface(local, &sdata->vif);
+ drv_remove_interface(local, sdata);
err_stop:
if (!local->open_count)
drv_stop(local);
@@ -450,15 +462,19 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sub_if_data *vlan, *tmpsdata;
struct beacon_data *old_beacon =
rtnl_dereference(sdata->u.ap.beacon);
+ struct sk_buff *old_probe_resp =
+ rtnl_dereference(sdata->u.ap.probe_resp);
/* sdata_running will return false, so this will disable */
ieee80211_bss_info_change_notify(sdata,
BSS_CHANGED_BEACON_ENABLED);
- /* remove beacon */
+ /* remove beacon and probe response */
RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
+ RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL);
synchronize_rcu();
kfree(old_beacon);
+ kfree(old_probe_resp);
/* down all dependent devices, that is VLANs */
list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
@@ -520,7 +536,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
ieee80211_free_keys(sdata);
if (going_down)
- drv_remove_interface(local, &sdata->vif);
+ drv_remove_interface(local, sdata);
}
sdata->bss = NULL;
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index fb02ea52d2c..87a89741432 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -134,9 +134,13 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
- (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
+ (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
sdata->crypto_tx_tailroom_needed_cnt--;
+ WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
+ (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV));
+
return 0;
}
@@ -179,7 +183,8 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
sdata = key->sdata;
if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
- (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
+ (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
increment_tailroom_need_count(sdata);
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index d999bf3b84e..e323d4e6647 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -100,7 +100,7 @@ static void ieee80211_reconfig_filter(struct work_struct *work)
*/
bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local)
{
- struct ieee80211_channel *chan, *scan_chan;
+ struct ieee80211_channel *chan;
enum nl80211_channel_type channel_type;
/* This logic needs to match logic in ieee80211_hw_config */
@@ -114,7 +114,7 @@ bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local)
else
channel_type = NL80211_CHAN_NO_HT;
} else if (local->tmp_channel) {
- chan = scan_chan = local->tmp_channel;
+ chan = local->tmp_channel;
channel_type = local->tmp_channel_type;
} else {
chan = local->oper_channel;
@@ -126,8 +126,8 @@ bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local)
return false;
/* Check current hardware-config against oper_channel. */
- if ((local->oper_channel != local->hw.conf.channel) ||
- (local->_oper_channel_type != local->hw.conf.channel_type))
+ if (local->oper_channel != local->hw.conf.channel ||
+ local->_oper_channel_type != local->hw.conf.channel_type)
return false;
return true;
@@ -135,7 +135,7 @@ bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local)
int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
{
- struct ieee80211_channel *chan, *scan_chan;
+ struct ieee80211_channel *chan;
int ret = 0;
int power;
enum nl80211_channel_type channel_type;
@@ -143,14 +143,12 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
might_sleep();
- scan_chan = local->scan_channel;
-
/* If this off-channel logic ever changes, ieee80211_on_oper_channel
* may need to change as well.
*/
offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
- if (scan_chan) {
- chan = scan_chan;
+ if (local->scan_channel) {
+ chan = local->scan_channel;
/* If scanning on oper channel, use whatever channel-type
* is currently in use.
*/
@@ -159,7 +157,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
else
channel_type = NL80211_CHAN_NO_HT;
} else if (local->tmp_channel) {
- chan = scan_chan = local->tmp_channel;
+ chan = local->tmp_channel;
channel_type = local->tmp_channel_type;
} else {
chan = local->oper_channel;
@@ -595,7 +593,10 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
wiphy->flags |= WIPHY_FLAG_NETNS_OK |
WIPHY_FLAG_4ADDR_AP |
- WIPHY_FLAG_4ADDR_STATION;
+ WIPHY_FLAG_4ADDR_STATION |
+ WIPHY_FLAG_REPORTS_OBSS;
+
+ wiphy->features = NL80211_FEATURE_SK_TX_STATUS;
if (!ops->set_key)
wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -670,6 +671,11 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
INIT_WORK(&local->sched_scan_stopped_work,
ieee80211_sched_scan_stopped_work);
+ spin_lock_init(&local->ack_status_lock);
+ idr_init(&local->ack_status_frames);
+ /* preallocate at least one entry */
+ idr_pre_get(&local->ack_status_frames, GFP_KERNEL);
+
sta_info_init(local);
for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
@@ -1045,6 +1051,13 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL(ieee80211_unregister_hw);
+static int ieee80211_free_ack_frame(int id, void *p, void *data)
+{
+ WARN_ONCE(1, "Have pending ack frames!\n");
+ kfree_skb(p);
+ return 0;
+}
+
void ieee80211_free_hw(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
@@ -1055,6 +1068,10 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
if (local->wiphy_ciphers_allocated)
kfree(local->hw.wiphy->cipher_suites);
+ idr_for_each(&local->ack_status_frames,
+ ieee80211_free_ack_frame, NULL);
+ idr_destroy(&local->ack_status_frames);
+
wiphy_free(local->hw.wiphy);
}
EXPORT_SYMBOL(ieee80211_free_hw);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index a7078fdba8c..b3a125f6034 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -76,6 +76,7 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data)
bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ struct ieee80211_local *local = sdata->local;
/*
* As support for each feature is added, check for matching
@@ -87,15 +88,23 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat
* - MDA enabled
* - Power management control on fc
*/
- if (ifmsh->mesh_id_len == ie->mesh_id_len &&
- memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 &&
- (ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) &&
- (ifmsh->mesh_pm_id == ie->mesh_config->meshconf_pmetric) &&
- (ifmsh->mesh_cc_id == ie->mesh_config->meshconf_congest) &&
- (ifmsh->mesh_sp_id == ie->mesh_config->meshconf_synch) &&
- (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth))
- return true;
-
+ if (!(ifmsh->mesh_id_len == ie->mesh_id_len &&
+ memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 &&
+ (ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) &&
+ (ifmsh->mesh_pm_id == ie->mesh_config->meshconf_pmetric) &&
+ (ifmsh->mesh_cc_id == ie->mesh_config->meshconf_congest) &&
+ (ifmsh->mesh_sp_id == ie->mesh_config->meshconf_synch) &&
+ (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)))
+ goto mismatch;
+
+ /* disallow peering with mismatched channel types for now */
+ if (ie->ht_info_elem &&
+ (local->_oper_channel_type !=
+ ieee80211_ht_info_to_channel_type(ie->ht_info_elem)))
+ goto mismatch;
+
+ return true;
+mismatch:
return false;
}
@@ -341,6 +350,49 @@ int mesh_add_ds_params_ie(struct sk_buff *skb,
return 0;
}
+int mesh_add_ht_cap_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_supported_band *sband;
+ u8 *pos;
+
+ sband = local->hw.wiphy->bands[local->oper_channel->band];
+ if (!sband->ht_cap.ht_supported ||
+ local->_oper_channel_type == NL80211_CHAN_NO_HT)
+ return 0;
+
+ if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
+ return -ENOMEM;
+
+ pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_cap));
+ ieee80211_ie_build_ht_cap(pos, sband, sband->ht_cap.cap);
+
+ return 0;
+}
+
+int mesh_add_ht_info_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_channel *channel = local->oper_channel;
+ enum nl80211_channel_type channel_type = local->_oper_channel_type;
+ struct ieee80211_supported_band *sband =
+ local->hw.wiphy->bands[channel->band];
+ struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
+ u8 *pos;
+
+ if (!ht_cap->ht_supported || channel_type == NL80211_CHAN_NO_HT)
+ return 0;
+
+ if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_info))
+ return -ENOMEM;
+
+ pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_info));
+ ieee80211_ie_build_ht_info(pos, ht_cap, channel, channel_type);
+
+ return 0;
+}
static void ieee80211_mesh_path_timer(unsigned long data)
{
struct ieee80211_sub_if_data *sdata =
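
The "2 + sizeof(...)" terms in the tailroom checks and skb_put() calls above account for the one-byte element ID and one-byte length field that precede every 802.11 information element. A generic append helper along those lines (hypothetical, not part of mac80211) would look like:

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static int example_add_ie(struct sk_buff *skb, u8 eid,
			  const void *payload, u8 len)
{
	u8 *pos;

	if (skb_tailroom(skb) < 2 + len)
		return -ENOMEM;

	pos = skb_put(skb, 2 + len);
	*pos++ = eid;			/* element ID */
	*pos++ = len;			/* payload length */
	memcpy(pos, payload, len);	/* payload itself */
	return 0;
}
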
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 8c00e2d1d63..622cc96eb4d 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -31,6 +31,8 @@
* @MESH_PATH_FIXED: the mesh path has been manually set and should not be
* modified
* @MESH_PATH_RESOLVED: the mesh path has been resolved
+ * @MESH_PATH_REQ_QUEUED: there is an unsent path request for this destination
+ * already queued up, waiting for the discovery process to start.
*
* MESH_PATH_RESOLVED is used by the mesh path timer to
* decide when to stop or cancel the mesh path discovery.
@@ -41,6 +43,7 @@ enum mesh_path_flags {
MESH_PATH_SN_VALID = BIT(2),
MESH_PATH_FIXED = BIT(3),
MESH_PATH_RESOLVED = BIT(4),
+ MESH_PATH_REQ_QUEUED = BIT(5),
};
/**
@@ -212,6 +215,10 @@ int mesh_add_vendor_ies(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata);
int mesh_add_ds_params_ie(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata);
+int mesh_add_ht_cap_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata);
+int mesh_add_ht_info_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata);
void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
void ieee80211s_init(void);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 174040a4288..a7afb2d32de 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -113,20 +113,20 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
- struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
+ struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
- u8 *pos;
- int ie_len;
+ u8 *pos, ie_len;
+ int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
+ sizeof(mgmt->u.action.u.mesh_action);
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+ hdr_len +
+ 2 + 37); /* max HWMP IE */
if (!skb)
return -1;
skb_reserve(skb, local->hw.extra_tx_headroom);
- /* 25 is the size of the common mgmt part (24) plus the size of the
- * common action part (1)
- */
- mgmt = (struct ieee80211_mgmt *)
- skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
- memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
+ mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
+ memset(mgmt, 0, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
@@ -240,20 +240,20 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
- struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
+ struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
- u8 *pos;
- int ie_len;
+ u8 *pos, ie_len;
+ int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
+ sizeof(mgmt->u.action.u.mesh_action);
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+ hdr_len +
+ 2 + 15 /* PERR IE */);
if (!skb)
return -1;
skb_reserve(skb, local->tx_headroom + local->hw.extra_tx_headroom);
- /* 25 is the size of the common mgmt part (24) plus the size of the
- * common action part (1)
- */
- mgmt = (struct ieee80211_mgmt *)
- skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
- memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
+ mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
+ memset(mgmt, 0, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
@@ -867,9 +867,19 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
return;
}
+ spin_lock_bh(&mpath->state_lock);
+ if (mpath->flags & MESH_PATH_REQ_QUEUED) {
+ spin_unlock_bh(&mpath->state_lock);
+ spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
+ return;
+ }
+
memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
preq_node->flags = flags;
+ mpath->flags |= MESH_PATH_REQ_QUEUED;
+ spin_unlock_bh(&mpath->state_lock);
+
list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
++ifmsh->preq_queue_len;
spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
@@ -921,6 +931,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
goto enddiscovery;
spin_lock_bh(&mpath->state_lock);
+ mpath->flags &= ~MESH_PATH_REQ_QUEUED;
if (preq_node->flags & PREQ_Q_F_START) {
if (mpath->flags & MESH_PATH_RESOLVING) {
spin_unlock_bh(&mpath->state_lock);
@@ -1028,11 +1039,11 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
mesh_queue_preq(mpath, PREQ_Q_F_START);
}
- if (skb_queue_len(&mpath->frame_queue) >=
- MESH_FRAME_QUEUE_LEN)
+ if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
skb_to_free = skb_dequeue(&mpath->frame_queue);
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ ieee80211_set_qos_hdr(sdata, skb);
skb_queue_tail(&mpath->frame_queue, skb);
if (skb_to_free)
mesh_path_discard_frame(skb_to_free, sdata);
@@ -1061,6 +1072,7 @@ void mesh_path_timer(unsigned long data)
} else if (mpath->discovery_retries < max_preq_retries(sdata)) {
++mpath->discovery_retries;
mpath->discovery_timeout *= 2;
+ mpath->flags &= ~MESH_PATH_REQ_QUEUED;
spin_unlock_bh(&mpath->state_lock);
mesh_queue_preq(mpath, 0);
} else {
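
The MESH_PATH_REQ_QUEUED handling above reduces to a test-and-set discipline on mpath->flags: a PREQ is queued only if the bit is clear, and the bit is cleared again as soon as the request is dequeued or the retry timer fires. Stripped down to that locking pattern (function names are illustrative; assumes compilation inside net/mac80211 for struct mesh_path):

#include "mesh.h"

static bool example_try_queue_preq(struct mesh_path *mpath)
{
	bool queue_it = false;

	spin_lock_bh(&mpath->state_lock);
	if (!(mpath->flags & MESH_PATH_REQ_QUEUED)) {
		mpath->flags |= MESH_PATH_REQ_QUEUED;
		queue_it = true;
	}
	spin_unlock_bh(&mpath->state_lock);

	return queue_it;	/* caller enqueues the PREQ only when true */
}

static void example_preq_dequeued(struct mesh_path *mpath)
{
	spin_lock_bh(&mpath->state_lock);
	mpath->flags &= ~MESH_PATH_REQ_QUEUED;
	spin_unlock_bh(&mpath->state_lock);
}
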
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 7f54c504223..4fc23d1b9c3 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -213,7 +213,6 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
struct ieee80211_hdr *hdr;
struct sk_buff_head tmpq;
unsigned long flags;
- struct ieee80211_sub_if_data *sdata = mpath->sdata;
rcu_assign_pointer(mpath->next_hop, sta);
@@ -224,8 +223,6 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
hdr = (struct ieee80211_hdr *) skb->data;
memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
- skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
- ieee80211_set_qos_hdr(sdata, skb);
__skb_queue_tail(&tmpq, skb);
}
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 7e57f5d07f6..0140e88a822 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -80,11 +80,15 @@ static inline void mesh_plink_fsm_restart(struct sta_info *sta)
* on it in the lifecycle management section!
*/
static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
- u8 *hw_addr, u32 rates)
+ u8 *hw_addr, u32 rates,
+ struct ieee802_11_elems *elems)
{
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_supported_band *sband;
struct sta_info *sta;
+ sband = local->hw.wiphy->bands[local->oper_channel->band];
+
if (local->num_sta >= MESH_MAX_PLINKS)
return NULL;
@@ -96,6 +100,9 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
set_sta_flag(sta, WLAN_STA_AUTHORIZED);
set_sta_flag(sta, WLAN_STA_WME);
sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
+ if (elems->ht_cap_elem)
+ ieee80211_ht_cap_ie_to_sta_ht_cap(sband, elems->ht_cap_elem,
+ &sta->sta.ht_cap);
rate_control_rate_init(sta);
return sta;
@@ -153,23 +160,31 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
enum ieee80211_self_protected_actioncode action,
u8 *da, __le16 llid, __le16 plid, __le16 reason) {
struct ieee80211_local *local = sdata->local;
- struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400 +
- sdata->u.mesh.ie_len);
+ struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
bool include_plid = false;
- int ie_len = 4;
u16 peering_proto = 0;
- u8 *pos;
-
+ u8 *pos, ie_len = 4;
+ int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.self_prot) +
+ sizeof(mgmt->u.action.u.self_prot);
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+ hdr_len +
+ 2 + /* capability info */
+ 2 + /* AID */
+ 2 + 8 + /* supported rates */
+ 2 + (IEEE80211_MAX_SUPP_RATES - 8) +
+ 2 + sdata->u.mesh.mesh_id_len +
+ 2 + sizeof(struct ieee80211_meshconf_ie) +
+ 2 + sizeof(struct ieee80211_ht_cap) +
+ 2 + sizeof(struct ieee80211_ht_info) +
+ 2 + 8 + /* peering IE */
+ sdata->u.mesh.ie_len);
if (!skb)
return -1;
skb_reserve(skb, local->hw.extra_tx_headroom);
- /* 25 is the size of the common mgmt part (24) plus the size of the
- * common action part (1)
- */
- mgmt = (struct ieee80211_mgmt *)
- skb_put(skb, 25 + sizeof(mgmt->u.action.u.self_prot));
- memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.self_prot));
+ mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
+ memset(mgmt, 0, hdr_len);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
memcpy(mgmt->da, da, ETH_ALEN);
@@ -235,6 +250,13 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
memcpy(pos, &reason, 2);
pos += 2;
}
+
+ if (action != WLAN_SP_MESH_PEERING_CLOSE) {
+ if (mesh_add_ht_cap_ie(skb, sdata) ||
+ mesh_add_ht_info_ie(skb, sdata))
+ return -1;
+ }
+
if (mesh_add_vendor_ies(skb, sdata))
return -1;
@@ -261,7 +283,7 @@ void mesh_neighbour_update(u8 *hw_addr, u32 rates,
elems->ie_start, elems->total_len,
GFP_KERNEL);
else
- sta = mesh_plink_alloc(sdata, hw_addr, rates);
+ sta = mesh_plink_alloc(sdata, hw_addr, rates, elems);
if (!sta)
return;
if (sta_info_insert_rcu(sta)) {
@@ -552,7 +574,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
}
rates = ieee80211_sta_get_rates(local, &elems, rx_status->band);
- sta = mesh_plink_alloc(sdata, mgmt->sa, rates);
+ sta = mesh_plink_alloc(sdata, mgmt->sa, rates, &elems);
if (!sta) {
mpl_dbg("Mesh plink error: plink table full\n");
return;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index b1b1bb368f7..f9ec15b3fe0 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1468,6 +1468,47 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
return RX_MGMT_CFG80211_DISASSOC;
}
+static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
+ u8 *supp_rates, unsigned int supp_rates_len,
+ u32 *rates, u32 *basic_rates,
+ bool *have_higher_than_11mbit,
+ int *min_rate, int *min_rate_index)
+{
+ int i, j;
+
+ for (i = 0; i < supp_rates_len; i++) {
+ int rate = (supp_rates[i] & 0x7f) * 5;
+ bool is_basic = !!(supp_rates[i] & 0x80);
+
+ if (rate > 110)
+ *have_higher_than_11mbit = true;
+
+ /*
+ * BSS_MEMBERSHIP_SELECTOR_HT_PHY is defined in 802.11n-2009
+ * 7.3.2.2 as a magic value instead of a rate. Hence, skip it.
+ *
+ * Note: Even though the membership selector and the basic
+ * rate flag share the same bit, they are not exactly
+ * the same.
+ */
+ if (!!(supp_rates[i] & 0x80) &&
+ (supp_rates[i] & 0x7f) == BSS_MEMBERSHIP_SELECTOR_HT_PHY)
+ continue;
+
+ for (j = 0; j < sband->n_bitrates; j++) {
+ if (sband->bitrates[j].bitrate == rate) {
+ *rates |= BIT(j);
+ if (is_basic)
+ *basic_rates |= BIT(j);
+ if (rate < *min_rate) {
+ *min_rate = rate;
+ *min_rate_index = j;
+ }
+ break;
+ }
+ }
+ }
+}
static bool ieee80211_assoc_success(struct ieee80211_work *wk,
struct ieee80211_mgmt *mgmt, size_t len)
@@ -1484,7 +1525,7 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
struct ieee802_11_elems elems;
struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
u32 changed = 0;
- int i, j, err;
+ int err;
bool have_higher_than_11mbit = false;
u16 ap_ht_cap_flags;
int min_rate = INT_MAX, min_rate_index = -1;
@@ -1542,47 +1583,14 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
basic_rates = 0;
sband = local->hw.wiphy->bands[wk->chan->band];
- for (i = 0; i < elems.supp_rates_len; i++) {
- int rate = (elems.supp_rates[i] & 0x7f) * 5;
- bool is_basic = !!(elems.supp_rates[i] & 0x80);
-
- if (rate > 110)
- have_higher_than_11mbit = true;
+ ieee80211_get_rates(sband, elems.supp_rates, elems.supp_rates_len,
+ &rates, &basic_rates, &have_higher_than_11mbit,
+ &min_rate, &min_rate_index);
- for (j = 0; j < sband->n_bitrates; j++) {
- if (sband->bitrates[j].bitrate == rate) {
- rates |= BIT(j);
- if (is_basic)
- basic_rates |= BIT(j);
- if (rate < min_rate) {
- min_rate = rate;
- min_rate_index = j;
- }
- break;
- }
- }
- }
-
- for (i = 0; i < elems.ext_supp_rates_len; i++) {
- int rate = (elems.ext_supp_rates[i] & 0x7f) * 5;
- bool is_basic = !!(elems.ext_supp_rates[i] & 0x80);
-
- if (rate > 110)
- have_higher_than_11mbit = true;
-
- for (j = 0; j < sband->n_bitrates; j++) {
- if (sband->bitrates[j].bitrate == rate) {
- rates |= BIT(j);
- if (is_basic)
- basic_rates |= BIT(j);
- if (rate < min_rate) {
- min_rate = rate;
- min_rate_index = j;
- }
- break;
- }
- }
- }
+ ieee80211_get_rates(sband, elems.ext_supp_rates,
+ elems.ext_supp_rates_len, &rates, &basic_rates,
+ &have_higher_than_11mbit,
+ &min_rate, &min_rate_index);
/*
* some buggy APs don't advertise basic_rates. use the lowest
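
The ieee80211_get_rates() helper folded in above interprets each Supported Rates octet as a rate in 500 kb/s units in bits 0-6 plus a basic-rate flag in bit 7; multiplying by 5 yields the 100 kb/s units used by struct ieee80211_rate, which is why the "higher than 11 Mbit" check compares against 110 and why 0x96 decodes to a basic 11 Mb/s rate. A standalone restatement of that decoding (hypothetical struct and function names):

#include <linux/types.h>

struct example_rate {
	int rate;	/* in 100 kb/s, same unit as ieee80211_rate::bitrate */
	bool is_basic;	/* mandatory rate for the BSS */
};

static struct example_rate example_parse_supp_rate(u8 octet)
{
	struct example_rate r = {
		.rate = (octet & 0x7f) * 5,
		.is_basic = !!(octet & 0x80),
	};

	return r;	/* e.g. 0x96 -> { .rate = 110, .is_basic = true } */
}
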
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 9ee7164b207..596efaf50e0 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -125,7 +125,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
ieee80211_bss_info_change_notify(sdata,
BSS_CHANGED_BEACON_ENABLED);
- drv_remove_interface(local, &sdata->vif);
+ drv_remove_interface(local, sdata);
}
/* stop hardware - this must stop RX */
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 58a89554b78..b39dda523f3 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -334,8 +334,8 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
static void
-calc_rate_durations(struct minstrel_sta_info *mi, struct ieee80211_local *local,
- struct minstrel_rate *d, struct ieee80211_rate *rate)
+calc_rate_durations(struct ieee80211_local *local, struct minstrel_rate *d,
+ struct ieee80211_rate *rate)
{
int erp = !!(rate->flags & IEEE80211_RATE_ERP_G);
@@ -402,8 +402,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
mr->rix = i;
mr->bitrate = sband->bitrates[i].bitrate / 5;
- calc_rate_durations(mi, local, mr,
- &sband->bitrates[i]);
+ calc_rate_durations(local, mr, &sband->bitrates[i]);
/* calculate maximum number of retransmissions before
* fallback (based on maximum segment size) */
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index fb123e2e081..d1a8869fe05 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -748,10 +748,11 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
struct ieee80211_local *local = rx->local;
struct ieee80211_hw *hw = &local->hw;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct sta_info *sta = rx->sta;
struct tid_ampdu_rx *tid_agg_rx;
u16 sc;
- int tid;
+ u8 tid, ack_policy;
if (!ieee80211_is_data_qos(hdr->frame_control))
goto dont_reorder;
@@ -764,6 +765,8 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
if (!sta)
goto dont_reorder;
+ ack_policy = *ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_ACK_POLICY_MASK;
tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
@@ -774,6 +777,15 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
goto dont_reorder;
+ /* not part of a BA session */
+ if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
+ ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
+ goto dont_reorder;
+
+ /* not actually part of this BA session */
+ if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
+ goto dont_reorder;
+
/* new, potentially un-ordered, ampdu frame - process it */
/* reset session timer */
@@ -858,6 +870,13 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
rx->sdata->control_port_protocol)
return RX_CONTINUE;
}
+
+ if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
+ cfg80211_rx_spurious_frame(rx->sdata->dev,
+ hdr->addr2,
+ GFP_ATOMIC))
+ return RX_DROP_UNUSABLE;
+
return RX_DROP_MONITOR;
}
@@ -1327,15 +1346,20 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
/*
* If we receive a 4-addr nullfunc frame from a STA
- * that was not moved to a 4-addr STA vlan yet, drop
- * the frame to the monitor interface, to make sure
- * that hostapd sees it
+ * that was not moved to a 4-addr STA vlan yet, send
+ * the event to userspace and, for older hostapd, drop
+ * the frame to the monitor interface.
*/
if (ieee80211_has_a4(hdr->frame_control) &&
(rx->sdata->vif.type == NL80211_IFTYPE_AP ||
(rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
- !rx->sdata->u.vlan.sta)))
+ !rx->sdata->u.vlan.sta))) {
+ if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
+ cfg80211_rx_unexpected_4addr_frame(
+ rx->sdata->dev, sta->sta.addr,
+ GFP_ATOMIC);
return RX_DROP_MONITOR;
+ }
/*
* Update counter and free packet here to avoid
* counting this as a dropped packet.
@@ -1933,6 +1957,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
return RX_CONTINUE;
+ skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
mesh_hdr->ttl--;
if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
@@ -1957,12 +1982,10 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
memset(info, 0, sizeof(*info));
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
info->control.vif = &rx->sdata->vif;
+ info->control.jiffies = jiffies;
if (is_multicast_ether_addr(fwd_hdr->addr1)) {
IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
fwded_mcast);
- skb_set_queue_mapping(fwd_skb,
- ieee80211_select_queue(sdata, fwd_skb));
- ieee80211_set_qos_hdr(sdata, fwd_skb);
} else {
int err;
/*
@@ -2014,12 +2037,17 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
return RX_DROP_MONITOR;
/*
- * Allow the cooked monitor interface of an AP to see 4-addr frames so
- * that a 4-addr station can be detected and moved into a separate VLAN
+ * Send unexpected-4addr-frame event to hostapd. For older versions,
+ * also drop the frame to cooked monitor interfaces.
*/
if (ieee80211_has_a4(hdr->frame_control) &&
- sdata->vif.type == NL80211_IFTYPE_AP)
+ sdata->vif.type == NL80211_IFTYPE_AP) {
+ if (rx->sta &&
+ !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
+ cfg80211_rx_unexpected_4addr_frame(
+ rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
return RX_DROP_MONITOR;
+ }
err = __ieee80211_data_to_8023(rx, &port_control);
if (unlikely(err))
@@ -2174,6 +2202,18 @@ ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
if (!ieee80211_is_mgmt(mgmt->frame_control))
return RX_DROP_MONITOR;
+ if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
+ ieee80211_is_beacon(mgmt->frame_control) &&
+ !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
+ struct ieee80211_rx_status *status;
+
+ status = IEEE80211_SKB_RXCB(rx->skb);
+ cfg80211_report_obss_beacon(rx->local->hw.wiphy,
+ rx->skb->data, rx->skb->len,
+ status->freq, GFP_ATOMIC);
+ rx->flags |= IEEE80211_RX_BEACON_REPORTED;
+ }
+
if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
return RX_DROP_MONITOR;
@@ -2207,13 +2247,8 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
switch (mgmt->u.action.category) {
case WLAN_CATEGORY_BACK:
- /*
- * The aggregation code is not prepared to handle
- * anything but STA/AP due to the BSSID handling;
- * IBSS could work in the code but isn't supported
- * by drivers or the standard.
- */
if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+ sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
sdata->vif.type != NL80211_IFTYPE_AP)
break;
@@ -2493,6 +2528,10 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
goto out_free_skb;
rx->flags |= IEEE80211_RX_CMNTR;
+ /* If there are no cooked monitor interfaces, just free the SKB */
+ if (!local->cooked_mntrs)
+ goto out_free_skb;
+
if (skb_headroom(skb) < sizeof(*rthdr) &&
pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
goto out_free_skb;
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 105436dbb90..81863031e0a 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -213,12 +213,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
if (bss)
ieee80211_rx_bss_put(sdata->local, bss);
- /* If we are on-operating-channel, and this packet is for the
- * current channel, pass the pkt on up the stack so that
- * the rest of the stack can make use of it.
- */
- if (ieee80211_cfg_on_oper_channel(sdata->local)
- && (channel == sdata->local->oper_channel))
+ if (channel == sdata->local->oper_channel)
return RX_CONTINUE;
dev_kfree_skb(skb);
@@ -264,8 +259,6 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
bool was_hw_scan)
{
struct ieee80211_local *local = hw_to_local(hw);
- bool on_oper_chan;
- bool enable_beacons = false;
lockdep_assert_held(&local->mtx);
@@ -298,25 +291,13 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
local->scanning = 0;
local->scan_channel = NULL;
- on_oper_chan = ieee80211_cfg_on_oper_channel(local);
-
- if (was_hw_scan || !on_oper_chan)
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- else
- /* Set power back to normal operating levels. */
- ieee80211_hw_config(local, 0);
+ /* Set power back to normal operating levels. */
+ ieee80211_hw_config(local, 0);
if (!was_hw_scan) {
- bool on_oper_chan2;
ieee80211_configure_filter(local);
drv_sw_scan_complete(local);
- on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
- /* We should always be on-channel at this point. */
- WARN_ON(!on_oper_chan2);
- if (on_oper_chan2 && (on_oper_chan != on_oper_chan2))
- enable_beacons = true;
-
- ieee80211_offchannel_return(local, enable_beacons, true);
+ ieee80211_offchannel_return(local, true, true);
}
ieee80211_recalc_idle(local);
@@ -361,11 +342,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
local->next_scan_state = SCAN_DECISION;
local->scan_channel_idx = 0;
- /* We always want to use off-channel PS, even if we
- * are not really leaving oper-channel. Don't
- * tell the AP though, as long as we are on-channel.
- */
- ieee80211_offchannel_enable_all_ps(local, false);
+ ieee80211_offchannel_stop_vifs(local, true);
ieee80211_configure_filter(local);
@@ -373,8 +350,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
ieee80211_hw_config(local, 0);
ieee80211_queue_delayed_work(&local->hw,
- &local->scan_work,
- IEEE80211_CHANNEL_TIME);
+ &local->scan_work, 0);
return 0;
}
@@ -510,96 +486,39 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
next_chan = local->scan_req->channels[local->scan_channel_idx];
- if (ieee80211_cfg_on_oper_channel(local)) {
- /* We're currently on operating channel. */
- if (next_chan == local->oper_channel)
- /* We don't need to move off of operating channel. */
- local->next_scan_state = SCAN_SET_CHANNEL;
- else
- /*
- * We do need to leave operating channel, as next
- * scan is somewhere else.
- */
- local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
- } else {
- /*
- * we're currently scanning a different channel, let's
- * see if we can scan another channel without interfering
- * with the current traffic situation.
- *
- * Since we don't know if the AP has pending frames for us
- * we can only check for our tx queues and use the current
- * pm_qos requirements for rx. Hence, if no tx traffic occurs
- * at all we will scan as many channels in a row as the pm_qos
- * latency allows us to. Additionally we also check for the
- * currently negotiated listen interval to prevent losing
- * frames unnecessarily.
- *
- * Otherwise switch back to the operating channel.
- */
-
- bad_latency = time_after(jiffies +
- ieee80211_scan_get_channel_time(next_chan),
- local->leave_oper_channel_time +
- usecs_to_jiffies(pm_qos_request(PM_QOS_NETWORK_LATENCY)));
-
- listen_int_exceeded = time_after(jiffies +
- ieee80211_scan_get_channel_time(next_chan),
- local->leave_oper_channel_time +
- usecs_to_jiffies(min_beacon_int * 1024) *
- local->hw.conf.listen_interval);
-
- if (associated && ( !tx_empty || bad_latency ||
- listen_int_exceeded))
- local->next_scan_state = SCAN_ENTER_OPER_CHANNEL;
- else
- local->next_scan_state = SCAN_SET_CHANNEL;
- }
-
- *next_delay = 0;
-}
-
-static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local,
- unsigned long *next_delay)
-{
- /* PS will already be in off-channel mode,
- * we do that once at the beginning of scanning.
- */
- ieee80211_offchannel_stop_vifs(local, false);
-
/*
- * What if the nullfunc frames didn't arrive?
+ * we're currently scanning a different channel, let's
+ * see if we can scan another channel without interfering
+ * with the current traffic situation.
+ *
+ * Since we don't know if the AP has pending frames for us
+ * we can only check for our tx queues and use the current
+ * pm_qos requirements for rx. Hence, if no tx traffic occurs
+ * at all we will scan as many channels in a row as the pm_qos
+ * latency allows us to. Additionally we also check for the
+ * currently negotiated listen interval to prevent losing
+ * frames unnecessarily.
+ *
+ * Otherwise switch back to the operating channel.
*/
- drv_flush(local, false);
- if (local->ops->flush)
- *next_delay = 0;
- else
- *next_delay = HZ / 10;
- /* remember when we left the operating channel */
- local->leave_oper_channel_time = jiffies;
+ bad_latency = time_after(jiffies +
+ ieee80211_scan_get_channel_time(next_chan),
+ local->leave_oper_channel_time +
+ usecs_to_jiffies(pm_qos_request(PM_QOS_NETWORK_LATENCY)));
- /* advance to the next channel to be scanned */
- local->next_scan_state = SCAN_SET_CHANNEL;
-}
-
-static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *local,
- unsigned long *next_delay)
-{
- /* switch back to the operating channel */
- local->scan_channel = NULL;
- if (!ieee80211_cfg_on_oper_channel(local))
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+ listen_int_exceeded = time_after(jiffies +
+ ieee80211_scan_get_channel_time(next_chan),
+ local->leave_oper_channel_time +
+ usecs_to_jiffies(min_beacon_int * 1024) *
+ local->hw.conf.listen_interval);
- /*
- * Re-enable vifs and beaconing. Leave PS
- * in off-channel state..will put that back
- * on-channel at the end of scanning.
- */
- ieee80211_offchannel_return(local, true, false);
+ if (associated && (!tx_empty || bad_latency || listen_int_exceeded))
+ local->next_scan_state = SCAN_SUSPEND;
+ else
+ local->next_scan_state = SCAN_SET_CHANNEL;
- *next_delay = HZ / 5;
- local->next_scan_state = SCAN_DECISION;
+ *next_delay = 0;
}
static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
@@ -613,10 +532,8 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
local->scan_channel = chan;
- /* Only call hw-config if we really need to change channels. */
- if (chan != local->hw.conf.channel)
- if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
- skip = 1;
+ if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
+ skip = 1;
/* advance state machine to next channel/band */
local->scan_channel_idx++;
@@ -673,6 +590,44 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
local->next_scan_state = SCAN_DECISION;
}
+static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
+ unsigned long *next_delay)
+{
+ /* switch back to the operating channel */
+ local->scan_channel = NULL;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+
+ /*
+ * Re-enable vifs and beaconing. Leave PS
+ * in off-channel state; will put that back
+ * on-channel at the end of scanning.
+ */
+ ieee80211_offchannel_return(local, true, false);
+
+ *next_delay = HZ / 5;
+ /* afterwards, resume scan & go to next channel */
+ local->next_scan_state = SCAN_RESUME;
+}
+
+static void ieee80211_scan_state_resume(struct ieee80211_local *local,
+ unsigned long *next_delay)
+{
+ /* PS already is in off-channel mode */
+ ieee80211_offchannel_stop_vifs(local, false);
+
+ if (local->ops->flush) {
+ drv_flush(local, false);
+ *next_delay = 0;
+ } else
+ *next_delay = HZ / 10;
+
+ /* remember when we left the operating channel */
+ local->leave_oper_channel_time = jiffies;
+
+ /* advance to the next channel to be scanned */
+ local->next_scan_state = SCAN_DECISION;
+}
+
void ieee80211_scan_work(struct work_struct *work)
{
struct ieee80211_local *local =
@@ -743,11 +698,11 @@ void ieee80211_scan_work(struct work_struct *work)
case SCAN_SEND_PROBE:
ieee80211_scan_state_send_probe(local, &next_delay);
break;
- case SCAN_LEAVE_OPER_CHANNEL:
- ieee80211_scan_state_leave_oper_channel(local, &next_delay);
+ case SCAN_SUSPEND:
+ ieee80211_scan_state_suspend(local, &next_delay);
break;
- case SCAN_ENTER_OPER_CHANNEL:
- ieee80211_scan_state_enter_oper_channel(local, &next_delay);
+ case SCAN_RESUME:
+ ieee80211_scan_state_resume(local, &next_delay);
break;
}
} while (next_delay == 0);
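
The SCAN_SUSPEND decision above is a pure time comparison: if finishing the next channel would keep the interface away from the operating channel longer than the pm_qos network-latency budget or the negotiated listen interval allows, or if TX frames are pending, return to the operating channel first. The same test, pulled out into an illustrative helper with both budgets precomputed in jiffies:

#include <linux/jiffies.h>
#include <linux/types.h>

static bool example_should_suspend_scan(unsigned long channel_time,
					unsigned long leave_time,
					unsigned long latency_budget,
					unsigned long listen_budget,
					bool associated, bool tx_pending)
{
	bool bad_latency = time_after(jiffies + channel_time,
				      leave_time + latency_budget);
	bool listen_exceeded = time_after(jiffies + channel_time,
					  leave_time + listen_budget);

	return associated && (tx_pending || bad_latency || listen_exceeded);
}
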
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 8c8ce05ad26..c5923ab8a07 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -52,6 +52,7 @@
* unblocks the station.
* @WLAN_STA_SP: Station is in a service period, so don't try to
* reply to other uAPSD trigger frames or PS-Poll.
+ * @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame.
*/
enum ieee80211_sta_info_flags {
WLAN_STA_AUTH,
@@ -71,6 +72,7 @@ enum ieee80211_sta_info_flags {
WLAN_STA_TDLS_PEER_AUTH,
WLAN_STA_UAPSD,
WLAN_STA_SP,
+ WLAN_STA_4ADDR_EVENT,
};
#define STA_TID_NUM 16
@@ -390,6 +392,12 @@ static inline int test_and_clear_sta_flag(struct sta_info *sta,
return test_and_clear_bit(flag, &sta->_flags);
}
+static inline int test_and_set_sta_flag(struct sta_info *sta,
+ enum ieee80211_sta_info_flags flag)
+{
+ return test_and_set_bit(flag, &sta->_flags);
+}
+
void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
struct tid_ampdu_tx *tid_tx);
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 16518f38611..46222ce0e5b 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -517,27 +517,54 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
}
if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
- struct ieee80211_work *wk;
u64 cookie = (unsigned long)skb;
- rcu_read_lock();
- list_for_each_entry_rcu(wk, &local->work_list, list) {
- if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX)
- continue;
- if (wk->offchan_tx.frame != skb)
- continue;
- wk->offchan_tx.status = true;
- break;
- }
- rcu_read_unlock();
- if (local->hw_roc_skb_for_status == skb) {
- cookie = local->hw_roc_cookie ^ 2;
- local->hw_roc_skb_for_status = NULL;
+ if (ieee80211_is_nullfunc(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+ bool acked = info->flags & IEEE80211_TX_STAT_ACK;
+ cfg80211_probe_status(skb->dev, hdr->addr1,
+ cookie, acked, GFP_ATOMIC);
+ } else {
+ struct ieee80211_work *wk;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(wk, &local->work_list, list) {
+ if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX)
+ continue;
+ if (wk->offchan_tx.frame != skb)
+ continue;
+ wk->offchan_tx.status = true;
+ break;
+ }
+ rcu_read_unlock();
+ if (local->hw_roc_skb_for_status == skb) {
+ cookie = local->hw_roc_cookie ^ 2;
+ local->hw_roc_skb_for_status = NULL;
+ }
+
+ cfg80211_mgmt_tx_status(
+ skb->dev, cookie, skb->data, skb->len,
+ !!(info->flags & IEEE80211_TX_STAT_ACK),
+ GFP_ATOMIC);
}
+ }
- cfg80211_mgmt_tx_status(
- skb->dev, cookie, skb->data, skb->len,
- !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
+ if (unlikely(info->ack_frame_id)) {
+ struct sk_buff *ack_skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&local->ack_status_lock, flags);
+ ack_skb = idr_find(&local->ack_status_frames,
+ info->ack_frame_id);
+ if (ack_skb)
+ idr_remove(&local->ack_status_frames,
+ info->ack_frame_id);
+ spin_unlock_irqrestore(&local->ack_status_lock, flags);
+
+ /* consumes ack_skb */
+ if (ack_skb)
+ skb_complete_wifi_ack(ack_skb,
+ info->flags & IEEE80211_TX_STAT_ACK);
}
/* this was a transmitted frame, but now we want to reuse it */
@@ -610,3 +637,29 @@ void ieee80211_report_low_ack(struct ieee80211_sta *pubsta, u32 num_packets)
num_packets, GFP_ATOMIC);
}
EXPORT_SYMBOL(ieee80211_report_low_ack);
+
+void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (unlikely(info->ack_frame_id)) {
+ struct sk_buff *ack_skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&local->ack_status_lock, flags);
+ ack_skb = idr_find(&local->ack_status_frames,
+ info->ack_frame_id);
+ if (ack_skb)
+ idr_remove(&local->ack_status_frames,
+ info->ack_frame_id);
+ spin_unlock_irqrestore(&local->ack_status_lock, flags);
+
+ /* consumes ack_skb */
+ if (ack_skb)
+ dev_kfree_skb_any(ack_skb);
+ }
+
+ dev_kfree_skb_any(skb);
+}
+EXPORT_SYMBOL(ieee80211_free_txskb);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 1f8b120146d..f044963feb9 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1685,8 +1685,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
int nh_pos, h_pos;
struct sta_info *sta = NULL;
bool wme_sta = false, authorized = false, tdls_auth = false;
- struct sk_buff *tmp_skb;
bool tdls_direct = false;
+ bool multicast;
+ u32 info_flags = 0;
+ u16 info_id = 0;
if (unlikely(skb->len < ETH_HLEN)) {
ret = NETDEV_TX_OK;
@@ -1873,7 +1875,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
* if it is a multicast address (which can only happen
* in AP mode)
*/
- if (!is_multicast_ether_addr(hdr.addr1)) {
+ multicast = is_multicast_ether_addr(hdr.addr1);
+ if (!multicast) {
rcu_read_lock();
sta = sta_info_get(sdata, hdr.addr1);
if (sta) {
@@ -1914,11 +1917,54 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
goto fail;
}
+ if (unlikely(!multicast && skb->sk &&
+ skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)) {
+ struct sk_buff *orig_skb = skb;
+
+ skb = skb_clone(skb, GFP_ATOMIC);
+ if (skb) {
+ unsigned long flags;
+ int id, r;
+
+ spin_lock_irqsave(&local->ack_status_lock, flags);
+ r = idr_get_new_above(&local->ack_status_frames,
+ orig_skb, 1, &id);
+ if (r == -EAGAIN) {
+ idr_pre_get(&local->ack_status_frames,
+ GFP_ATOMIC);
+ r = idr_get_new_above(&local->ack_status_frames,
+ orig_skb, 1, &id);
+ }
+ if (WARN_ON(!id) || id > 0xffff) {
+ idr_remove(&local->ack_status_frames, id);
+ r = -ERANGE;
+ }
+ spin_unlock_irqrestore(&local->ack_status_lock, flags);
+
+ if (!r) {
+ info_id = id;
+ info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+ } else if (skb_shared(skb)) {
+ kfree_skb(orig_skb);
+ } else {
+ kfree_skb(skb);
+ skb = orig_skb;
+ }
+ } else {
+ /* couldn't clone -- lose tx status ... */
+ skb = orig_skb;
+ }
+ }
+
/*
* If the skb is shared we need to obtain our own copy.
*/
if (skb_shared(skb)) {
- tmp_skb = skb;
+ struct sk_buff *tmp_skb = skb;
+
+ /* can't happen -- skb is a clone if info_id != 0 */
+ WARN_ON(info_id);
+
skb = skb_clone(skb, GFP_ATOMIC);
kfree_skb(tmp_skb);
@@ -2019,6 +2065,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
memset(info, 0, sizeof(*info));
dev->trans_start = jiffies;
+
+ info->flags = info_flags;
+ info->ack_frame_id = info_id;
+
ieee80211_xmit(sdata, skb);
return NETDEV_TX_OK;
@@ -2279,22 +2329,31 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
struct ieee80211_mgmt *mgmt;
u8 *pos;
+ int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) +
+ sizeof(mgmt->u.beacon);
#ifdef CONFIG_MAC80211_MESH
if (!sdata->u.mesh.mesh_id_len)
goto out;
#endif
- /* headroom, head length, tail length and maximum TIM length */
- skb = dev_alloc_skb(local->tx_headroom + 400 +
- sdata->u.mesh.ie_len);
+ skb = dev_alloc_skb(local->tx_headroom +
+ hdr_len +
+ 2 + /* NULL SSID */
+ 2 + 8 + /* supported rates */
+ 2 + 3 + /* DS params */
+ 2 + (IEEE80211_MAX_SUPP_RATES - 8) +
+ 2 + sizeof(struct ieee80211_ht_cap) +
+ 2 + sizeof(struct ieee80211_ht_info) +
+ 2 + sdata->u.mesh.mesh_id_len +
+ 2 + sizeof(struct ieee80211_meshconf_ie) +
+ sdata->u.mesh.ie_len);
if (!skb)
goto out;
skb_reserve(skb, local->hw.extra_tx_headroom);
- mgmt = (struct ieee80211_mgmt *)
- skb_put(skb, 24 + sizeof(mgmt->u.beacon));
- memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
+ mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
+ memset(mgmt, 0, hdr_len);
mgmt->frame_control =
cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
memset(mgmt->da, 0xff, ETH_ALEN);
@@ -2313,6 +2372,8 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
mesh_add_ds_params_ie(skb, sdata) ||
ieee80211_add_ext_srates_ie(&sdata->vif, skb) ||
mesh_add_rsn_ie(skb, sdata) ||
+ mesh_add_ht_cap_ie(skb, sdata) ||
+ mesh_add_ht_info_ie(skb, sdata) ||
mesh_add_meshid_ie(skb, sdata) ||
mesh_add_meshconf_ie(skb, sdata) ||
mesh_add_vendor_ies(skb, sdata)) {
@@ -2355,6 +2416,37 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_beacon_get_tim);
+struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_if_ap *ap = NULL;
+ struct sk_buff *presp = NULL, *skb = NULL;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ if (sdata->vif.type != NL80211_IFTYPE_AP)
+ return NULL;
+
+ rcu_read_lock();
+
+ ap = &sdata->u.ap;
+ presp = rcu_dereference(ap->probe_resp);
+ if (!presp)
+ goto out;
+
+ skb = skb_copy(presp, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ memset(hdr->addr1, 0, sizeof(hdr->addr1));
+
+out:
+ rcu_read_unlock();
+ return skb;
+}
+EXPORT_SYMBOL(ieee80211_proberesp_get);
+
struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
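
ieee80211_proberesp_get() above follows the standard RCU read-side pattern: dereference the published template under rcu_read_lock(), copy what is needed while the lock is held, and hand only the copy back to the caller. Condensed into a generic sketch (hypothetical name, works for any RCU-published skb slot):

#include <linux/rcupdate.h>
#include <linux/skbuff.h>

static struct sk_buff *example_copy_template(struct sk_buff __rcu **slot)
{
	struct sk_buff *tmpl, *copy = NULL;

	rcu_read_lock();
	tmpl = rcu_dereference(*slot);
	if (tmpl)
		copy = skb_copy(tmpl, GFP_ATOMIC);
	rcu_read_unlock();

	return copy;	/* caller owns the copy; the template stays published */
}
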
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index eca0fad0970..3a00814699f 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -812,23 +812,8 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
offset = noffset;
}
- if (sband->ht_cap.ht_supported) {
- u16 cap = sband->ht_cap.cap;
- __le16 tmp;
-
- *pos++ = WLAN_EID_HT_CAPABILITY;
- *pos++ = sizeof(struct ieee80211_ht_cap);
- memset(pos, 0, sizeof(struct ieee80211_ht_cap));
- tmp = cpu_to_le16(cap);
- memcpy(pos, &tmp, sizeof(u16));
- pos += sizeof(u16);
- *pos++ = sband->ht_cap.ampdu_factor |
- (sband->ht_cap.ampdu_density <<
- IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
- memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
- pos += sizeof(sband->ht_cap.mcs);
- pos += 2 + 4 + 1; /* ext info, BF cap, antsel */
- }
+ if (sband->ht_cap.ht_supported)
+ pos = ieee80211_ie_build_ht_cap(pos, sband, sband->ht_cap.cap);
/*
* If adding more here, adjust code in main.c
@@ -1026,7 +1011,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
sdata->vif.type != NL80211_IFTYPE_MONITOR &&
ieee80211_sdata_running(sdata))
- res = drv_add_interface(local, &sdata->vif);
+ res = drv_add_interface(local, sdata);
}
/* add STAs back */
@@ -1077,7 +1062,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
BSS_CHANGED_BEACON_INT |
BSS_CHANGED_BSSID |
BSS_CHANGED_CQM |
- BSS_CHANGED_QOS;
+ BSS_CHANGED_QOS |
+ BSS_CHANGED_IDLE;
switch (sdata->vif.type) {
case NL80211_IFTYPE_STATION:
@@ -1090,7 +1076,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
changed |= BSS_CHANGED_IBSS;
/* fall through */
case NL80211_IFTYPE_AP:
- changed |= BSS_CHANGED_SSID;
+ changed |= BSS_CHANGED_SSID |
+ BSS_CHANGED_AP_PROBE_RESP;
/* fall through */
case NL80211_IFTYPE_MESH_POINT:
changed |= BSS_CHANGED_BEACON |
@@ -1112,6 +1099,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
}
+ ieee80211_recalc_ps(local, -1);
+
/*
* Clear the WLAN_STA_BLOCK_BA flag so new aggregation
* sessions can be established after a resume.
@@ -1367,6 +1356,103 @@ void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif)
}
EXPORT_SYMBOL(ieee80211_disable_rssi_reports);
+u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_supported_band *sband,
+ u16 cap)
+{
+ __le16 tmp;
+
+ *pos++ = WLAN_EID_HT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_ht_cap);
+ memset(pos, 0, sizeof(struct ieee80211_ht_cap));
+
+ /* capability flags */
+ tmp = cpu_to_le16(cap);
+ memcpy(pos, &tmp, sizeof(u16));
+ pos += sizeof(u16);
+
+ /* AMPDU parameters */
+ *pos++ = sband->ht_cap.ampdu_factor |
+ (sband->ht_cap.ampdu_density <<
+ IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
+
+ /* MCS set */
+ memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
+ pos += sizeof(sband->ht_cap.mcs);
+
+ /* extended capabilities */
+ pos += sizeof(__le16);
+
+ /* BF capabilities */
+ pos += sizeof(__le32);
+
+ /* antenna selection */
+ pos += sizeof(u8);
+
+ return pos;
+}
+
+u8 *ieee80211_ie_build_ht_info(u8 *pos,
+ struct ieee80211_sta_ht_cap *ht_cap,
+ struct ieee80211_channel *channel,
+ enum nl80211_channel_type channel_type)
+{
+ struct ieee80211_ht_info *ht_info;
+ /* Build HT Information */
+ *pos++ = WLAN_EID_HT_INFORMATION;
+ *pos++ = sizeof(struct ieee80211_ht_info);
+ ht_info = (struct ieee80211_ht_info *)pos;
+ ht_info->control_chan =
+ ieee80211_frequency_to_channel(channel->center_freq);
+ switch (channel_type) {
+ case NL80211_CHAN_HT40MINUS:
+ ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ break;
+ case NL80211_CHAN_HT40PLUS:
+ ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ break;
+ case NL80211_CHAN_HT20:
+ default:
+ ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ break;
+ }
+ if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+ ht_info->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
+ ht_info->operation_mode = 0x0000;
+ ht_info->stbc_param = 0x0000;
+
+ /* It seems that Basic MCS set and Supported MCS set
+ are identical for the first 10 bytes */
+ memset(&ht_info->basic_set, 0, 16);
+ memcpy(&ht_info->basic_set, &ht_cap->mcs, 10);
+
+ return pos + sizeof(struct ieee80211_ht_info);
+}
+
+enum nl80211_channel_type
+ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info)
+{
+ enum nl80211_channel_type channel_type;
+
+ if (!ht_info)
+ return NL80211_CHAN_NO_HT;
+
+ switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+ case IEEE80211_HT_PARAM_CHA_SEC_NONE:
+ channel_type = NL80211_CHAN_HT20;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ channel_type = NL80211_CHAN_HT40PLUS;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ channel_type = NL80211_CHAN_HT40MINUS;
+ break;
+ default:
+ channel_type = NL80211_CHAN_NO_HT;
+ }
+
+ return channel_type;
+}
+
int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
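
The HT information helpers added above encode the secondary-channel offset of an HT40 BSS: the secondary 20 MHz channel sits 20 MHz above (HT40+) or below (HT40-) the control channel, so the 40 MHz center frequency ends up 10 MHz away from the control channel. Spelled out as a small helper (illustrative, not part of the patch):

#include <linux/nl80211.h>

static int example_ht40_center_freq(int control_freq_mhz,
				    enum nl80211_channel_type ct)
{
	switch (ct) {
	case NL80211_CHAN_HT40PLUS:
		return control_freq_mhz + 10;	/* secondary 20 MHz above */
	case NL80211_CHAN_HT40MINUS:
		return control_freq_mhz - 10;	/* secondary 20 MHz below */
	default:
		return control_freq_mhz;	/* HT20 or no HT */
	}
}
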
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index fd52e695c07..43327115b49 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -83,7 +83,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
break;
#ifdef CONFIG_MAC80211_MESH
case NL80211_IFTYPE_MESH_POINT:
- ra = skb->data;
+ qos = true;
break;
#endif
case NL80211_IFTYPE_STATION:
@@ -143,11 +143,15 @@ void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
/* Fill in the QoS header if there is one. */
if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *p = ieee80211_get_qos_ctl(hdr);
- u8 ack_policy = 0, tid;
+ u8 ack_policy, tid;
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
- if (unlikely(sdata->local->wifi_wme_noack_test))
+ /* preserve EOSP bit */
+ ack_policy = *p & IEEE80211_QOS_CTL_EOSP;
+
+ if (unlikely(sdata->local->wifi_wme_noack_test) ||
+ is_multicast_ether_addr(hdr->addr1))
ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK;
/* qos header is 2 bytes */
*p++ = ack_policy | tid;
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index 6c53b6d1002..3dd5a89e99a 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -103,7 +103,6 @@ static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
u8 *pos;
u32 flags = channel->flags;
u16 cap = sband->ht_cap.cap;
- __le16 tmp;
if (!sband->ht_cap.ht_supported)
return;
@@ -154,34 +153,8 @@ static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
}
/* reserve and fill IE */
-
pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
- *pos++ = WLAN_EID_HT_CAPABILITY;
- *pos++ = sizeof(struct ieee80211_ht_cap);
- memset(pos, 0, sizeof(struct ieee80211_ht_cap));
-
- /* capability flags */
- tmp = cpu_to_le16(cap);
- memcpy(pos, &tmp, sizeof(u16));
- pos += sizeof(u16);
-
- /* AMPDU parameters */
- *pos++ = sband->ht_cap.ampdu_factor |
- (sband->ht_cap.ampdu_density <<
- IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
-
- /* MCS set */
- memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
- pos += sizeof(sband->ht_cap.mcs);
-
- /* extended capabilities */
- pos += sizeof(__le16);
-
- /* BF capabilities */
- pos += sizeof(__le32);
-
- /* antenna selection */
- pos += sizeof(u8);
+ ieee80211_ie_build_ht_cap(pos, sband, cap);
}
static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
@@ -969,10 +942,9 @@ static void ieee80211_work_work(struct work_struct *work)
}
if (!started && !local->tmp_channel) {
- bool on_oper_chan;
- bool tmp_chan_changed = false;
- bool on_oper_chan2;
+ bool on_oper_chan, on_oper_chan2;
enum nl80211_channel_type wk_ct;
+
on_oper_chan = ieee80211_cfg_on_oper_channel(local);
/* Work with existing channel type if possible. */
@@ -981,11 +953,6 @@ static void ieee80211_work_work(struct work_struct *work)
wk_ct = ieee80211_calc_ct(wk->chan_type,
local->hw.conf.channel_type);
- if (local->tmp_channel)
- if ((local->tmp_channel != wk->chan) ||
- (local->tmp_channel_type != wk_ct))
- tmp_chan_changed = true;
-
local->tmp_channel = wk->chan;
local->tmp_channel_type = wk_ct;
/*
@@ -1008,12 +975,7 @@ static void ieee80211_work_work(struct work_struct *work)
true,
false);
}
- } else if (tmp_chan_changed)
- /* Still off-channel, but on some other
- * channel, so update hardware.
- * PS should already be off-channel.
- */
- ieee80211_hw_config(local, 0);
+ }
started = true;
wk->timeout = jiffies;
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index f614ce7bb6e..106e15a4649 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -390,7 +390,8 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
u8 scratch[6 * AES_BLOCK_SIZE];
if (info->control.hw_key &&
- !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
+ !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
+ !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
/*
* hwaccel has no need for preallocated room for CCMP
* header or MIC fields
@@ -412,6 +413,12 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
pos = skb_push(skb, CCMP_HDR_LEN);
memmove(pos, pos + CCMP_HDR_LEN, hdrlen);
+
+ /* the HW only needs room for the IV, but not the actual IV */
+ if (info->control.hw_key &&
+ (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
+ return 0;
+
hdr = (struct ieee80211_hdr *) pos;
pos += hdrlen;
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index afca6c78948..4aa0f4b19bd 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -54,6 +54,12 @@ EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly;
EXPORT_SYMBOL(nf_hooks);
+
+#if defined(CONFIG_JUMP_LABEL)
+struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+EXPORT_SYMBOL(nf_hooks_needed);
+#endif
+
static DEFINE_MUTEX(nf_hook_mutex);
int nf_register_hook(struct nf_hook_ops *reg)
@@ -70,6 +76,9 @@ int nf_register_hook(struct nf_hook_ops *reg)
}
list_add_rcu(&reg->list, elem->list.prev);
mutex_unlock(&nf_hook_mutex);
+#if defined(CONFIG_JUMP_LABEL)
+ jump_label_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
+#endif
return 0;
}
EXPORT_SYMBOL(nf_register_hook);
@@ -79,7 +88,9 @@ void nf_unregister_hook(struct nf_hook_ops *reg)
mutex_lock(&nf_hook_mutex);
list_del_rcu(&reg->list);
mutex_unlock(&nf_hook_mutex);
-
+#if defined(CONFIG_JUMP_LABEL)
+ jump_label_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
+#endif
synchronize_net();
}
EXPORT_SYMBOL(nf_unregister_hook);
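
The jump_label counting added above exists so the hot path can skip the hook list entirely while no hook is registered for a given (pf, hooknum) pair. The header-side check is not part of this hunk, so the following is only a sketch of what such a fast path looks like with the 3.2-era API (static_branch() paired with jump_label_inc()/jump_label_dec()), assuming the usual extern declarations from linux/netfilter.h:

#include <linux/jump_label.h>
#include <linux/list.h>
#include <linux/netfilter.h>

static inline bool example_hooks_active(u_int8_t pf, unsigned int hooknum)
{
#if defined(CONFIG_JUMP_LABEL)
	/* patched-out branch: near-zero cost while the count is zero */
	if (!static_branch(&nf_hooks_needed[pf][hooknum]))
		return false;
#endif
	return !list_empty(&nf_hooks[pf][hooknum]);
}
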
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index f2d576e6b76..4015fcaf87b 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -241,7 +241,7 @@ hash_ip6_data_isnull(const struct hash_ip6_elem *elem)
static inline void
hash_ip6_data_copy(struct hash_ip6_elem *dst, const struct hash_ip6_elem *src)
{
- ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
+ dst->ip.in6 = src->ip.in6;
}
static inline void
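
Background for the ipv6_addr_copy() conversions in this and the following netfilter/netlabel hunks: struct in6_addr contains only the address bytes, and ipv6_addr_copy() was merely an inline memcpy() wrapper, so a plain structure assignment copies it equivalently and more directly. Side-by-side, for illustration:

#include <linux/in6.h>
#include <linux/string.h>

static void example_copy_in6(struct in6_addr *dst, const struct in6_addr *src)
{
	/* old helper style: effectively a memcpy() of the 128-bit address */
	memcpy(dst, src, sizeof(*dst));

	/* style used by these hunks: direct structure assignment */
	*dst = *src;
}
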
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 60d016541c5..28988196775 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -267,7 +267,7 @@ static inline void
hash_net6_data_copy(struct hash_net6_elem *dst,
const struct hash_net6_elem *src)
{
- ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
+ dst->ip.in6 = src->ip.in6;
dst->cidr = src->cidr;
}
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 093cc327020..611c3359b94 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -983,7 +983,7 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
if (!cp)
return NF_ACCEPT;
- ipv6_addr_copy(&snet.in6, &iph->saddr);
+ snet.in6 = iph->saddr;
return handle_response_icmp(AF_INET6, skb, &snet, cih->nexthdr, cp,
pp, offset, sizeof(struct ipv6hdr));
}
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 3cdd479f9b5..bcf5563e483 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -603,9 +603,9 @@ sloop:
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6) {
p += sizeof(struct ip_vs_sync_v6);
- ipv6_addr_copy(&s->v6.caddr, &cp->caddr.in6);
- ipv6_addr_copy(&s->v6.vaddr, &cp->vaddr.in6);
- ipv6_addr_copy(&s->v6.daddr, &cp->daddr.in6);
+ s->v6.caddr = cp->caddr.in6;
+ s->v6.vaddr = cp->vaddr.in6;
+ s->v6.daddr = cp->daddr.in6;
} else
#endif
{
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index aa2d7206ee8..38a576d05b4 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -235,7 +235,7 @@ __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
goto out_err;
}
}
- ipv6_addr_copy(ret_saddr, &fl6.saddr);
+ *ret_saddr = fl6.saddr;
return dst;
out_err:
@@ -279,7 +279,7 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
atomic_read(&rt->dst.__refcnt));
}
if (ret_saddr)
- ipv6_addr_copy(ret_saddr, &dest->dst_saddr.in6);
+ *ret_saddr = dest->dst_saddr.in6;
spin_unlock(&dest->dst_lock);
} else {
dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm);
@@ -705,7 +705,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* mangle the packet */
if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
goto tx_error;
- ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &cp->daddr.in6);
+ ipv6_hdr(skb)->daddr = cp->daddr.in6;
if (!local || !skb->dev) {
/* drop the old route when skb is not shared */
@@ -967,8 +967,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
iph->priority = old_iph->priority;
memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
- ipv6_addr_copy(&iph->daddr, &cp->daddr.in6);
- ipv6_addr_copy(&iph->saddr, &saddr);
+ iph->daddr = cp->daddr.in6;
+ iph->saddr = saddr;
iph->hop_limit = old_iph->hop_limit;
/* Another hack: avoid icmp_send in ip_fragment */
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index f03c2d4539f..f9368f33e7a 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -750,10 +750,10 @@ static int callforward_do_filter(const union nf_inet_addr *src,
struct rt6_info *rt1, *rt2;
memset(&fl1, 0, sizeof(fl1));
- ipv6_addr_copy(&fl1.daddr, &src->in6);
+ fl1.daddr = src->in6;
memset(&fl2, 0, sizeof(fl2));
- ipv6_addr_copy(&fl2.daddr, &dst->in6);
+ fl2.daddr = dst->in6;
if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
flowi6_to_flowi(&fl1), false)) {
if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 9e63b43faee..3ecade3966d 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -161,7 +161,7 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
struct flowi6 *fl6 = &fl.u.ip6;
memset(fl6, 0, sizeof(*fl6));
- ipv6_addr_copy(&fl6->daddr, &ipv6_hdr(skb)->saddr);
+ fl6->daddr = ipv6_hdr(skb)->saddr;
}
rcu_read_lock();
ai = nf_get_afinfo(family);
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index b77d383cec7..c047de2046a 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -42,7 +42,7 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
int route_err;
memset(&flow, 0, sizeof(flow));
- ipv6_addr_copy(&flow.daddr, addr);
+ flow.daddr = *addr;
if (dev)
flow.flowi6_oif = dev->ifindex;
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 7b372a763c6..3735297c524 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -155,12 +155,12 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
if (map6 == NULL)
goto cfg_unlbl_map_add_failure;
map6->type = NETLBL_NLTYPE_UNLABELED;
- ipv6_addr_copy(&map6->list.addr, addr6);
+ map6->list.addr = *addr6;
map6->list.addr.s6_addr32[0] &= mask6->s6_addr32[0];
map6->list.addr.s6_addr32[1] &= mask6->s6_addr32[1];
map6->list.addr.s6_addr32[2] &= mask6->s6_addr32[2];
map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3];
- ipv6_addr_copy(&map6->list.mask, mask6);
+ map6->list.mask = *mask6;
map6->list.valid = 1;
ret_val = netlbl_af6list_add(&map6->list,
&addrmap->list6);
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index bfa55586977..9879300beef 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -216,12 +216,12 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
ret_val = -ENOMEM;
goto add_failure;
}
- ipv6_addr_copy(&map->list.addr, addr);
+ map->list.addr = *addr;
map->list.addr.s6_addr32[0] &= mask->s6_addr32[0];
map->list.addr.s6_addr32[1] &= mask->s6_addr32[1];
map->list.addr.s6_addr32[2] &= mask->s6_addr32[2];
map->list.addr.s6_addr32[3] &= mask->s6_addr32[3];
- ipv6_addr_copy(&map->list.mask, mask);
+ map->list.mask = *mask;
map->list.valid = 1;
map->type = entry->type;
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index e251c2c8852..049ccd2447d 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -300,12 +300,12 @@ static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface,
if (entry == NULL)
return -ENOMEM;
- ipv6_addr_copy(&entry->list.addr, addr);
+ entry->list.addr = *addr;
entry->list.addr.s6_addr32[0] &= mask->s6_addr32[0];
entry->list.addr.s6_addr32[1] &= mask->s6_addr32[1];
entry->list.addr.s6_addr32[2] &= mask->s6_addr32[2];
entry->list.addr.s6_addr32[3] &= mask->s6_addr32[3];
- ipv6_addr_copy(&entry->list.mask, mask);
+ entry->list.mask = *mask;
entry->list.valid = 1;
entry->secid = secid;
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 3925c657876..fe5ca89abfc 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -126,7 +126,10 @@ static inline int nci_request(struct nci_dev *ndev,
static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
{
- nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 0, NULL);
+ struct nci_core_reset_cmd cmd;
+
+ cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG;
+ nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd);
}
static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
@@ -136,17 +139,11 @@ static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
{
- struct nci_core_conn_create_cmd conn_cmd;
struct nci_rf_disc_map_cmd cmd;
struct disc_map_config *cfg = cmd.mapping_configs;
__u8 *num = &cmd.num_mapping_configs;
int i;
- /* create static rf connection */
- conn_cmd.target_handle = 0;
- conn_cmd.num_target_specific_params = 0;
- nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, 2, &conn_cmd);
-
/* set rf mapping configurations */
*num = 0;
@@ -470,7 +467,7 @@ static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
ndev->data_exchange_cb = cb;
ndev->data_exchange_cb_context = cb_context;
- rc = nci_send_data(ndev, ndev->conn_id, skb);
+ rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
if (rc)
clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
@@ -726,7 +723,10 @@ static void nci_tx_work(struct work_struct *work)
if (!skb)
return;
- atomic_dec(&ndev->credits_cnt);
+ /* Check if data flow control is used */
+ if (atomic_read(&ndev->credits_cnt) !=
+ NCI_DATA_FLOW_CONTROL_NOT_USED)
+ atomic_dec(&ndev->credits_cnt);
nfc_dbg("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d",
nci_pbf(skb->data),
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
index e5ed90fc1a9..511fb96e21b 100644
--- a/net/nfc/nci/data.c
+++ b/net/nfc/nci/data.c
@@ -95,7 +95,8 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
__skb_queue_head_init(&frags_q);
while (total_len) {
- frag_len = min_t(int, total_len, ndev->max_pkt_payload_size);
+ frag_len =
+ min_t(int, total_len, ndev->max_data_pkt_payload_size);
skb_frag = nci_skb_alloc(ndev,
(NCI_DATA_HDR_SIZE + frag_len),
@@ -151,7 +152,7 @@ int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb)
nfc_dbg("entry, conn_id 0x%x, plen %d", conn_id, skb->len);
/* check if the packet need to be fragmented */
- if (skb->len <= ndev->max_pkt_payload_size) {
+ if (skb->len <= ndev->max_data_pkt_payload_size) {
/* no need to fragment packet */
nci_push_data_hdr(ndev, conn_id, skb, NCI_PBF_LAST);
diff --git a/net/nfc/nci/lib.c b/net/nfc/nci/lib.c
index b19dc2fa90e..e99adcfb1bc 100644
--- a/net/nfc/nci/lib.c
+++ b/net/nfc/nci/lib.c
@@ -42,12 +42,9 @@ int nci_to_errno(__u8 code)
case NCI_STATUS_REJECTED:
return -EBUSY;
- case NCI_STATUS_MESSAGE_CORRUPTED:
+ case NCI_STATUS_RF_FRAME_CORRUPTED:
return -EBADMSG;
- case NCI_STATUS_BUFFER_FULL:
- return -ENOBUFS;
-
case NCI_STATUS_NOT_INITIALIZED:
return -EHOSTDOWN;
@@ -80,9 +77,6 @@ int nci_to_errno(__u8 code)
case NCI_STATUS_NFCEE_TIMEOUT_ERROR:
return -ETIMEDOUT;
- case NCI_STATUS_RF_LINK_LOSS_ERROR:
- return -ENOLINK;
-
case NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED:
return -EDQUOT;
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index 96633f5cda4..c1bf54172c2 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -54,7 +54,7 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
ntf->conn_entries[i].conn_id,
ntf->conn_entries[i].credits);
- if (ntf->conn_entries[i].conn_id == ndev->conn_id) {
+ if (ntf->conn_entries[i].conn_id == NCI_STATIC_RF_CONN_ID) {
/* found static rf connection */
atomic_add(ntf->conn_entries[i].credits,
&ndev->credits_cnt);
@@ -66,22 +66,12 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
queue_work(ndev->tx_wq, &ndev->tx_work);
}
-static void nci_rf_field_info_ntf_packet(struct nci_dev *ndev,
- struct sk_buff *skb)
-{
- struct nci_rf_field_info_ntf *ntf = (void *) skb->data;
-
- nfc_dbg("entry, rf_field_status %d", ntf->rf_field_status);
-}
-
-static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
- struct nci_rf_activate_ntf *ntf, __u8 *data)
+static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
+ struct nci_rf_intf_activated_ntf *ntf, __u8 *data)
{
struct rf_tech_specific_params_nfca_poll *nfca_poll;
- struct activation_params_nfca_poll_iso_dep *nfca_poll_iso_dep;
nfca_poll = &ntf->rf_tech_specific_params.nfca_poll;
- nfca_poll_iso_dep = &ntf->activation_params.nfca_poll_iso_dep;
nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
data += 2;
@@ -100,32 +90,32 @@ static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
if (nfca_poll->sel_res_len != 0)
nfca_poll->sel_res = *data++;
- ntf->rf_interface_type = *data++;
- ntf->activation_params_len = *data++;
-
- nfc_dbg("sel_res_len %d, sel_res 0x%x, rf_interface_type %d, activation_params_len %d",
+ nfc_dbg("sel_res_len %d, sel_res 0x%x",
nfca_poll->sel_res_len,
- nfca_poll->sel_res,
- ntf->rf_interface_type,
- ntf->activation_params_len);
-
- switch (ntf->rf_interface_type) {
- case NCI_RF_INTERFACE_ISO_DEP:
- nfca_poll_iso_dep->rats_res_len = *data++;
- if (nfca_poll_iso_dep->rats_res_len > 0) {
- memcpy(nfca_poll_iso_dep->rats_res,
+ nfca_poll->sel_res);
+
+ return data;
+}
+
+static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
+ struct nci_rf_intf_activated_ntf *ntf, __u8 *data)
+{
+ struct activation_params_nfca_poll_iso_dep *nfca_poll;
+
+ switch (ntf->activation_rf_tech_and_mode) {
+ case NCI_NFC_A_PASSIVE_POLL_MODE:
+ nfca_poll = &ntf->activation_params.nfca_poll_iso_dep;
+ nfca_poll->rats_res_len = *data++;
+ if (nfca_poll->rats_res_len > 0) {
+ memcpy(nfca_poll->rats_res,
data,
- nfca_poll_iso_dep->rats_res_len);
+ nfca_poll->rats_res_len);
}
break;
- case NCI_RF_INTERFACE_FRAME:
- /* no activation params */
- break;
-
default:
- nfc_err("unsupported rf_interface_type 0x%x",
- ntf->rf_interface_type);
+ nfc_err("unsupported activation_rf_tech_and_mode 0x%x",
+ ntf->activation_rf_tech_and_mode);
return -EPROTO;
}
@@ -133,7 +123,7 @@ static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
}
static void nci_target_found(struct nci_dev *ndev,
- struct nci_rf_activate_ntf *ntf)
+ struct nci_rf_intf_activated_ntf *ntf)
{
struct nfc_target nfc_tgt;
@@ -141,6 +131,8 @@ static void nci_target_found(struct nci_dev *ndev,
nfc_tgt.supported_protocols = NFC_PROTO_MIFARE_MASK;
else if (ntf->rf_protocol == NCI_RF_PROTOCOL_ISO_DEP) /* 4A */
nfc_tgt.supported_protocols = NFC_PROTO_ISO14443_MASK;
+ else
+ nfc_tgt.supported_protocols = 0;
nfc_tgt.sens_res = ntf->rf_tech_specific_params.nfca_poll.sens_res;
nfc_tgt.sel_res = ntf->rf_tech_specific_params.nfca_poll.sel_res;
@@ -158,49 +150,86 @@ static void nci_target_found(struct nci_dev *ndev,
nfc_targets_found(ndev->nfc_dev, &nfc_tgt, 1);
}
-static void nci_rf_activate_ntf_packet(struct nci_dev *ndev,
- struct sk_buff *skb)
+static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
{
- struct nci_rf_activate_ntf ntf;
+ struct nci_rf_intf_activated_ntf ntf;
__u8 *data = skb->data;
- int rc = -1;
+ int err = 0;
clear_bit(NCI_DISCOVERY, &ndev->flags);
set_bit(NCI_POLL_ACTIVE, &ndev->flags);
- ntf.target_handle = *data++;
+ ntf.rf_discovery_id = *data++;
+ ntf.rf_interface_type = *data++;
ntf.rf_protocol = *data++;
- ntf.rf_tech_and_mode = *data++;
+ ntf.activation_rf_tech_and_mode = *data++;
ntf.rf_tech_specific_params_len = *data++;
- nfc_dbg("target_handle %d, rf_protocol 0x%x, rf_tech_and_mode 0x%x, rf_tech_specific_params_len %d",
- ntf.target_handle,
- ntf.rf_protocol,
- ntf.rf_tech_and_mode,
+ nfc_dbg("rf_discovery_id %d", ntf.rf_discovery_id);
+ nfc_dbg("rf_interface_type 0x%x", ntf.rf_interface_type);
+ nfc_dbg("rf_protocol 0x%x", ntf.rf_protocol);
+ nfc_dbg("activation_rf_tech_and_mode 0x%x",
+ ntf.activation_rf_tech_and_mode);
+ nfc_dbg("rf_tech_specific_params_len %d",
ntf.rf_tech_specific_params_len);
- switch (ntf.rf_tech_and_mode) {
- case NCI_NFC_A_PASSIVE_POLL_MODE:
- rc = nci_rf_activate_nfca_passive_poll(ndev, &ntf,
- data);
- break;
+ if (ntf.rf_tech_specific_params_len > 0) {
+ switch (ntf.activation_rf_tech_and_mode) {
+ case NCI_NFC_A_PASSIVE_POLL_MODE:
+ data = nci_extract_rf_params_nfca_passive_poll(ndev,
+ &ntf, data);
+ break;
+
+ default:
+ nfc_err("unsupported activation_rf_tech_and_mode 0x%x",
+ ntf.activation_rf_tech_and_mode);
+ return;
+ }
+ }
- default:
- nfc_err("unsupported rf_tech_and_mode 0x%x",
- ntf.rf_tech_and_mode);
- return;
+ ntf.data_exch_rf_tech_and_mode = *data++;
+ ntf.data_exch_tx_bit_rate = *data++;
+ ntf.data_exch_rx_bit_rate = *data++;
+ ntf.activation_params_len = *data++;
+
+ nfc_dbg("data_exch_rf_tech_and_mode 0x%x",
+ ntf.data_exch_rf_tech_and_mode);
+ nfc_dbg("data_exch_tx_bit_rate 0x%x",
+ ntf.data_exch_tx_bit_rate);
+ nfc_dbg("data_exch_rx_bit_rate 0x%x",
+ ntf.data_exch_rx_bit_rate);
+ nfc_dbg("activation_params_len %d",
+ ntf.activation_params_len);
+
+ if (ntf.activation_params_len > 0) {
+ switch (ntf.rf_interface_type) {
+ case NCI_RF_INTERFACE_ISO_DEP:
+ err = nci_extract_activation_params_iso_dep(ndev,
+ &ntf, data);
+ break;
+
+ case NCI_RF_INTERFACE_FRAME:
+ /* no activation params */
+ break;
+
+ default:
+ nfc_err("unsupported rf_interface_type 0x%x",
+ ntf.rf_interface_type);
+ return;
+ }
}
- if (!rc)
+ if (!err)
nci_target_found(ndev, &ntf);
}
static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
struct sk_buff *skb)
{
- __u8 type = skb->data[0];
+ struct nci_rf_deactivate_ntf *ntf = (void *) skb->data;
- nfc_dbg("entry, type 0x%x", type);
+ nfc_dbg("entry, type 0x%x, reason 0x%x", ntf->type, ntf->reason);
clear_bit(NCI_POLL_ACTIVE, &ndev->flags);
ndev->target_active_prot = 0;
@@ -214,6 +243,9 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
ndev->rx_data_reassembly = 0;
}
+ /* set the available credits to initial value */
+ atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
+
/* complete the data exchange transaction, if exists */
if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
nci_data_exchange_complete(ndev, NULL, -EIO);
@@ -237,12 +269,8 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
nci_core_conn_credits_ntf_packet(ndev, skb);
break;
- case NCI_OP_RF_FIELD_INFO_NTF:
- nci_rf_field_info_ntf_packet(ndev, skb);
- break;
-
- case NCI_OP_RF_ACTIVATE_NTF:
- nci_rf_activate_ntf_packet(ndev, skb);
+ case NCI_OP_RF_INTF_ACTIVATED_NTF:
+ nci_rf_intf_activated_ntf_packet(ndev, skb);
break;
case NCI_OP_RF_DEACTIVATE_NTF:
diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c
index 0403d4cd091..0591f5aff89 100644
--- a/net/nfc/nci/rsp.c
+++ b/net/nfc/nci/rsp.c
@@ -42,10 +42,11 @@ static void nci_core_reset_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
nfc_dbg("entry, status 0x%x", rsp->status);
- if (rsp->status == NCI_STATUS_OK)
+ if (rsp->status == NCI_STATUS_OK) {
ndev->nci_ver = rsp->nci_ver;
-
- nfc_dbg("nci_ver 0x%x", ndev->nci_ver);
+ nfc_dbg("nci_ver 0x%x, config_status 0x%x",
+ rsp->nci_ver, rsp->config_status);
+ }
nci_req_complete(ndev, rsp->status);
}
@@ -58,13 +59,13 @@ static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
nfc_dbg("entry, status 0x%x", rsp_1->status);
if (rsp_1->status != NCI_STATUS_OK)
- return;
+ goto exit;
ndev->nfcc_features = __le32_to_cpu(rsp_1->nfcc_features);
ndev->num_supported_rf_interfaces = rsp_1->num_supported_rf_interfaces;
if (ndev->num_supported_rf_interfaces >
- NCI_MAX_SUPPORTED_RF_INTERFACES) {
+ NCI_MAX_SUPPORTED_RF_INTERFACES) {
ndev->num_supported_rf_interfaces =
NCI_MAX_SUPPORTED_RF_INTERFACES;
}
@@ -73,20 +74,26 @@ static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
rsp_1->supported_rf_interfaces,
ndev->num_supported_rf_interfaces);
- rsp_2 = (void *) (skb->data + 6 + ndev->num_supported_rf_interfaces);
+ rsp_2 = (void *) (skb->data + 6 + rsp_1->num_supported_rf_interfaces);
ndev->max_logical_connections =
rsp_2->max_logical_connections;
ndev->max_routing_table_size =
__le16_to_cpu(rsp_2->max_routing_table_size);
- ndev->max_control_packet_payload_length =
- rsp_2->max_control_packet_payload_length;
- ndev->rf_sending_buffer_size =
- __le16_to_cpu(rsp_2->rf_sending_buffer_size);
- ndev->rf_receiving_buffer_size =
- __le16_to_cpu(rsp_2->rf_receiving_buffer_size);
- ndev->manufacturer_id =
- __le16_to_cpu(rsp_2->manufacturer_id);
+ ndev->max_ctrl_pkt_payload_len =
+ rsp_2->max_ctrl_pkt_payload_len;
+ ndev->max_size_for_large_params =
+ __le16_to_cpu(rsp_2->max_size_for_large_params);
+ ndev->max_data_pkt_payload_size =
+ rsp_2->max_data_pkt_payload_size;
+ ndev->initial_num_credits =
+ rsp_2->initial_num_credits;
+ ndev->manufact_id =
+ rsp_2->manufact_id;
+ ndev->manufact_specific_info =
+ __le32_to_cpu(rsp_2->manufact_specific_info);
+
+ atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
nfc_dbg("nfcc_features 0x%x",
ndev->nfcc_features);
@@ -104,39 +111,23 @@ static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
ndev->max_logical_connections);
nfc_dbg("max_routing_table_size %d",
ndev->max_routing_table_size);
- nfc_dbg("max_control_packet_payload_length %d",
- ndev->max_control_packet_payload_length);
- nfc_dbg("rf_sending_buffer_size %d",
- ndev->rf_sending_buffer_size);
- nfc_dbg("rf_receiving_buffer_size %d",
- ndev->rf_receiving_buffer_size);
- nfc_dbg("manufacturer_id 0x%x",
- ndev->manufacturer_id);
-
+ nfc_dbg("max_ctrl_pkt_payload_len %d",
+ ndev->max_ctrl_pkt_payload_len);
+ nfc_dbg("max_size_for_large_params %d",
+ ndev->max_size_for_large_params);
+ nfc_dbg("max_data_pkt_payload_size %d",
+ ndev->max_data_pkt_payload_size);
+ nfc_dbg("initial_num_credits %d",
+ ndev->initial_num_credits);
+ nfc_dbg("manufact_id 0x%x",
+ ndev->manufact_id);
+ nfc_dbg("manufact_specific_info 0x%x",
+ ndev->manufact_specific_info);
+
+exit:
nci_req_complete(ndev, rsp_1->status);
}
-static void nci_core_conn_create_rsp_packet(struct nci_dev *ndev,
- struct sk_buff *skb)
-{
- struct nci_core_conn_create_rsp *rsp = (void *) skb->data;
-
- nfc_dbg("entry, status 0x%x", rsp->status);
-
- if (rsp->status != NCI_STATUS_OK)
- return;
-
- ndev->max_pkt_payload_size = rsp->max_pkt_payload_size;
- ndev->initial_num_credits = rsp->initial_num_credits;
- ndev->conn_id = rsp->conn_id;
-
- atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
-
- nfc_dbg("max_pkt_payload_size %d", ndev->max_pkt_payload_size);
- nfc_dbg("initial_num_credits %d", ndev->initial_num_credits);
- nfc_dbg("conn_id %d", ndev->conn_id);
-}
-
static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev,
struct sk_buff *skb)
{
@@ -196,10 +187,6 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
nci_core_init_rsp_packet(ndev, skb);
break;
- case NCI_OP_CORE_CONN_CREATE_RSP:
- nci_core_conn_create_rsp_packet(ndev, skb);
- break;
-
case NCI_OP_RF_DISCOVER_MAP_RSP:
nci_rf_disc_map_rsp_packet(ndev, skb);
break;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 82a6f34d39d..0da505c9ac2 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1499,10 +1499,11 @@ retry:
if (!skb) {
size_t reserved = LL_RESERVED_SPACE(dev);
+ int tlen = dev->needed_tailroom;
unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
rcu_read_unlock();
- skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
+ skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
if (skb == NULL)
return -ENOBUFS;
/* FIXME: Save some space for broken drivers that write a hard
@@ -1944,7 +1945,7 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
void *frame, struct net_device *dev, int size_max,
- __be16 proto, unsigned char *addr)
+ __be16 proto, unsigned char *addr, int hlen)
{
union {
struct tpacket_hdr *h1;
@@ -1978,7 +1979,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
return -EMSGSIZE;
}
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ skb_reserve(skb, hlen);
skb_reset_network_header(skb);
data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
@@ -2053,6 +2054,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
unsigned char *addr;
int len_sum = 0;
int status = 0;
+ int hlen, tlen;
mutex_lock(&po->pg_vec_lock);
@@ -2101,16 +2103,17 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
}
status = TP_STATUS_SEND_REQUEST;
+ hlen = LL_RESERVED_SPACE(dev);
+ tlen = dev->needed_tailroom;
skb = sock_alloc_send_skb(&po->sk,
- LL_ALLOCATED_SPACE(dev)
- + sizeof(struct sockaddr_ll),
+ hlen + tlen + sizeof(struct sockaddr_ll),
0, &err);
if (unlikely(skb == NULL))
goto out_status;
tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
- addr);
+ addr, hlen);
if (unlikely(tp_len < 0)) {
if (po->tp_loss) {
@@ -2207,6 +2210,7 @@ static int packet_snd(struct socket *sock,
int vnet_hdr_len;
struct packet_sock *po = pkt_sk(sk);
unsigned short gso_type = 0;
+ int hlen, tlen;
/*
* Get and verify the address.
@@ -2291,8 +2295,9 @@ static int packet_snd(struct socket *sock,
goto out_unlock;
err = -ENOBUFS;
- skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
- LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
+ hlen = LL_RESERVED_SPACE(dev);
+ tlen = dev->needed_tailroom;
+ skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
msg->msg_flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto out_unlock;
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 2ba6e9fb4cb..9f60008740e 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -534,6 +534,29 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
return pipe_handler_send_created_ind(sk);
}
+static int pep_enableresp_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ struct pnpipehdr *hdr = pnp_hdr(skb);
+
+ if (hdr->error_code != PN_PIPE_NO_ERROR)
+ return -ECONNREFUSED;
+
+ return pep_indicate(sk, PNS_PIPE_ENABLED_IND, 0 /* sub-blocks */,
+ NULL, 0, GFP_ATOMIC);
+
+}
+
+static void pipe_start_flow_control(struct sock *sk)
+{
+ struct pep_sock *pn = pep_sk(sk);
+
+ if (!pn_flow_safe(pn->tx_fc)) {
+ atomic_set(&pn->tx_credits, 1);
+ sk->sk_write_space(sk);
+ }
+ pipe_grant_credits(sk, GFP_ATOMIC);
+}
+
/* Queue an skb to an actively connected sock.
* Socket lock must be held. */
static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
@@ -579,13 +602,25 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
sk->sk_state = TCP_CLOSE_WAIT;
break;
}
+ if (pn->init_enable == PN_PIPE_DISABLE)
+ sk->sk_state = TCP_SYN_RECV;
+ else {
+ sk->sk_state = TCP_ESTABLISHED;
+ pipe_start_flow_control(sk);
+ }
+ break;
- sk->sk_state = TCP_ESTABLISHED;
- if (!pn_flow_safe(pn->tx_fc)) {
- atomic_set(&pn->tx_credits, 1);
- sk->sk_write_space(sk);
+ case PNS_PEP_ENABLE_RESP:
+ if (sk->sk_state != TCP_SYN_SENT)
+ break;
+
+ if (pep_enableresp_rcv(sk, skb)) {
+ sk->sk_state = TCP_CLOSE_WAIT;
+ break;
}
- pipe_grant_credits(sk, GFP_ATOMIC);
+
+ sk->sk_state = TCP_ESTABLISHED;
+ pipe_start_flow_control(sk);
break;
case PNS_PEP_DISCONNECT_RESP:
@@ -864,14 +899,32 @@ static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len)
int err;
u8 data[4] = { 0 /* sub-blocks */, PAD, PAD, PAD };
- pn->pipe_handle = 1; /* anything but INVALID_HANDLE */
+ if (pn->pipe_handle == PN_PIPE_INVALID_HANDLE)
+ pn->pipe_handle = 1; /* anything but INVALID_HANDLE */
+
err = pipe_handler_request(sk, PNS_PEP_CONNECT_REQ,
- PN_PIPE_ENABLE, data, 4);
+ pn->init_enable, data, 4);
if (err) {
pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
return err;
}
+
sk->sk_state = TCP_SYN_SENT;
+
+ return 0;
+}
+
+static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len)
+{
+ int err;
+
+ err = pipe_handler_request(sk, PNS_PEP_ENABLE_REQ, PAD,
+ NULL, 0);
+ if (err)
+ return err;
+
+ sk->sk_state = TCP_SYN_SENT;
+
return 0;
}
@@ -879,11 +932,14 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
struct pep_sock *pn = pep_sk(sk);
int answ;
+ int ret = -ENOIOCTLCMD;
switch (cmd) {
case SIOCINQ:
- if (sk->sk_state == TCP_LISTEN)
- return -EINVAL;
+ if (sk->sk_state == TCP_LISTEN) {
+ ret = -EINVAL;
+ break;
+ }
lock_sock(sk);
if (sock_flag(sk, SOCK_URGINLINE) &&
@@ -894,10 +950,22 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
else
answ = 0;
release_sock(sk);
- return put_user(answ, (int __user *)arg);
+ ret = put_user(answ, (int __user *)arg);
+ break;
+
+ case SIOCPNENABLEPIPE:
+ lock_sock(sk);
+ if (sk->sk_state == TCP_SYN_SENT)
+ ret = -EBUSY;
+ else if (sk->sk_state == TCP_ESTABLISHED)
+ ret = -EISCONN;
+ else
+ ret = pep_sock_enable(sk, NULL, 0);
+ release_sock(sk);
+ break;
}
- return -ENOIOCTLCMD;
+ return ret;
}
static int pep_init(struct sock *sk)
@@ -960,6 +1028,18 @@ static int pep_setsockopt(struct sock *sk, int level, int optname,
}
goto out_norel;
+ case PNPIPE_HANDLE:
+ if ((sk->sk_state == TCP_CLOSE) &&
+ (val >= 0) && (val < PN_PIPE_INVALID_HANDLE))
+ pn->pipe_handle = val;
+ else
+ err = -EINVAL;
+ break;
+
+ case PNPIPE_INITSTATE:
+ pn->init_enable = !!val;
+ break;
+
default:
err = -ENOPROTOOPT;
}
@@ -995,6 +1075,10 @@ static int pep_getsockopt(struct sock *sk, int level, int optname,
return -EINVAL;
break;
+ case PNPIPE_INITSTATE:
+ val = pn->init_enable;
+ break;
+
default:
return -ENOPROTOOPT;
}
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 5be19575c34..354760ebbbd 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -644,7 +644,7 @@ static ssize_t rfkill_soft_store(struct device *dev,
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- err = strict_strtoul(buf, 0, &state);
+ err = kstrtoul(buf, 0, &state);
if (err)
return err;
@@ -688,7 +688,7 @@ static ssize_t rfkill_state_store(struct device *dev,
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- err = strict_strtoul(buf, 0, &state);
+ err = kstrtoul(buf, 0, &state);
if (err)
return err;
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 43ea7de2fc8..4cba13e46ff 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -306,10 +306,9 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
td->data_len = len;
if (len > 0) {
- td->data = kmalloc(len, GFP_KERNEL);
+ td->data = kmemdup(xdr, len, GFP_KERNEL);
if (!td->data)
return -ENOMEM;
- memcpy(td->data, xdr, len);
len = (len + 3) & ~3;
toklen -= len;
xdr += len >> 2;
@@ -401,10 +400,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
_debug("ticket len %u", len);
if (len > 0) {
- *_ticket = kmalloc(len, GFP_KERNEL);
+ *_ticket = kmemdup(xdr, len, GFP_KERNEL);
if (!*_ticket)
return -ENOMEM;
- memcpy(*_ticket, xdr, len);
len = (len + 3) & ~3;
toklen -= len;
xdr += len >> 2;
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 3422b25df9e..061bcb744bb 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -152,15 +152,14 @@ static bool choke_match_flow(struct sk_buff *skb1,
{
int off1, off2, poff;
const u32 *ports1, *ports2;
+ u32 _ports1, _ports2;
u8 ip_proto;
__u32 hash1;
if (skb1->protocol != skb2->protocol)
return false;
- /* Use hash value as quick check
- * Assumes that __skb_get_rxhash makes IP header and ports linear
- */
+ /* Use rxhash value as quick check */
hash1 = skb_get_rxhash(skb1);
if (!hash1 || hash1 != skb_get_rxhash(skb2))
return false;
@@ -172,10 +171,12 @@ static bool choke_match_flow(struct sk_buff *skb1,
switch (skb1->protocol) {
case __constant_htons(ETH_P_IP): {
const struct iphdr *ip1, *ip2;
+ struct iphdr _ip1, _ip2;
- ip1 = (const struct iphdr *) (skb1->data + off1);
- ip2 = (const struct iphdr *) (skb2->data + off2);
-
+ ip1 = skb_header_pointer(skb1, off1, sizeof(_ip1), &_ip1);
+ ip2 = skb_header_pointer(skb2, off2, sizeof(_ip2), &_ip2);
+ if (!ip1 || !ip2)
+ return false;
ip_proto = ip1->protocol;
if (ip_proto != ip2->protocol ||
ip1->saddr != ip2->saddr || ip1->daddr != ip2->daddr)
@@ -190,9 +191,12 @@ static bool choke_match_flow(struct sk_buff *skb1,
case __constant_htons(ETH_P_IPV6): {
const struct ipv6hdr *ip1, *ip2;
+ struct ipv6hdr _ip1, _ip2;
- ip1 = (const struct ipv6hdr *) (skb1->data + off1);
- ip2 = (const struct ipv6hdr *) (skb2->data + off2);
+ ip1 = skb_header_pointer(skb1, off1, sizeof(_ip1), &_ip1);
+ ip2 = skb_header_pointer(skb2, off2, sizeof(_ip2), &_ip2);
+ if (!ip1 || !ip2)
+ return false;
ip_proto = ip1->nexthdr;
if (ip_proto != ip2->nexthdr ||
@@ -214,8 +218,11 @@ static bool choke_match_flow(struct sk_buff *skb1,
off1 += poff;
off2 += poff;
- ports1 = (__force u32 *)(skb1->data + off1);
- ports2 = (__force u32 *)(skb2->data + off2);
+ ports1 = skb_header_pointer(skb1, off1, sizeof(_ports1), &_ports1);
+ ports2 = skb_header_pointer(skb2, off2, sizeof(_ports2), &_ports2);
+ if (!ports1 || !ports2)
+ return false;
+
return *ports1 == *ports2;
}
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 69fca279880..79ac1458c2b 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -246,6 +246,7 @@ static void dev_watchdog(unsigned long arg)
time_after(jiffies, (trans_start +
dev->watchdog_timeo))) {
some_queue_timedout = 1;
+ txq->trans_timeout++;
break;
}
}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 810427833bc..91f479121c5 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -107,7 +107,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
if (addr) {
addr->a.v6.sin6_family = AF_INET6;
addr->a.v6.sin6_port = 0;
- ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifa->addr);
+ addr->a.v6.sin6_addr = ifa->addr;
addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
addr->valid = 1;
spin_lock_bh(&sctp_local_addr_lock);
@@ -219,8 +219,8 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
/* Fill in the dest address from the route entry passed with the skb
* and the source address from the transport.
*/
- ipv6_addr_copy(&fl6.daddr, &transport->ipaddr.v6.sin6_addr);
- ipv6_addr_copy(&fl6.saddr, &transport->saddr.v6.sin6_addr);
+ fl6.daddr = transport->ipaddr.v6.sin6_addr;
+ fl6.saddr = transport->saddr.v6.sin6_addr;
fl6.flowlabel = np->flow_label;
IP6_ECN_flow_xmit(sk, fl6.flowlabel);
@@ -231,7 +231,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
if (np->opt && np->opt->srcrt) {
struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
- ipv6_addr_copy(&fl6.daddr, rt0->addr);
+ fl6.daddr = *rt0->addr;
}
SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n",
@@ -265,7 +265,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
sctp_scope_t scope;
memset(fl6, 0, sizeof(struct flowi6));
- ipv6_addr_copy(&fl6->daddr, &daddr->v6.sin6_addr);
+ fl6->daddr = daddr->v6.sin6_addr;
fl6->fl6_dport = daddr->v6.sin6_port;
fl6->flowi6_proto = IPPROTO_SCTP;
if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
@@ -277,7 +277,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
fl6->fl6_sport = htons(asoc->base.bind_addr.port);
if (saddr) {
- ipv6_addr_copy(&fl6->saddr, &saddr->v6.sin6_addr);
+ fl6->saddr = saddr->v6.sin6_addr;
fl6->fl6_sport = saddr->v6.sin6_port;
SCTP_DEBUG_PRINTK("SRC=%pI6 - ", &fl6->saddr);
}
@@ -334,7 +334,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
}
rcu_read_unlock();
if (baddr) {
- ipv6_addr_copy(&fl6->saddr, &baddr->v6.sin6_addr);
+ fl6->saddr = baddr->v6.sin6_addr;
fl6->fl6_sport = baddr->v6.sin6_port;
dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
}
@@ -375,7 +375,7 @@ static void sctp_v6_get_saddr(struct sctp_sock *sk,
if (t->dst) {
saddr->v6.sin6_family = AF_INET6;
- ipv6_addr_copy(&saddr->v6.sin6_addr, &fl6->saddr);
+ saddr->v6.sin6_addr = fl6->saddr;
}
}
@@ -400,7 +400,7 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
if (addr) {
addr->a.v6.sin6_family = AF_INET6;
addr->a.v6.sin6_port = 0;
- ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifp->addr);
+ addr->a.v6.sin6_addr = ifp->addr;
addr->a.v6.sin6_scope_id = dev->ifindex;
addr->valid = 1;
INIT_LIST_HEAD(&addr->list);
@@ -416,7 +416,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb,
int is_saddr)
{
- void *from;
__be16 *port;
struct sctphdr *sh;
@@ -428,12 +427,11 @@ static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb,
sh = sctp_hdr(skb);
if (is_saddr) {
*port = sh->source;
- from = &ipv6_hdr(skb)->saddr;
+ addr->v6.sin6_addr = ipv6_hdr(skb)->saddr;
} else {
*port = sh->dest;
- from = &ipv6_hdr(skb)->daddr;
+ addr->v6.sin6_addr = ipv6_hdr(skb)->daddr;
}
- ipv6_addr_copy(&addr->v6.sin6_addr, from);
}
/* Initialize an sctp_addr from a socket. */
@@ -441,7 +439,7 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk)
{
addr->v6.sin6_family = AF_INET6;
addr->v6.sin6_port = 0;
- ipv6_addr_copy(&addr->v6.sin6_addr, &inet6_sk(sk)->rcv_saddr);
+ addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr;
}
/* Initialize sk->sk_rcv_saddr from sctp_addr. */
@@ -454,7 +452,7 @@ static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
inet6_sk(sk)->rcv_saddr.s6_addr32[3] =
addr->v4.sin_addr.s_addr;
} else {
- ipv6_addr_copy(&inet6_sk(sk)->rcv_saddr, &addr->v6.sin6_addr);
+ inet6_sk(sk)->rcv_saddr = addr->v6.sin6_addr;
}
}
@@ -467,7 +465,7 @@ static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff);
inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
} else {
- ipv6_addr_copy(&inet6_sk(sk)->daddr, &addr->v6.sin6_addr);
+ inet6_sk(sk)->daddr = addr->v6.sin6_addr;
}
}
@@ -479,7 +477,7 @@ static void sctp_v6_from_addr_param(union sctp_addr *addr,
addr->v6.sin6_family = AF_INET6;
addr->v6.sin6_port = port;
addr->v6.sin6_flowinfo = 0; /* BUG */
- ipv6_addr_copy(&addr->v6.sin6_addr, &param->v6.addr);
+ addr->v6.sin6_addr = param->v6.addr;
addr->v6.sin6_scope_id = iif;
}
@@ -493,7 +491,7 @@ static int sctp_v6_to_addr_param(const union sctp_addr *addr,
param->v6.param_hdr.type = SCTP_PARAM_IPV6_ADDRESS;
param->v6.param_hdr.length = htons(length);
- ipv6_addr_copy(&param->v6.addr, &addr->v6.sin6_addr);
+ param->v6.addr = addr->v6.sin6_addr;
return length;
}
@@ -504,7 +502,7 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
{
addr->sa.sa_family = AF_INET6;
addr->v6.sin6_port = port;
- ipv6_addr_copy(&addr->v6.sin6_addr, saddr);
+ addr->v6.sin6_addr = *saddr;
}
/* Compare addresses exactly.
@@ -759,7 +757,7 @@ static void sctp_inet6_event_msgname(struct sctp_ulpevent *event,
}
sin6from = &asoc->peer.primary_addr.v6;
- ipv6_addr_copy(&sin6->sin6_addr, &sin6from->sin6_addr);
+ sin6->sin6_addr = sin6from->sin6_addr;
if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
sin6->sin6_scope_id = sin6from->sin6_scope_id;
}
@@ -787,7 +785,7 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname,
}
/* Otherwise, just copy the v6 address. */
- ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr);
+ sin6->sin6_addr = ipv6_hdr(skb)->saddr;
if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) {
struct sctp_ulpevent *ev = sctp_skb2event(skb);
sin6->sin6_scope_id = ev->iif;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 0121e0ab035..a85eeeb55dd 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3400,8 +3400,10 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
asconf_len -= length;
}
- if (no_err && asoc->src_out_of_asoc_ok)
+ if (no_err && asoc->src_out_of_asoc_ok) {
asoc->src_out_of_asoc_ok = 0;
+ sctp_transport_immediate_rtx(asoc->peer.primary_path);
+ }
/* Free the cached last sent asconf chunk. */
list_del_init(&asconf->transmitted_list);
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 76388b083f2..1ff51c9d18d 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -666,6 +666,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
struct sctp_chunk *chunk)
{
sctp_sender_hb_info_t *hbinfo;
+ int was_unconfirmed = 0;
/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
* HEARTBEAT should clear the error counter of the destination
@@ -692,9 +693,11 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
/* Mark the destination transport address as active if it is not so
* marked.
*/
- if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED))
+ if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
+ was_unconfirmed = 1;
sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
SCTP_HEARTBEAT_SUCCESS);
+ }
/* The receiver of the HEARTBEAT ACK should also perform an
* RTT measurement for that destination transport address
@@ -712,6 +715,9 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
/* Update the heartbeat timer. */
if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
sctp_transport_hold(t);
+
+ if (was_unconfirmed && asoc->peer.transport_count == 1)
+ sctp_transport_immediate_rtx(t);
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 13bf5fcdbff..d56c07a3d43 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -804,7 +804,7 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
struct sockaddr_in6 *sin6;
sin6 = (struct sockaddr_in6 *)addrs;
- ipv6_addr_copy(&asoc->asconf_addr_del_pending->v6.sin6_addr, &sin6->sin6_addr);
+ asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
}
SCTP_DEBUG_PRINTK_IPADDR("send_asconf_del_ip: keep the last address asoc: %p ",
" at %p\n", asoc, asoc->asconf_addr_del_pending,
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 394c57ca2f5..3889330b7b0 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -641,3 +641,19 @@ void sctp_transport_reset(struct sctp_transport *t)
t->cacc.next_tsn_at_change = 0;
t->cacc.cacc_saw_newack = 0;
}
+
+/* Schedule retransmission on the given transport */
+void sctp_transport_immediate_rtx(struct sctp_transport *t)
+{
+ /* Stop pending T3_rtx_timer */
+ if (timer_pending(&t->T3_rtx_timer)) {
+ (void)del_timer(&t->T3_rtx_timer);
+ sctp_transport_put(t);
+ }
+ sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
+ if (!timer_pending(&t->T3_rtx_timer)) {
+ if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
+ sctp_transport_hold(t);
+ }
+ return;
+}
diff --git a/net/socket.c b/net/socket.c
index 2877647f347..e62b4f05507 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -538,6 +538,8 @@ int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags)
*tx_flags |= SKBTX_HW_TSTAMP;
if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
*tx_flags |= SKBTX_SW_TSTAMP;
+ if (sock_flag(sk, SOCK_WIFI_STATUS))
+ *tx_flags |= SKBTX_WIFI_STATUS;
return 0;
}
EXPORT_SYMBOL(sock_tx_timestamp);
@@ -549,6 +551,8 @@ static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock,
sock_update_classid(sock->sk);
+ sock_update_netprioidx(sock->sk);
+
si->sock = sock;
si->scm = NULL;
si->msg = msg;
@@ -674,6 +678,22 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
}
EXPORT_SYMBOL_GPL(__sock_recv_timestamp);
+void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb)
+{
+ int ack;
+
+ if (!sock_flag(sk, SOCK_WIFI_STATUS))
+ return;
+ if (!skb->wifi_acked_valid)
+ return;
+
+ ack = skb->wifi_acked;
+
+ put_cmsg(msg, SOL_SOCKET, SCM_WIFI_STATUS, sizeof(ack), &ack);
+}
+EXPORT_SYMBOL_GPL(__sock_recv_wifi_status);
+
static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb)
{
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index ce136323da8..fe258fc37f5 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -134,7 +134,7 @@ static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
struct ip_map *item = container_of(citem, struct ip_map, h);
strcpy(new->m_class, item->m_class);
- ipv6_addr_copy(&new->m_addr, &item->m_addr);
+ new->m_addr = item->m_addr;
}
static void update(struct cache_head *cnew, struct cache_head *citem)
{
@@ -274,7 +274,7 @@ static int ip_map_show(struct seq_file *m,
}
im = container_of(h, struct ip_map, h);
/* class addr domain */
- ipv6_addr_copy(&addr, &im->m_addr);
+ addr = im->m_addr;
if (test_bit(CACHE_VALID, &h->flags) &&
!test_bit(CACHE_NEGATIVE, &h->flags))
@@ -297,7 +297,7 @@ static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,
struct cache_head *ch;
strcpy(ip.m_class, class);
- ipv6_addr_copy(&ip.m_addr, addr);
+ ip.m_addr = *addr;
ch = sunrpc_cache_lookup(cd, &ip.h,
hash_str(class, IP_HASHBITS) ^
hash_ip6(*addr));
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 71bed1c1c77..4653286fcc9 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -157,7 +157,7 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
cmh->cmsg_level = SOL_IPV6;
cmh->cmsg_type = IPV6_PKTINFO;
pki->ipi6_ifindex = daddr->sin6_scope_id;
- ipv6_addr_copy(&pki->ipi6_addr, &daddr->sin6_addr);
+ pki->ipi6_addr = daddr->sin6_addr;
cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
}
break;
@@ -523,7 +523,7 @@ static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
return 0;
daddr->sin6_family = AF_INET6;
- ipv6_addr_copy(&daddr->sin6_addr, &pki->ipi6_addr);
+ daddr->sin6_addr = pki->ipi6_addr;
daddr->sin6_scope_id = pki->ipi6_ifindex;
return 1;
}
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 220f3bd176f..ccdfed89765 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -492,6 +492,10 @@ int wiphy_register(struct wiphy *wiphy)
!(wiphy->wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY)))
return -EINVAL;
+ if (WARN_ON(wiphy->ap_sme_capa &&
+ !(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME)))
+ return -EINVAL;
+
if (WARN_ON(wiphy->addresses && !wiphy->n_addresses))
return -EINVAL;
diff --git a/net/wireless/core.h b/net/wireless/core.h
index b9ec3061ed7..1c7d4df5418 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -54,6 +54,8 @@ struct cfg80211_registered_device {
int opencount; /* also protected by devlist_mtx */
wait_queue_head_t dev_wait;
+ u32 ap_beacons_nlpid;
+
/* BSSes/scanning */
spinlock_t bss_lock;
struct list_head bss_list;
@@ -376,7 +378,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
const u8 *buf, size_t len, bool no_cck,
- u64 *cookie);
+ bool dont_wait_for_ack, u64 *cookie);
/* SME */
int __cfg80211_connect(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 21fc9702f81..6c1bafd508c 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -879,6 +879,9 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid)
}
spin_unlock_bh(&wdev->mgmt_registrations_lock);
+
+ if (nlpid == wdev->ap_unexpected_nlpid)
+ wdev->ap_unexpected_nlpid = 0;
}
void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
@@ -901,7 +904,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
const u8 *buf, size_t len, bool no_cck,
- u64 *cookie)
+ bool dont_wait_for_ack, u64 *cookie)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
const struct ieee80211_mgmt *mgmt;
@@ -992,7 +995,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
/* Transmit the Action frame as requested by user space */
return rdev->ops->mgmt_tx(&rdev->wiphy, dev, chan, offchan,
channel_type, channel_type_valid,
- wait, buf, len, no_cck, cookie);
+ wait, buf, len, no_cck, dont_wait_for_ack,
+ cookie);
}
bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf,
@@ -1107,3 +1111,30 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp);
}
EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify);
+
+bool cfg80211_rx_spurious_frame(struct net_device *dev,
+ const u8 *addr, gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
+ wdev->iftype != NL80211_IFTYPE_P2P_GO))
+ return false;
+
+ return nl80211_unexpected_frame(dev, addr, gfp);
+}
+EXPORT_SYMBOL(cfg80211_rx_spurious_frame);
+
+bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
+ const u8 *addr, gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
+ wdev->iftype != NL80211_IFTYPE_P2P_GO &&
+ wdev->iftype != NL80211_IFTYPE_AP_VLAN))
+ return false;
+
+ return nl80211_unexpected_4addr_frame(dev, addr, gfp);
+}
+EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index b3a476fe827..6bc7c4b32fa 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -98,7 +98,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 },
[NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 },
[NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG },
- [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 },
+ [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 },
[NL80211_ATTR_KEY_TYPE] = { .type = NLA_U32 },
[NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 },
@@ -196,6 +196,9 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_TDLS_OPERATION] = { .type = NLA_U8 },
[NL80211_ATTR_TDLS_SUPPORT] = { .type = NLA_FLAG },
[NL80211_ATTR_TDLS_EXTERNAL_SETUP] = { .type = NLA_FLAG },
+ [NL80211_ATTR_DONT_WAIT_FOR_ACK] = { .type = NLA_FLAG },
+ [NL80211_ATTR_PROBE_RESP] = { .type = NLA_BINARY,
+ .len = IEEE80211_MAX_DATA_LEN },
};
/* policy for the key attributes */
@@ -203,7 +206,7 @@ static const struct nla_policy nl80211_key_policy[NL80211_KEY_MAX + 1] = {
[NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN },
[NL80211_KEY_IDX] = { .type = NLA_U8 },
[NL80211_KEY_CIPHER] = { .type = NLA_U32 },
- [NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 },
+ [NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 },
[NL80211_KEY_DEFAULT] = { .type = NLA_FLAG },
[NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG },
[NL80211_KEY_TYPE] = { .type = NLA_U32 },
@@ -758,6 +761,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
dev->wiphy.available_antennas_rx);
+ if (dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD)
+ NLA_PUT_U32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD,
+ dev->wiphy.probe_resp_offload);
+
if ((dev->wiphy.available_antennas_tx ||
dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) {
u32 tx_ant = 0, rx_ant = 0;
@@ -890,6 +897,11 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
}
if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
CMD(sched_scan_start, START_SCHED_SCAN);
+ CMD(probe_client, PROBE_CLIENT);
+ if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) {
+ i++;
+ NLA_PUT_U32(msg, i, NL80211_CMD_REGISTER_BEACONS);
+ }
#undef CMD
@@ -1007,6 +1019,12 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
if (nl80211_put_iface_combinations(&dev->wiphy, msg))
goto nla_put_failure;
+ if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME)
+ NLA_PUT_U32(msg, NL80211_ATTR_DEVICE_AP_SME,
+ dev->wiphy.ap_sme_capa);
+
+ NLA_PUT_U32(msg, NL80211_ATTR_FEATURE_FLAGS, dev->wiphy.features);
+
return genlmsg_end(msg, hdr);
nla_put_failure:
@@ -2155,6 +2173,13 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
nla_len(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]);
}
+ if (info->attrs[NL80211_ATTR_PROBE_RESP]) {
+ params.probe_resp =
+ nla_data(info->attrs[NL80211_ATTR_PROBE_RESP]);
+ params.probe_resp_len =
+ nla_len(info->attrs[NL80211_ATTR_PROBE_RESP]);
+ }
+
err = call(&rdev->wiphy, dev, &params);
if (!err && params.interval)
wdev->beacon_interval = params.interval;
@@ -5271,12 +5296,13 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
bool channel_type_valid = false;
u32 freq;
int err;
- void *hdr;
+ void *hdr = NULL;
u64 cookie;
- struct sk_buff *msg;
+ struct sk_buff *msg = NULL;
unsigned int wait = 0;
- bool offchan;
- bool no_cck;
+ bool offchan, no_cck, dont_wait_for_ack;
+
+ dont_wait_for_ack = info->attrs[NL80211_ATTR_DONT_WAIT_FOR_ACK];
if (!info->attrs[NL80211_ATTR_FRAME] ||
!info->attrs[NL80211_ATTR_WIPHY_FREQ])
@@ -5320,29 +5346,36 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
if (chan == NULL)
return -EINVAL;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
+ if (!dont_wait_for_ack) {
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
- hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
- NL80211_CMD_FRAME);
+ hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+ NL80211_CMD_FRAME);
- if (IS_ERR(hdr)) {
- err = PTR_ERR(hdr);
- goto free_msg;
+ if (IS_ERR(hdr)) {
+ err = PTR_ERR(hdr);
+ goto free_msg;
+ }
}
+
err = cfg80211_mlme_mgmt_tx(rdev, dev, chan, offchan, channel_type,
channel_type_valid, wait,
nla_data(info->attrs[NL80211_ATTR_FRAME]),
nla_len(info->attrs[NL80211_ATTR_FRAME]),
- no_cck, &cookie);
+ no_cck, dont_wait_for_ack, &cookie);
if (err)
goto free_msg;
- NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+ if (msg) {
+ NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
- genlmsg_end(msg, hdr);
- return genlmsg_reply(msg, info);
+ genlmsg_end(msg, hdr);
+ return genlmsg_reply(msg, info);
+ }
+
+ return 0;
nla_put_failure:
err = -ENOBUFS;
@@ -5832,6 +5865,91 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
return err;
}
+static int nl80211_register_unexpected_frame(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ if (wdev->iftype != NL80211_IFTYPE_AP &&
+ wdev->iftype != NL80211_IFTYPE_P2P_GO)
+ return -EINVAL;
+
+ if (wdev->ap_unexpected_nlpid)
+ return -EBUSY;
+
+ wdev->ap_unexpected_nlpid = info->snd_pid;
+ return 0;
+}
+
+static int nl80211_probe_client(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct sk_buff *msg;
+ void *hdr;
+ const u8 *addr;
+ u64 cookie;
+ int err;
+
+ if (wdev->iftype != NL80211_IFTYPE_AP &&
+ wdev->iftype != NL80211_IFTYPE_P2P_GO)
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL80211_ATTR_MAC])
+ return -EINVAL;
+
+ if (!rdev->ops->probe_client)
+ return -EOPNOTSUPP;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+ NL80211_CMD_PROBE_CLIENT);
+
+ if (IS_ERR(hdr)) {
+ err = PTR_ERR(hdr);
+ goto free_msg;
+ }
+
+ addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+ err = rdev->ops->probe_client(&rdev->wiphy, dev, addr, &cookie);
+ if (err)
+ goto free_msg;
+
+ NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+
+ genlmsg_end(msg, hdr);
+
+ return genlmsg_reply(msg, info);
+
+ nla_put_failure:
+ err = -ENOBUFS;
+ free_msg:
+ nlmsg_free(msg);
+ return err;
+}
+
+static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+
+ if (!(rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS))
+ return -EOPNOTSUPP;
+
+ if (rdev->ap_beacons_nlpid)
+ return -EBUSY;
+
+ rdev->ap_beacons_nlpid = info->snd_pid;
+
+ return 0;
+}
+
#define NL80211_FLAG_NEED_WIPHY 0x01
#define NL80211_FLAG_NEED_NETDEV 0x02
#define NL80211_FLAG_NEED_RTNL 0x04
@@ -6387,6 +6505,30 @@ static struct genl_ops nl80211_ops[] = {
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
+ {
+ .cmd = NL80211_CMD_UNEXPECTED_FRAME,
+ .doit = nl80211_register_unexpected_frame,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_PROBE_CLIENT,
+ .doit = nl80211_probe_client,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_REGISTER_BEACONS,
+ .doit = nl80211_register_beacons,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_WIPHY |
+ NL80211_FLAG_NEED_RTNL,
+ },
};
static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -6639,10 +6781,7 @@ void nl80211_send_reg_change_event(struct regulatory_request *request)
if (wiphy_idx_valid(request->wiphy_idx))
NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
rcu_read_lock();
genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id,
@@ -6678,10 +6817,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -6762,10 +6898,7 @@ static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev,
NLA_PUT_FLAG(msg, NL80211_ATTR_TIMED_OUT);
NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -6821,10 +6954,7 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
if (resp_ie)
NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -6862,10 +6992,7 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
if (resp_ie)
NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -6903,10 +7030,7 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
if (ie)
NLA_PUT(msg, NL80211_ATTR_IE, ie_len, ie);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, GFP_KERNEL);
@@ -6939,10 +7063,7 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -6977,10 +7098,7 @@ void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev,
if (ie_len && ie)
NLA_PUT(msg, NL80211_ATTR_IE, ie_len , ie);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -7019,10 +7137,7 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
if (tsc)
NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -7073,10 +7188,7 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy,
goto nla_put_failure;
nla_nest_end(msg, nl_freq);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
rcu_read_lock();
genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id,
@@ -7119,10 +7231,7 @@ static void nl80211_send_remain_on_chan_event(
if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL)
NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -7193,10 +7302,7 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -7207,13 +7313,68 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
nlmsg_free(msg);
}
+static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
+ const u8 *addr, gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ struct sk_buff *msg;
+ void *hdr;
+ int err;
+ u32 nlpid = ACCESS_ONCE(wdev->ap_unexpected_nlpid);
+
+ if (!nlpid)
+ return false;
+
+ msg = nlmsg_new(100, gfp);
+ if (!msg)
+ return true;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, cmd);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return true;
+ }
+
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+ NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
+ NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
+
+ err = genlmsg_end(msg, hdr);
+ if (err < 0) {
+ nlmsg_free(msg);
+ return true;
+ }
+
+ genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
+ return true;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+ return true;
+}
+
+bool nl80211_unexpected_frame(struct net_device *dev, const u8 *addr, gfp_t gfp)
+{
+ return __nl80211_unexpected_frame(dev, NL80211_CMD_UNEXPECTED_FRAME,
+ addr, gfp);
+}
+
+bool nl80211_unexpected_4addr_frame(struct net_device *dev,
+ const u8 *addr, gfp_t gfp)
+{
+ return __nl80211_unexpected_frame(dev,
+ NL80211_CMD_UNEXPECTED_4ADDR_FRAME,
+ addr, gfp);
+}
+
int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
struct net_device *netdev, u32 nlpid,
int freq, const u8 *buf, size_t len, gfp_t gfp)
{
struct sk_buff *msg;
void *hdr;
- int err;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
@@ -7230,16 +7391,9 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
- err = genlmsg_end(msg, hdr);
- if (err < 0) {
- nlmsg_free(msg);
- return err;
- }
+ genlmsg_end(msg, hdr);
- err = genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
- if (err < 0)
- return err;
- return 0;
+ return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
nla_put_failure:
genlmsg_cancel(msg, hdr);
@@ -7272,10 +7426,7 @@ void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
if (ack)
NLA_PUT_FLAG(msg, NL80211_ATTR_ACK);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp);
return;
@@ -7317,10 +7468,7 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
nla_nest_end(msg, pinfoattr);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -7362,10 +7510,7 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
nla_nest_end(msg, rekey_attr);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -7408,10 +7553,7 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
nla_nest_end(msg, attr);
- if (genlmsg_end(msg, hdr) < 0) {
- nlmsg_free(msg);
- return;
- }
+ genlmsg_end(msg, hdr);
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, gfp);
@@ -7453,7 +7595,45 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
nla_nest_end(msg, pinfoattr);
- if (genlmsg_end(msg, hdr) < 0) {
+ genlmsg_end(msg, hdr);
+
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
+ return;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+}
+
+void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
+ u64 cookie, bool acked, gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ struct sk_buff *msg;
+ void *hdr;
+ int err;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PROBE_CLIENT);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+ NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
+ NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
+ NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+ if (acked)
+ NLA_PUT_FLAG(msg, NL80211_ATTR_ACK);
+
+ err = genlmsg_end(msg, hdr);
+ if (err < 0) {
nlmsg_free(msg);
return;
}
@@ -7466,6 +7646,45 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
genlmsg_cancel(msg, hdr);
nlmsg_free(msg);
}
+EXPORT_SYMBOL(cfg80211_probe_status);
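
cfg80211_probe_status() is a new driver-facing export. A minimal usage sketch, assuming a driver's TX-status path and a probe_cookie the driver handed out earlier (neither the helper name nor its context is part of this patch):

	/* hypothetical driver helper -- names and context are assumptions;
	 * assumes <net/cfg80211.h> */
	static void drv_report_probe_status(struct net_device *dev, const u8 *peer,
					    u64 probe_cookie, bool was_acked)
	{
		/* GFP_ATOMIC because TX status is typically reported in
		 * atomic context; GFP_KERNEL works where sleeping is allowed */
		cfg80211_probe_status(dev, peer, probe_cookie, was_acked, GFP_ATOMIC);
	}
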
+
+void cfg80211_report_obss_beacon(struct wiphy *wiphy,
+ const u8 *frame, size_t len,
+ int freq, gfp_t gfp)
+{
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+ struct sk_buff *msg;
+ void *hdr;
+ u32 nlpid = ACCESS_ONCE(rdev->ap_beacons_nlpid);
+
+ if (!nlpid)
+ return;
+
+ msg = nlmsg_new(len + 100, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+ if (freq)
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
+ NLA_PUT(msg, NL80211_ATTR_FRAME, len, frame);
+
+ genlmsg_end(msg, hdr);
+
+ genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
+ return;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+}
+EXPORT_SYMBOL(cfg80211_report_obss_beacon);
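
Similarly, cfg80211_report_obss_beacon() is meant to be called by drivers that can hear beacons from other BSSes while operating as an AP. A minimal sketch of such a call; the skb handling and frequency lookup are assumed rather than taken from this patch:

	/* hypothetical driver helper -- the surrounding RX path is assumed;
	 * assumes <net/cfg80211.h> and <linux/skbuff.h> */
	static void drv_rx_obss_beacon(struct wiphy *wiphy, struct sk_buff *skb,
				       int freq_mhz)
	{
		cfg80211_report_obss_beacon(wiphy, skb->data, skb->len, freq_mhz,
					    GFP_ATOMIC);
	}
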
static int nl80211_netlink_notify(struct notifier_block * nb,
unsigned long state,
@@ -7480,9 +7699,12 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
rcu_read_lock();
- list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list)
+ list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
list_for_each_entry_rcu(wdev, &rdev->netdev_list, list)
cfg80211_mlme_unregister_socket(wdev, notify->pid);
+ if (rdev->ap_beacons_nlpid == notify->pid)
+ rdev->ap_beacons_nlpid = 0;
+ }
rcu_read_unlock();
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index f24a1fbeaf1..12bf4d185ab 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -117,4 +117,9 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
struct net_device *netdev, int index,
const u8 *bssid, bool preauth, gfp_t gfp);
+bool nl80211_unexpected_frame(struct net_device *dev,
+ const u8 *addr, gfp_t gfp);
+bool nl80211_unexpected_4addr_frame(struct net_device *dev,
+ const u8 *addr, gfp_t gfp);
+
#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index dc23b31594e..31119e32e09 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -355,8 +355,8 @@ static bool is_mesh(struct cfg80211_bss *a,
sizeof(struct ieee80211_meshconf_ie) - 2) == 0;
}
-static int cmp_bss(struct cfg80211_bss *a,
- struct cfg80211_bss *b)
+static int cmp_bss_core(struct cfg80211_bss *a,
+ struct cfg80211_bss *b)
{
int r;
@@ -378,7 +378,15 @@ static int cmp_bss(struct cfg80211_bss *a,
b->len_information_elements);
}
- r = memcmp(a->bssid, b->bssid, ETH_ALEN);
+ return memcmp(a->bssid, b->bssid, ETH_ALEN);
+}
+
+static int cmp_bss(struct cfg80211_bss *a,
+ struct cfg80211_bss *b)
+{
+ int r;
+
+ r = cmp_bss_core(a, b);
if (r)
return r;
@@ -389,6 +397,52 @@ static int cmp_bss(struct cfg80211_bss *a,
b->len_information_elements);
}
+static int cmp_hidden_bss(struct cfg80211_bss *a,
+ struct cfg80211_bss *b)
+{
+ const u8 *ie1;
+ const u8 *ie2;
+ int i;
+ int r;
+
+ r = cmp_bss_core(a, b);
+ if (r)
+ return r;
+
+ ie1 = cfg80211_find_ie(WLAN_EID_SSID,
+ a->information_elements,
+ a->len_information_elements);
+ ie2 = cfg80211_find_ie(WLAN_EID_SSID,
+ b->information_elements,
+ b->len_information_elements);
+
+ /* A key comparator used for rb-tree lookups must impose the same
+ * ordering as the comparator used to build the tree; otherwise the
+ * tree's ordering is violated and searches return incorrect
+ * results. This code therefore uses the same order as cmp_ies(). */
+
+ /* sort missing IE before (left of) present IE */
+ if (!ie1)
+ return -1;
+ if (!ie2)
+ return 1;
+
+ /* a zero-length SSID element indicates a hidden BSS */
+ if (!ie2[1])
+ return 0;
+
+ /* sort by length first, then by contents */
+ if (ie1[1] != ie2[1])
+ return ie2[1] - ie1[1];
+
+ /* an all-zero SSID IE is another indication of a hidden BSS */
+ for (i = 0; i < ie2[1]; i++)
+ if (ie2[i + 2])
+ return -1;
+
+ return 0;
+}
+
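
cmp_hidden_bss() has to distinguish a real SSID from the two shapes hidden APs typically advertise. Purely as an illustration of the raw element layout the code above inspects (element ID, length, data); these byte arrays are not taken from this patch:

	/* illustration only: SSID element shapes cmp_hidden_bss() must handle;
	 * assumes <linux/ieee80211.h> for WLAN_EID_SSID */
	static const u8 ssid_visible[]  = { WLAN_EID_SSID, 4, 'h', 'o', 'm', 'e' };
	static const u8 ssid_zero_len[] = { WLAN_EID_SSID, 0 };             /* hidden */
	static const u8 ssid_zeroed[]   = { WLAN_EID_SSID, 4, 0, 0, 0, 0 }; /* hidden */
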
struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
const u8 *bssid,
@@ -505,6 +559,48 @@ rb_find_bss(struct cfg80211_registered_device *dev,
}
static struct cfg80211_internal_bss *
+rb_find_hidden_bss(struct cfg80211_registered_device *dev,
+ struct cfg80211_internal_bss *res)
+{
+ struct rb_node *n = dev->bss_tree.rb_node;
+ struct cfg80211_internal_bss *bss;
+ int r;
+
+ while (n) {
+ bss = rb_entry(n, struct cfg80211_internal_bss, rbn);
+ r = cmp_hidden_bss(&res->pub, &bss->pub);
+
+ if (r == 0)
+ return bss;
+ else if (r < 0)
+ n = n->rb_left;
+ else
+ n = n->rb_right;
+ }
+
+ return NULL;
+}
+
+static void
+copy_hidden_ies(struct cfg80211_internal_bss *res,
+ struct cfg80211_internal_bss *hidden)
+{
+ if (unlikely(res->pub.beacon_ies))
+ return;
+ if (WARN_ON(!hidden->pub.beacon_ies))
+ return;
+
+ res->pub.beacon_ies = kmalloc(hidden->pub.len_beacon_ies, GFP_ATOMIC);
+ if (unlikely(!res->pub.beacon_ies))
+ return;
+
+ res->beacon_ies_allocated = true;
+ res->pub.len_beacon_ies = hidden->pub.len_beacon_ies;
+ memcpy(res->pub.beacon_ies, hidden->pub.beacon_ies,
+ res->pub.len_beacon_ies);
+}
+
+static struct cfg80211_internal_bss *
cfg80211_bss_update(struct cfg80211_registered_device *dev,
struct cfg80211_internal_bss *res)
{
@@ -607,6 +703,21 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
kref_put(&res->ref, bss_release);
} else {
+ struct cfg80211_internal_bss *hidden;
+
+ /* First check whether this entry is the probe response of a
+ * hidden BSS already known from its beacons. If so, copy the
+ * beacon IEs (with the nulled SSID) into the probe response
+ * entry (which carries the real SSID). This is needed mainly
+ * for PSM, since probe responses do not carry a TIM IE. */
+
+ /* TODO: existing probe response BSS entries are not
+ * updated when the corresponding beacon IEs
+ * change. */
+ hidden = rb_find_hidden_bss(dev, res);
+ if (hidden)
+ copy_hidden_ies(res, hidden);
+
/* this "consumes" the reference */
list_add_tail(&res->list, &dev->bss_list);
rb_insert_bss(dev, res);
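
Why the beacon IEs are worth copying, sketched as code: power-save handling needs the TIM element, which only beacons carry, so after copy_hidden_ies() a consumer can find it on the probe-response entry as well. The helper below is an illustration, not part of this patch:

	/* hypothetical helper: locate the TIM in the (copied) beacon IEs;
	 * assumes <net/cfg80211.h> and <linux/ieee80211.h> */
	static const u8 *bss_find_tim(const struct cfg80211_bss *bss)
	{
		return cfg80211_find_ie(WLAN_EID_TIM, bss->beacon_ies,
					bss->len_beacon_ies);
	}
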
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 6897436b1d3..3c24eb97e9d 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -819,12 +819,24 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
struct iw_freq *freq, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ struct ieee80211_channel *chan;
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
return cfg80211_mgd_wext_giwfreq(dev, info, freq, extra);
case NL80211_IFTYPE_ADHOC:
return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra);
+ case NL80211_IFTYPE_MONITOR:
+ if (!rdev->ops->get_channel)
+ return -EINVAL;
+
+ chan = rdev->ops->get_channel(wdev->wiphy);
+ if (!chan)
+ return -EINVAL;
+ freq->m = chan->center_freq;
+ freq->e = 6;
+ return 0;
default:
if (!wdev->channel)
return -EINVAL;
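
For the monitor case added above, the channel's center_freq (in MHz) is reported with e = 6 because wireless extensions encode a frequency as m * 10^e Hz; 2412 MHz therefore comes back as m = 2412, e = 6, i.e. 2.412 GHz. A small decoding sketch for the caller side (assumed, not from this patch):

	/* sketch: decode a struct iw_freq as filled in by the hunk above;
	 * assumes <linux/wireless.h> */
	static unsigned long long iw_freq_to_hz(const struct iw_freq *freq)
	{
		unsigned long long hz = freq->m;
		int i;

		for (i = 0; i < freq->e; i++)
			hz *= 10;
		return hz;	/* e.g. m = 2412, e = 6  ->  2412000000 Hz */
	}
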
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 2118d644663..4fce1cec193 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -61,8 +61,8 @@ __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
const struct flowi4 *fl4 = &fl->u.ip4;
- return addr_match(&fl4->daddr, &sel->daddr, sel->prefixlen_d) &&
- addr_match(&fl4->saddr, &sel->saddr, sel->prefixlen_s) &&
+ return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
+ addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
(fl4->flowi4_proto == sel->proto || !sel->proto) &&
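
The switch from addr_match() to addr4_match() lets the IPv4 selector compare 32-bit addresses directly instead of going through the generic xfrm_address_t walker. A hedged sketch of the semantics such a helper has to provide (the kernel's own definition lives in include/net/xfrm.h and may differ in detail):

	/* sketch: prefix match on two IPv4 addresses in network byte order */
	static bool addr4_prefix_match(__be32 a, __be32 b, u8 prefixlen)
	{
		if (prefixlen == 0)
			return true;	/* also avoids shifting a 32-bit value by 32 */
		return !((a ^ b) & htonl(~0u << (32 - prefixlen)));
	}
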
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 9414b9c5b1e..5b228f97d4b 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1035,16 +1035,12 @@ static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
break;
case AF_INET6:
- ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
- (const struct in6_addr *)daddr);
- ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
- (const struct in6_addr *)saddr);
+ *(struct in6_addr *)x->sel.daddr.a6 = *(struct in6_addr *)daddr;
+ *(struct in6_addr *)x->sel.saddr.a6 = *(struct in6_addr *)saddr;
x->sel.prefixlen_d = 128;
x->sel.prefixlen_s = 128;
- ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
- (const struct in6_addr *)saddr);
- ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
- (const struct in6_addr *)daddr);
+ *(struct in6_addr *)x->props.saddr.a6 = *(struct in6_addr *)saddr;
+ *(struct in6_addr *)x->id.daddr.a6 = *(struct in6_addr *)daddr;
break;
}
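
The xfrm_state hunk relies on ipv6_addr_copy() never having been more than a 128-bit copy, so a plain struct assignment through the casts is equivalent. A short illustration of that equivalence (not a quote of the old helper):

	/* sketch: both spellings copy the same 16 bytes; assumes <linux/in6.h> */
	static void copy_in6(struct in6_addr *dst, const struct in6_addr *src)
	{
		*dst = *src;			/* new style: struct assignment */
		/* ipv6_addr_copy(dst, src);	   old style: helper, same effect */
	}
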