Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/af_bluetooth.c | 8
-rw-r--r--  net/bluetooth/bnep/core.c | 2
-rw-r--r--  net/bluetooth/hci_conn.c | 56
-rw-r--r--  net/bluetooth/hci_core.c | 267
-rw-r--r--  net/bluetooth/hci_event.c | 75
-rw-r--r--  net/bluetooth/hci_sysfs.c | 5
-rw-r--r--  net/bluetooth/l2cap_core.c | 762
-rw-r--r--  net/bluetooth/l2cap_sock.c | 76
-rw-r--r--  net/bluetooth/mgmt.c | 286
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 14
-rw-r--r--  net/bluetooth/sco.c | 75
-rw-r--r--  net/bluetooth/smp.c | 2
-rw-r--r--  net/ipv4/fib_semantics.c | 12
-rw-r--r--  net/ipv4/route.c | 1
-rw-r--r--  net/ipv4/tcp.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 5
-rw-r--r--  net/ipv4/udp.c | 30
-rw-r--r--  net/mac80211/agg-tx.c | 10
-rw-r--r--  net/mac80211/debugfs_netdev.c | 2
-rw-r--r--  net/mac80211/ibss.c | 5
-rw-r--r--  net/mac80211/iface.c | 4
-rw-r--r--  net/mac80211/main.c | 3
-rw-r--r--  net/mac80211/mesh.c | 6
-rw-r--r--  net/mac80211/mesh_hwmp.c | 5
-rw-r--r--  net/mac80211/mesh_plink.c | 65
-rw-r--r--  net/mac80211/rx.c | 6
-rw-r--r--  net/mac80211/wep.c | 15
-rw-r--r--  net/mac80211/wpa.c | 10
-rw-r--r--  net/nfc/core.c | 112
-rw-r--r--  net/nfc/hci/Kconfig | 1
-rw-r--r--  net/nfc/hci/core.c | 78
-rw-r--r--  net/nfc/hci/shdlc.c | 12
-rw-r--r--  net/nfc/llcp/commands.c | 4
-rw-r--r--  net/nfc/llcp/llcp.c | 7
-rw-r--r--  net/nfc/llcp/sock.c | 57
-rw-r--r--  net/nfc/nci/core.c | 27
-rw-r--r--  net/nfc/nci/data.c | 8
-rw-r--r--  net/nfc/nci/lib.c | 1
-rw-r--r--  net/nfc/nci/ntf.c | 2
-rw-r--r--  net/nfc/netlink.c | 6
-rw-r--r--  net/nfc/nfc.h | 2
-rw-r--r--  net/wireless/chan.c | 2
-rw-r--r--  net/wireless/core.c | 4
-rw-r--r--  net/wireless/core.h | 2
-rw-r--r--  net/wireless/nl80211.c | 69
-rw-r--r--  net/wireless/util.c | 2
46 files changed, 1350 insertions, 855 deletions
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 6fb68a9743a..46e7f86acfc 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -210,7 +210,7 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
}
if (sk->sk_state == BT_CONNECTED || !newsock ||
- bt_sk(parent)->defer_setup) {
+ test_bit(BT_DEFER_SETUP, &bt_sk(parent)->flags)) {
bt_accept_unlink(sk);
if (newsock)
sock_graft(sk, newsock);
@@ -410,8 +410,8 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
if (sk->sk_state == BT_CONNECTED ||
- (bt_sk(parent)->defer_setup &&
- sk->sk_state == BT_CONNECT2))
+ (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
+ sk->sk_state == BT_CONNECT2))
return POLLIN | POLLRDNORM;
}
@@ -450,7 +450,7 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wa
sk->sk_state == BT_CONFIG)
return mask;
- if (!bt_sk(sk)->suspended && sock_writeable(sk))
+ if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 88884d1d95f..031d7d65675 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -340,7 +340,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
}
/* Strip 802.1p header */
- if (ntohs(s->eh.h_proto) == 0x8100) {
+ if (ntohs(s->eh.h_proto) == ETH_P_8021Q) {
if (!skb_pull(skb, 4))
goto badframe;
s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 5238b6b3ea6..3f18a6ed973 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -223,36 +223,6 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
}
EXPORT_SYMBOL(hci_le_start_enc);
-void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
-{
- struct hci_dev *hdev = conn->hdev;
- struct hci_cp_le_ltk_reply cp;
-
- BT_DBG("%p", conn);
-
- memset(&cp, 0, sizeof(cp));
-
- cp.handle = cpu_to_le16(conn->handle);
- memcpy(cp.ltk, ltk, sizeof(ltk));
-
- hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
-}
-EXPORT_SYMBOL(hci_le_ltk_reply);
-
-void hci_le_ltk_neg_reply(struct hci_conn *conn)
-{
- struct hci_dev *hdev = conn->hdev;
- struct hci_cp_le_ltk_neg_reply cp;
-
- BT_DBG("%p", conn);
-
- memset(&cp, 0, sizeof(cp));
-
- cp.handle = cpu_to_le16(conn->handle);
-
- hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(cp), &cp);
-}
-
/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
@@ -513,7 +483,8 @@ EXPORT_SYMBOL(hci_get_route);
/* Create SCO, ACL or LE connection.
* Device _must_ be locked */
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
+struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ __u8 dst_type, __u8 sec_level, __u8 auth_type)
{
struct hci_conn *acl;
struct hci_conn *sco;
@@ -522,23 +493,18 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
BT_DBG("%s dst %s", hdev->name, batostr(dst));
if (type == LE_LINK) {
- struct adv_entry *entry;
-
le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
- if (le)
- return ERR_PTR(-EBUSY);
-
- entry = hci_find_adv_entry(hdev, dst);
- if (!entry)
- return ERR_PTR(-EHOSTUNREACH);
+ if (!le) {
+ le = hci_conn_add(hdev, LE_LINK, dst);
+ if (!le)
+ return ERR_PTR(-ENOMEM);
- le = hci_conn_add(hdev, LE_LINK, dst);
- if (!le)
- return ERR_PTR(-ENOMEM);
-
- le->dst_type = entry->bdaddr_type;
+ le->dst_type = bdaddr_to_le(dst_type);
+ hci_le_connect(le);
+ }
- hci_le_connect(le);
+ le->pending_sec_level = sec_level;
+ le->auth_type = auth_type;
hci_conn_hold(le);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index d6dc44cd15b..411ace8e647 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -83,6 +83,7 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
*/
if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
+ u16 opcode = __le16_to_cpu(sent->opcode);
struct sk_buff *skb;
/* Some CSR based controllers generate a spontaneous
@@ -92,7 +93,7 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
* command.
*/
- if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
+ if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
return;
skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
@@ -251,6 +252,9 @@ static void amp_init(struct hci_dev *hdev)
/* Read Local Version */
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
+
+ /* Read Local AMP Info */
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
@@ -384,7 +388,6 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)
case DISCOVERY_STOPPED:
if (hdev->discovery.state != DISCOVERY_STARTING)
mgmt_discovering(hdev, 0);
- hdev->discovery.type = 0;
break;
case DISCOVERY_STARTING:
break;
@@ -1089,32 +1092,6 @@ static const struct rfkill_ops hci_rfkill_ops = {
.set_block = hci_rfkill_set_block,
};
-/* Alloc HCI device */
-struct hci_dev *hci_alloc_dev(void)
-{
- struct hci_dev *hdev;
-
- hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
- if (!hdev)
- return NULL;
-
- hci_init_sysfs(hdev);
- skb_queue_head_init(&hdev->driver_init);
-
- return hdev;
-}
-EXPORT_SYMBOL(hci_alloc_dev);
-
-/* Free HCI device */
-void hci_free_dev(struct hci_dev *hdev)
-{
- skb_queue_purge(&hdev->driver_init);
-
- /* will free via device release */
- put_device(&hdev->dev);
-}
-EXPORT_SYMBOL(hci_free_dev);
-
static void hci_power_on(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
@@ -1336,7 +1313,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
}
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
- int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16
+ int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
ediv, u8 rand[8])
{
struct smp_ltk *key, *old_key;
@@ -1544,75 +1521,6 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
return mgmt_device_unblocked(hdev, bdaddr, type);
}
-static void hci_clear_adv_cache(struct work_struct *work)
-{
- struct hci_dev *hdev = container_of(work, struct hci_dev,
- adv_work.work);
-
- hci_dev_lock(hdev);
-
- hci_adv_entries_clear(hdev);
-
- hci_dev_unlock(hdev);
-}
-
-int hci_adv_entries_clear(struct hci_dev *hdev)
-{
- struct adv_entry *entry, *tmp;
-
- list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
- list_del(&entry->list);
- kfree(entry);
- }
-
- BT_DBG("%s adv cache cleared", hdev->name);
-
- return 0;
-}
-
-struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
-{
- struct adv_entry *entry;
-
- list_for_each_entry(entry, &hdev->adv_entries, list)
- if (bacmp(bdaddr, &entry->bdaddr) == 0)
- return entry;
-
- return NULL;
-}
-
-static inline int is_connectable_adv(u8 evt_type)
-{
- if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
- return 1;
-
- return 0;
-}
-
-int hci_add_adv_entry(struct hci_dev *hdev,
- struct hci_ev_le_advertising_info *ev)
-{
- struct adv_entry *entry;
-
- if (!is_connectable_adv(ev->evt_type))
- return -EINVAL;
-
- /* Only new entries should be added to adv_entries. So, if
- * bdaddr was found, don't add it. */
- if (hci_find_adv_entry(hdev, &ev->bdaddr))
- return 0;
-
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- bacpy(&entry->bdaddr, &ev->bdaddr);
- entry->bdaddr_type = ev->bdaddr_type;
-
- list_add(&entry->list, &hdev->adv_entries);
-
- BT_DBG("%s adv entry added: address %s type %u", hdev->name,
- batostr(&entry->bdaddr), entry->bdaddr_type);
-
- return 0;
-}
-
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
struct le_scan_params *param = (struct le_scan_params *) opt;
@@ -1670,6 +1578,24 @@ static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
return 0;
}
+int hci_cancel_le_scan(struct hci_dev *hdev)
+{
+ BT_DBG("%s", hdev->name);
+
+ if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+ return -EALREADY;
+
+ if (cancel_delayed_work(&hdev->le_scan_disable)) {
+ struct hci_cp_le_set_scan_enable cp;
+
+ /* Send HCI command to disable LE Scan */
+ memset(&cp, 0, sizeof(cp));
+ hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+ }
+
+ return 0;
+}
+
static void le_scan_disable_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
@@ -1714,95 +1640,103 @@ int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
return 0;
}
-/* Register HCI device */
-int hci_register_dev(struct hci_dev *hdev)
+/* Alloc HCI device */
+struct hci_dev *hci_alloc_dev(void)
{
- struct list_head *head = &hci_dev_list, *p;
- int i, id, error;
-
- BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
-
- if (!hdev->open || !hdev->close)
- return -EINVAL;
-
- /* Do not allow HCI_AMP devices to register at index 0,
- * so the index can be used as the AMP controller ID.
- */
- id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
-
- write_lock(&hci_dev_list_lock);
-
- /* Find first available device id */
- list_for_each(p, &hci_dev_list) {
- if (list_entry(p, struct hci_dev, list)->id != id)
- break;
- head = p; id++;
- }
-
- sprintf(hdev->name, "hci%d", id);
- hdev->id = id;
- list_add_tail(&hdev->list, head);
+ struct hci_dev *hdev;
- mutex_init(&hdev->lock);
+ hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
+ if (!hdev)
+ return NULL;
- hdev->flags = 0;
- hdev->dev_flags = 0;
hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
hdev->esco_type = (ESCO_HV1);
hdev->link_mode = (HCI_LM_ACCEPT);
hdev->io_capability = 0x03; /* No Input No Output */
- hdev->idle_timeout = 0;
hdev->sniff_max_interval = 800;
hdev->sniff_min_interval = 80;
+ mutex_init(&hdev->lock);
+ mutex_init(&hdev->req_lock);
+
+ INIT_LIST_HEAD(&hdev->mgmt_pending);
+ INIT_LIST_HEAD(&hdev->blacklist);
+ INIT_LIST_HEAD(&hdev->uuids);
+ INIT_LIST_HEAD(&hdev->link_keys);
+ INIT_LIST_HEAD(&hdev->long_term_keys);
+ INIT_LIST_HEAD(&hdev->remote_oob_data);
+
INIT_WORK(&hdev->rx_work, hci_rx_work);
INIT_WORK(&hdev->cmd_work, hci_cmd_work);
INIT_WORK(&hdev->tx_work, hci_tx_work);
+ INIT_WORK(&hdev->power_on, hci_power_on);
+ INIT_WORK(&hdev->le_scan, le_scan_work);
+ INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
+ INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
+ INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
+ skb_queue_head_init(&hdev->driver_init);
skb_queue_head_init(&hdev->rx_q);
skb_queue_head_init(&hdev->cmd_q);
skb_queue_head_init(&hdev->raw_q);
- setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
-
- for (i = 0; i < NUM_REASSEMBLY; i++)
- hdev->reassembly[i] = NULL;
-
init_waitqueue_head(&hdev->req_wait_q);
- mutex_init(&hdev->req_lock);
- discovery_init(hdev);
+ setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
+ hci_init_sysfs(hdev);
+ discovery_init(hdev);
hci_conn_hash_init(hdev);
- INIT_LIST_HEAD(&hdev->mgmt_pending);
-
- INIT_LIST_HEAD(&hdev->blacklist);
+ return hdev;
+}
+EXPORT_SYMBOL(hci_alloc_dev);
- INIT_LIST_HEAD(&hdev->uuids);
+/* Free HCI device */
+void hci_free_dev(struct hci_dev *hdev)
+{
+ skb_queue_purge(&hdev->driver_init);
- INIT_LIST_HEAD(&hdev->link_keys);
- INIT_LIST_HEAD(&hdev->long_term_keys);
+ /* will free via device release */
+ put_device(&hdev->dev);
+}
+EXPORT_SYMBOL(hci_free_dev);
- INIT_LIST_HEAD(&hdev->remote_oob_data);
+/* Register HCI device */
+int hci_register_dev(struct hci_dev *hdev)
+{
+ struct list_head *head, *p;
+ int id, error;
- INIT_LIST_HEAD(&hdev->adv_entries);
+ if (!hdev->open || !hdev->close)
+ return -EINVAL;
- INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
- INIT_WORK(&hdev->power_on, hci_power_on);
- INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
+ write_lock(&hci_dev_list_lock);
- INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
+ /* Do not allow HCI_AMP devices to register at index 0,
+ * so the index can be used as the AMP controller ID.
+ */
+ id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
+ head = &hci_dev_list;
- memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
+ /* Find first available device id */
+ list_for_each(p, &hci_dev_list) {
+ int nid = list_entry(p, struct hci_dev, list)->id;
+ if (nid > id)
+ break;
+ if (nid == id)
+ id++;
+ head = p;
+ }
- atomic_set(&hdev->promisc, 0);
+ sprintf(hdev->name, "hci%d", id);
+ hdev->id = id;
- INIT_WORK(&hdev->le_scan, le_scan_work);
+ BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
+ list_add(&hdev->list, head);
write_unlock(&hci_dev_list_lock);
@@ -1884,8 +1818,6 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_del_sysfs(hdev);
- cancel_delayed_work_sync(&hdev->adv_work);
-
destroy_workqueue(hdev->workqueue);
hci_dev_lock(hdev);
@@ -1894,7 +1826,6 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_link_keys_clear(hdev);
hci_smp_ltks_clear(hdev);
hci_remote_oob_data_clear(hdev);
- hci_adv_entries_clear(hdev);
hci_dev_unlock(hdev);
hci_dev_put(hdev);
@@ -2231,6 +2162,12 @@ static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
struct hci_dev *hdev = conn->hdev;
struct sk_buff *list;
+ skb->len = skb_headlen(skb);
+ skb->data_len = 0;
+
+ bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
+ hci_add_acl_hdr(skb, conn->handle, flags);
+
list = skb_shinfo(skb)->frag_list;
if (!list) {
/* Non fragmented */
@@ -2274,8 +2211,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
skb->dev = (void *) hdev;
- bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
- hci_add_acl_hdr(skb, conn->handle, flags);
hci_queue_acl(conn, &chan->data_q, skb, flags);
@@ -2313,7 +2248,7 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *conn = NULL, *c;
- int num = 0, min = ~0;
+ unsigned int num = 0, min = ~0;
/* We don't have to lock device here. Connections are always
* added and removed with TX task disabled. */
@@ -2394,7 +2329,7 @@ static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_chan *chan = NULL;
- int num = 0, min = ~0, cur_prio = 0;
+ unsigned int num = 0, min = ~0, cur_prio = 0;
struct hci_conn *conn;
int cnt, q, conn_num = 0;
@@ -2945,7 +2880,19 @@ int hci_cancel_inquiry(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
if (!test_bit(HCI_INQUIRY, &hdev->flags))
- return -EPERM;
+ return -EALREADY;
return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
+
+u8 bdaddr_to_le(u8 bdaddr_type)
+{
+ switch (bdaddr_type) {
+ case BDADDR_LE_PUBLIC:
+ return ADDR_LE_DEV_PUBLIC;
+
+ default:
+ /* Fallback to LE Random address type */
+ return ADDR_LE_DEV_RANDOM;
+ }
+}
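
For illustration, the reworked hci_register_dev() above now picks the smallest free device index at or above the starting id (0 for BR/EDR, 1 for AMP) by walking the sorted device list. Below is a minimal stand-alone sketch of that loop, using a plain sorted array and a hypothetical pick_free_id() helper instead of the kernel list API; it is not part of this patch.

#include <stdio.h>

static int pick_free_id(const int *used, int n, int start_id)
{
	int id = start_id;
	int i;

	for (i = 0; i < n; i++) {	/* 'used' holds taken ids, sorted ascending */
		if (used[i] > id)
			break;		/* gap found below used[i] */
		if (used[i] == id)
			id++;		/* index taken, try the next one */
	}

	return id;
}

int main(void)
{
	int used[] = { 0, 1, 3 };

	/* BR/EDR controllers may claim index 0, AMP controllers start at 1 */
	printf("next BR/EDR id: hci%d\n", pick_free_id(used, 3, 0));	/* hci2 */
	printf("next AMP id:    hci%d\n", pick_free_id(used, 3, 1));	/* hci2 */

	return 0;
}
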
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 1266f78fa8e..4eefb7f65cf 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -69,6 +69,18 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
hci_conn_check_pending(hdev);
}
+static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ if (status)
+ return;
+
+ set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
+}
+
static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
@@ -78,6 +90,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
if (status)
return;
+ clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
+
hci_conn_check_pending(hdev);
}
@@ -192,7 +206,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
hci_req_complete(hdev, HCI_OP_RESET, status);
/* Reset all non-persistent flags */
- hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS));
+ hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
+ BIT(HCI_PERIODIC_INQ));
hdev->discovery.state = DISCOVERY_STOPPED;
}
@@ -505,7 +520,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
events[5] |= 0x10; /* Synchronous Connection Changed */
if (hdev->features[3] & LMP_RSSI_INQ)
- events[4] |= 0x04; /* Inquiry Result with RSSI */
+ events[4] |= 0x02; /* Inquiry Result with RSSI */
if (hdev->features[5] & LMP_SNIFF_SUBR)
events[5] |= 0x20; /* Sniff Subrating */
@@ -615,6 +630,7 @@ done:
static void hci_setup_link_policy(struct hci_dev *hdev)
{
+ struct hci_cp_write_def_link_policy cp;
u16 link_policy = 0;
if (hdev->features[0] & LMP_RSWITCH)
@@ -626,9 +642,8 @@ static void hci_setup_link_policy(struct hci_dev *hdev)
if (hdev->features[1] & LMP_PARK)
link_policy |= HCI_LP_PARK;
- link_policy = cpu_to_le16(link_policy);
- hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(link_policy),
- &link_policy);
+ cp.policy = cpu_to_le16(link_policy);
+ hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
@@ -710,7 +725,7 @@ static void hci_set_le_support(struct hci_dev *hdev)
memset(&cp, 0, sizeof(cp));
- if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+ if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
cp.le = 1;
cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
}
@@ -887,11 +902,14 @@ static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (!rp->status)
+ hdev->inq_tx_power = rp->tx_power;
- hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
+ hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
}
static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1082,23 +1100,23 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
set_bit(HCI_LE_SCAN, &hdev->dev_flags);
- cancel_delayed_work_sync(&hdev->adv_work);
-
hci_dev_lock(hdev);
- hci_adv_entries_clear(hdev);
hci_discovery_set_state(hdev, DISCOVERY_FINDING);
hci_dev_unlock(hdev);
break;
case LE_SCANNING_DISABLED:
- if (status)
+ if (status) {
+ hci_dev_lock(hdev);
+ mgmt_stop_discovery_failed(hdev, status);
+ hci_dev_unlock(hdev);
return;
+ }
clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
- schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
-
- if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) {
+ if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
+ hdev->discovery.state == DISCOVERY_FINDING) {
mgmt_interleaved_discovery(hdev);
} else {
hci_dev_lock(hdev);
@@ -1625,6 +1643,8 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
if (status) {
if (conn && conn->state == BT_CONNECT) {
conn->state = BT_CLOSED;
+ mgmt_connect_failed(hdev, &cp->peer_addr, conn->type,
+ conn->dst_type, status);
hci_proto_connect_cfm(conn, status);
hci_conn_del(conn);
}
@@ -1699,6 +1719,9 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
if (!num_rsp)
return;
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+ return;
+
hci_dev_lock(hdev);
for (; num_rsp; num_rsp--, info++) {
@@ -2040,7 +2063,7 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
if (ev->status && conn->state == BT_CONNECTED) {
- hci_acl_disconn(conn, 0x13);
+ hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
hci_conn_put(conn);
goto unlock;
}
@@ -2154,6 +2177,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_cc_inquiry_cancel(hdev, skb);
break;
+ case HCI_OP_PERIODIC_INQ:
+ hci_cc_periodic_inq(hdev, skb);
+ break;
+
case HCI_OP_EXIT_PERIODIC_INQ:
hci_cc_exit_periodic_inq(hdev, skb);
break;
@@ -2806,6 +2833,9 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
if (!num_rsp)
return;
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+ return;
+
hci_dev_lock(hdev);
if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
@@ -2971,12 +3001,16 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
struct inquiry_data data;
struct extended_inquiry_info *info = (void *) (skb->data + 1);
int num_rsp = *((__u8 *) skb->data);
+ size_t eir_len;
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
if (!num_rsp)
return;
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+ return;
+
hci_dev_lock(hdev);
for (; num_rsp; num_rsp--, info++) {
@@ -3000,9 +3034,10 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
name_known = hci_inquiry_cache_update(hdev, &data, name_known,
&ssp);
+ eir_len = eir_get_length(info->data, sizeof(info->data));
mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
info->dev_class, info->rssi, !name_known,
- ssp, info->data, sizeof(info->data));
+ ssp, info->data, eir_len);
}
hci_dev_unlock(hdev);
@@ -3322,8 +3357,6 @@ static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
while (num_reports--) {
struct hci_ev_le_advertising_info *ev = ptr;
- hci_add_adv_entry(hdev, ev);
-
rssi = ev->data[ev->length];
mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
NULL, rssi, 0, 1, ev->data, ev->length);
@@ -3343,7 +3376,7 @@ static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
struct hci_conn *conn;
struct smp_ltk *ltk;
- BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
+ BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
hci_dev_lock(hdev);
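
For illustration, the BT_DBG() fix in the last hunk above replaces cpu_to_le16() with __le16_to_cpu(): fields in HCI events arrive in little-endian wire order and must be converted to CPU order before being used as numbers. A small user-space sketch of that conversion direction, with a hypothetical wire_le16_to_cpu() helper (not part of this patch):

#include <stdint.h>
#include <stdio.h>

/* Decode a 16-bit little-endian field from an HCI event buffer. */
static uint16_t wire_le16_to_cpu(const uint8_t *p)
{
	/* byte 0 is the least significant byte on the wire */
	return (uint16_t)(p[0] | ((uint16_t)p[1] << 8));
}

int main(void)
{
	uint8_t handle_bytes[] = { 0x40, 0x00 };	/* handle 0x0040 on the wire */

	/* Gives the same result on big- and little-endian hosts. */
	printf("handle %u\n", (unsigned)wire_le16_to_cpu(handle_bytes));	/* 64 */

	return 0;
}
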
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index bc154298979..937f3187eaf 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -444,8 +444,8 @@ static const struct file_operations blacklist_fops = {
static void print_bt_uuid(struct seq_file *f, u8 *uuid)
{
- u32 data0, data4;
- u16 data1, data2, data3, data5;
+ __be32 data0, data4;
+ __be16 data1, data2, data3, data5;
memcpy(&data0, &uuid[0], 4);
memcpy(&data1, &uuid[4], 2);
@@ -533,7 +533,6 @@ int hci_add_sysfs(struct hci_dev *hdev)
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- dev->parent = hdev->parent;
dev_set_name(dev, "%s", hdev->name);
err = device_add(dev);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 6f9c25b633a..24f144b72a9 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -4,6 +4,7 @@
Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Copyright (C) 2010 Google Inc.
Copyright (C) 2011 ProFUSION Embedded Systems
+ Copyright (c) 2012 Code Aurora Forum. All rights reserved.
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -70,7 +71,7 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
- struct l2cap_chan *chan, int err);
+ struct l2cap_chan *chan, int err);
/* ---- L2CAP channels ---- */
@@ -97,13 +98,15 @@ static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16
}
/* Find channel with given SCID.
- * Returns locked socket */
+ * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
struct l2cap_chan *c;
mutex_lock(&conn->chan_lock);
c = __l2cap_get_chan_by_scid(conn, cid);
+ if (c)
+ l2cap_chan_lock(c);
mutex_unlock(&conn->chan_lock);
return c;
@@ -120,17 +123,6 @@ static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8
return NULL;
}
-static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
-{
- struct l2cap_chan *c;
-
- mutex_lock(&conn->chan_lock);
- c = __l2cap_get_chan_by_ident(conn, ident);
- mutex_unlock(&conn->chan_lock);
-
- return c;
-}
-
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
struct l2cap_chan *c;
@@ -232,6 +224,124 @@ static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
release_sock(sk);
}
+/* ---- L2CAP sequence number lists ---- */
+
+/* For ERTM, ordered lists of sequence numbers must be tracked for
+ * SREJ requests that are received and for frames that are to be
+ * retransmitted. These seq_list functions implement a singly-linked
+ * list in an array, where membership in the list can also be checked
+ * in constant time. Items can also be added to the tail of the list
+ * and removed from the head in constant time, without further memory
+ * allocs or frees.
+ */
+
+static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
+{
+ size_t alloc_size, i;
+
+ /* Allocated size is a power of 2 to map sequence numbers
+ * (which may be up to 14 bits) in to a smaller array that is
+ * sized for the negotiated ERTM transmit windows.
+ */
+ alloc_size = roundup_pow_of_two(size);
+
+ seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
+ if (!seq_list->list)
+ return -ENOMEM;
+
+ seq_list->mask = alloc_size - 1;
+ seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+ seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
+ for (i = 0; i < alloc_size; i++)
+ seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
+
+ return 0;
+}
+
+static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
+{
+ kfree(seq_list->list);
+}
+
+static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
+ u16 seq)
+{
+ /* Constant-time check for list membership */
+ return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
+}
+
+static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
+{
+ u16 mask = seq_list->mask;
+
+ if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
+ /* In case someone tries to pop the head of an empty list */
+ return L2CAP_SEQ_LIST_CLEAR;
+ } else if (seq_list->head == seq) {
+ /* Head can be removed in constant time */
+ seq_list->head = seq_list->list[seq & mask];
+ seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
+
+ if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
+ seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+ seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
+ }
+ } else {
+ /* Walk the list to find the sequence number */
+ u16 prev = seq_list->head;
+ while (seq_list->list[prev & mask] != seq) {
+ prev = seq_list->list[prev & mask];
+ if (prev == L2CAP_SEQ_LIST_TAIL)
+ return L2CAP_SEQ_LIST_CLEAR;
+ }
+
+ /* Unlink the number from the list and clear it */
+ seq_list->list[prev & mask] = seq_list->list[seq & mask];
+ seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
+ if (seq_list->tail == seq)
+ seq_list->tail = prev;
+ }
+ return seq;
+}
+
+static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
+{
+ /* Remove the head in constant time */
+ return l2cap_seq_list_remove(seq_list, seq_list->head);
+}
+
+static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
+{
+ u16 i;
+
+ if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
+ return;
+
+ for (i = 0; i <= seq_list->mask; i++)
+ seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
+
+ seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+ seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
+}
+
+static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
+{
+ u16 mask = seq_list->mask;
+
+ /* All appends happen in constant time */
+
+ if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
+ return;
+
+ if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
+ seq_list->head = seq;
+ else
+ seq_list->list[seq_list->tail & mask] = seq;
+
+ seq_list->tail = seq;
+ seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
+}
+
static void l2cap_chan_timeout(struct work_struct *work)
{
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
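
For illustration, the l2cap_seq_list helpers added above keep a FIFO of sequence numbers as a singly-linked list stored inside an array, so membership tests, tail appends and head removal are all constant time. A simplified stand-alone sketch of the same idea follows; the mini_* names are hypothetical, and index collisions that the real code avoids by sizing the array to the transmit window are ignored here.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MINI_CLEAR	0xffff		/* "not on the list" marker */
#define MINI_TAIL	0xfffe		/* "last element" marker */
#define MINI_SIZE	8		/* power of two, like roundup_pow_of_two() */

struct mini_seq_list {
	uint16_t head, tail;
	uint16_t list[MINI_SIZE];	/* slot (seq & mask) -> next seq in FIFO order */
};

static void mini_init(struct mini_seq_list *l)
{
	int i;

	l->head = l->tail = MINI_CLEAR;
	for (i = 0; i < MINI_SIZE; i++)
		l->list[i] = MINI_CLEAR;
}

static bool mini_contains(struct mini_seq_list *l, uint16_t seq)
{
	/* Constant-time membership check */
	return l->list[seq & (MINI_SIZE - 1)] != MINI_CLEAR;
}

static void mini_append(struct mini_seq_list *l, uint16_t seq)
{
	uint16_t mask = MINI_SIZE - 1;

	if (l->list[seq & mask] != MINI_CLEAR)
		return;				/* already queued */

	if (l->tail == MINI_CLEAR)
		l->head = seq;
	else
		l->list[l->tail & mask] = seq;

	l->tail = seq;
	l->list[seq & mask] = MINI_TAIL;
}

static uint16_t mini_pop(struct mini_seq_list *l)
{
	uint16_t mask = MINI_SIZE - 1, seq = l->head;

	if (seq == MINI_CLEAR)
		return MINI_CLEAR;		/* empty list */

	l->head = l->list[seq & mask];
	l->list[seq & mask] = MINI_CLEAR;

	if (l->head == MINI_TAIL)
		l->head = l->tail = MINI_CLEAR;

	return seq;
}

int main(void)
{
	struct mini_seq_list l;

	mini_init(&l);
	mini_append(&l, 5);
	mini_append(&l, 2);

	printf("contains 2: %d\n", mini_contains(&l, 2));	/* 1 */
	printf("pop: %u\n", (unsigned)mini_pop(&l));		/* 5 (FIFO order) */
	printf("pop: %u\n", (unsigned)mini_pop(&l));		/* 2 */
	printf("pop: %u\n", (unsigned)mini_pop(&l));		/* 65535 = empty */

	return 0;
}
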
@@ -262,7 +372,7 @@ static void l2cap_chan_timeout(struct work_struct *work)
l2cap_chan_put(chan);
}
-struct l2cap_chan *l2cap_chan_create(struct sock *sk)
+struct l2cap_chan *l2cap_chan_create(void)
{
struct l2cap_chan *chan;
@@ -272,8 +382,6 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
mutex_init(&chan->lock);
- chan->sk = sk;
-
write_lock(&chan_list_lock);
list_add(&chan->global_l, &chan_list);
write_unlock(&chan_list_lock);
@@ -284,7 +392,7 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
atomic_set(&chan->refcnt, 1);
- BT_DBG("sk %p chan %p", sk, chan);
+ BT_DBG("chan %p", chan);
return chan;
}
@@ -298,10 +406,21 @@ void l2cap_chan_destroy(struct l2cap_chan *chan)
l2cap_chan_put(chan);
}
-void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+void l2cap_chan_set_defaults(struct l2cap_chan *chan)
+{
+ chan->fcs = L2CAP_FCS_CRC16;
+ chan->max_tx = L2CAP_DEFAULT_MAX_TX;
+ chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
+ chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
+ chan->sec_level = BT_SECURITY_LOW;
+
+ set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+}
+
+static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
- chan->psm, chan->dcid);
+ __le16_to_cpu(chan->psm), chan->dcid);
conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
@@ -347,7 +466,7 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
list_add(&chan->list, &conn->chan_l);
}
-void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
mutex_lock(&conn->chan_lock);
__l2cap_chan_add(conn, chan);
@@ -405,6 +524,8 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
skb_queue_purge(&chan->srej_q);
+ l2cap_seq_list_free(&chan->srej_list);
+ l2cap_seq_list_free(&chan->retrans_list);
list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
list_del(&l->list);
kfree(l);
@@ -453,7 +574,6 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
case BT_CONFIG:
if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
conn->hcon->type == ACL_LINK) {
- __clear_chan_timer(chan);
__set_chan_timer(chan, sk->sk_sndtimeo);
l2cap_send_disconn_req(conn, chan, reason);
} else
@@ -466,7 +586,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
struct l2cap_conn_rsp rsp;
__u16 result;
- if (bt_sk(sk)->defer_setup)
+ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
result = L2CAP_CR_SEC_BLOCK;
else
result = L2CAP_CR_BAD_PSM;
@@ -599,6 +719,117 @@ static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
hci_send_acl(chan->conn->hchan, skb, flags);
}
+static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
+{
+ control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
+ control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
+
+ if (enh & L2CAP_CTRL_FRAME_TYPE) {
+ /* S-Frame */
+ control->sframe = 1;
+ control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
+ control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
+
+ control->sar = 0;
+ control->txseq = 0;
+ } else {
+ /* I-Frame */
+ control->sframe = 0;
+ control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
+ control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
+
+ control->poll = 0;
+ control->super = 0;
+ }
+}
+
+static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
+{
+ control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
+ control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
+
+ if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
+ /* S-Frame */
+ control->sframe = 1;
+ control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
+ control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
+
+ control->sar = 0;
+ control->txseq = 0;
+ } else {
+ /* I-Frame */
+ control->sframe = 0;
+ control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
+ control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
+
+ control->poll = 0;
+ control->super = 0;
+ }
+}
+
+static inline void __unpack_control(struct l2cap_chan *chan,
+ struct sk_buff *skb)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
+ __unpack_extended_control(get_unaligned_le32(skb->data),
+ &bt_cb(skb)->control);
+ } else {
+ __unpack_enhanced_control(get_unaligned_le16(skb->data),
+ &bt_cb(skb)->control);
+ }
+}
+
+static u32 __pack_extended_control(struct l2cap_ctrl *control)
+{
+ u32 packed;
+
+ packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
+ packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
+
+ if (control->sframe) {
+ packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
+ packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
+ packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
+ } else {
+ packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
+ packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
+ }
+
+ return packed;
+}
+
+static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
+{
+ u16 packed;
+
+ packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
+ packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
+
+ if (control->sframe) {
+ packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
+ packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
+ packed |= L2CAP_CTRL_FRAME_TYPE;
+ } else {
+ packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
+ packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
+ }
+
+ return packed;
+}
+
+static inline void __pack_control(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff *skb)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
+ put_unaligned_le32(__pack_extended_control(control),
+ skb->data + L2CAP_HDR_SIZE);
+ } else {
+ put_unaligned_le16(__pack_enhanced_control(control),
+ skb->data + L2CAP_HDR_SIZE);
+ }
+}
+
static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
{
struct sk_buff *skb;
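
For illustration, the __pack/__unpack helpers added above convert between struct l2cap_ctrl and the on-the-wire control word using shifts and masks. The round-trip pattern is sketched below with made-up field positions (DEMO_* names, not the real L2CAP bit layout, which lives in include/net/bluetooth/l2cap.h); it is not part of this patch.

#include <stdint.h>
#include <stdio.h>

struct demo_ctrl {
	uint16_t reqseq;	/* 6 bits in this toy layout */
	uint16_t txseq;		/* 6 bits */
	uint16_t final;		/* 1 bit */
};

#define DEMO_REQSEQ_SHIFT	0
#define DEMO_TXSEQ_SHIFT	6
#define DEMO_FINAL_SHIFT	12

static uint16_t demo_pack(const struct demo_ctrl *c)
{
	/* Pack the fields into one 16-bit control word */
	return (uint16_t)((c->reqseq << DEMO_REQSEQ_SHIFT) |
			  (c->txseq  << DEMO_TXSEQ_SHIFT)  |
			  (c->final  << DEMO_FINAL_SHIFT));
}

static void demo_unpack(uint16_t w, struct demo_ctrl *c)
{
	/* Reverse the packing: shift down, then mask off the field width */
	c->reqseq = (w >> DEMO_REQSEQ_SHIFT) & 0x3f;
	c->txseq  = (w >> DEMO_TXSEQ_SHIFT)  & 0x3f;
	c->final  = (w >> DEMO_FINAL_SHIFT)  & 0x01;
}

int main(void)
{
	struct demo_ctrl in = { .reqseq = 17, .txseq = 42, .final = 1 }, out;

	demo_unpack(demo_pack(&in), &out);
	printf("reqseq %u txseq %u final %u\n",
	       (unsigned)out.reqseq, (unsigned)out.txseq, (unsigned)out.final);

	return 0;
}
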
@@ -681,10 +912,38 @@ static void l2cap_send_conn_req(struct l2cap_chan *chan)
l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
+static void l2cap_chan_ready(struct l2cap_chan *chan)
+{
+ struct sock *sk = chan->sk;
+ struct sock *parent;
+
+ lock_sock(sk);
+
+ parent = bt_sk(sk)->parent;
+
+ BT_DBG("sk %p, parent %p", sk, parent);
+
+ chan->conf_state = 0;
+ __clear_chan_timer(chan);
+
+ __l2cap_state_change(chan, BT_CONNECTED);
+ sk->sk_state_change(sk);
+
+ if (parent)
+ parent->sk_data_ready(parent, 0);
+
+ release_sock(sk);
+}
+
static void l2cap_do_start(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
+ if (conn->hcon->type == LE_LINK) {
+ l2cap_chan_ready(chan);
+ return;
+ }
+
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
return;
@@ -791,7 +1050,8 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
if (l2cap_chan_check_security(chan)) {
lock_sock(sk);
- if (bt_sk(sk)->defer_setup) {
+ if (test_bit(BT_SK_DEFER_SETUP,
+ &bt_sk(sk)->flags)) {
struct sock *parent = bt_sk(sk)->parent;
rsp.result = cpu_to_le16(L2CAP_CR_PEND);
rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
@@ -830,10 +1090,12 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
mutex_unlock(&conn->chan_lock);
}
-/* Find socket with cid and source bdaddr.
+/* Find socket with cid and source/destination bdaddr.
* Returns closest match, locked.
*/
-static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
+static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
+ bdaddr_t *src,
+ bdaddr_t *dst)
{
struct l2cap_chan *c, *c1 = NULL;
@@ -846,14 +1108,22 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdadd
continue;
if (c->scid == cid) {
+ int src_match, dst_match;
+ int src_any, dst_any;
+
/* Exact match. */
- if (!bacmp(&bt_sk(sk)->src, src)) {
+ src_match = !bacmp(&bt_sk(sk)->src, src);
+ dst_match = !bacmp(&bt_sk(sk)->dst, dst);
+ if (src_match && dst_match) {
read_unlock(&chan_list_lock);
return c;
}
/* Closest match */
- if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+ src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
+ dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
+ if ((src_match && dst_any) || (src_any && dst_match) ||
+ (src_any && dst_any))
c1 = c;
}
}
@@ -872,7 +1142,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
/* Check if we have socket listening on cid */
pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
- conn->src);
+ conn->src, conn->dst);
if (!pchan)
return;
@@ -910,29 +1180,6 @@ clean:
release_sock(parent);
}
-static void l2cap_chan_ready(struct l2cap_chan *chan)
-{
- struct sock *sk = chan->sk;
- struct sock *parent;
-
- lock_sock(sk);
-
- parent = bt_sk(sk)->parent;
-
- BT_DBG("sk %p, parent %p", sk, parent);
-
- chan->conf_state = 0;
- __clear_chan_timer(chan);
-
- __l2cap_state_change(chan, BT_CONNECTED);
- sk->sk_state_change(sk);
-
- if (parent)
- parent->sk_data_ready(parent, 0);
-
- release_sock(sk);
-}
-
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
struct l2cap_chan *chan;
@@ -1016,6 +1263,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
/* Kill channels */
list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
+ l2cap_chan_hold(chan);
l2cap_chan_lock(chan);
l2cap_chan_del(chan, err);
@@ -1023,6 +1271,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
l2cap_chan_unlock(chan);
chan->ops->close(chan->data);
+ l2cap_chan_put(chan);
}
mutex_unlock(&conn->chan_lock);
@@ -1100,10 +1349,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
/* ---- Socket interface ---- */
-/* Find socket with psm and source bdaddr.
+/* Find socket with psm and source / destination bdaddr.
* Returns closest match.
*/
-static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
+static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
+ bdaddr_t *src,
+ bdaddr_t *dst)
{
struct l2cap_chan *c, *c1 = NULL;
@@ -1116,14 +1367,22 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
continue;
if (c->psm == psm) {
+ int src_match, dst_match;
+ int src_any, dst_any;
+
/* Exact match. */
- if (!bacmp(&bt_sk(sk)->src, src)) {
+ src_match = !bacmp(&bt_sk(sk)->src, src);
+ dst_match = !bacmp(&bt_sk(sk)->dst, dst);
+ if (src_match && dst_match) {
read_unlock(&chan_list_lock);
return c;
}
/* Closest match */
- if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+ src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
+ dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
+ if ((src_match && dst_any) || (src_any && dst_match) ||
+ (src_any && dst_any))
c1 = c;
}
}
@@ -1133,7 +1392,8 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
return c1;
}
-int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
+int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
+ bdaddr_t *dst, u8 dst_type)
{
struct sock *sk = chan->sk;
bdaddr_t *src = &bt_sk(sk)->src;
@@ -1143,8 +1403,8 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
__u8 auth_type;
int err;
- BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
- chan->psm);
+ BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
+ dst_type, __le16_to_cpu(chan->psm));
hdev = hci_get_route(dst, src);
if (!hdev)
@@ -1218,11 +1478,11 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
auth_type = l2cap_get_auth_type(chan);
if (chan->dcid == L2CAP_CID_LE_DATA)
- hcon = hci_connect(hdev, LE_LINK, dst,
- chan->sec_level, auth_type);
+ hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
+ chan->sec_level, auth_type);
else
- hcon = hci_connect(hdev, ACL_LINK, dst,
- chan->sec_level, auth_type);
+ hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
+ chan->sec_level, auth_type);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
@@ -1236,6 +1496,18 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
goto done;
}
+ if (hcon->type == LE_LINK) {
+ err = 0;
+
+ if (!list_empty(&conn->chan_l)) {
+ err = -EBUSY;
+ hci_conn_put(hcon);
+ }
+
+ if (err)
+ goto done;
+ }
+
/* Update source addr of the socket */
bacpy(src, conn->src);
@@ -1346,7 +1618,7 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
while ((skb = skb_peek(&chan->tx_q)) &&
chan->unacked_frames) {
- if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
+ if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
break;
skb = skb_dequeue(&chan->tx_q);
@@ -1368,6 +1640,7 @@ static void l2cap_streaming_send(struct l2cap_chan *chan)
while ((skb = skb_dequeue(&chan->tx_q))) {
control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
control |= __set_txseq(chan, chan->next_tx_seq);
+ control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
__put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
if (chan->fcs == L2CAP_FCS_CRC16) {
@@ -1393,21 +1666,21 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
if (!skb)
return;
- while (bt_cb(skb)->tx_seq != tx_seq) {
+ while (bt_cb(skb)->control.txseq != tx_seq) {
if (skb_queue_is_last(&chan->tx_q, skb))
return;
skb = skb_queue_next(&chan->tx_q, skb);
}
- if (chan->remote_max_tx &&
- bt_cb(skb)->retries == chan->remote_max_tx) {
+ if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
+ chan->remote_max_tx) {
l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
return;
}
tx_skb = skb_clone(skb, GFP_ATOMIC);
- bt_cb(skb)->retries++;
+ bt_cb(skb)->control.retries++;
control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
control &= __get_sar_mask(chan);
@@ -1440,17 +1713,20 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
if (chan->state != BT_CONNECTED)
return -ENOTCONN;
+ if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
+ return 0;
+
while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
- if (chan->remote_max_tx &&
- bt_cb(skb)->retries == chan->remote_max_tx) {
+ if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
+ chan->remote_max_tx) {
l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
break;
}
tx_skb = skb_clone(skb, GFP_ATOMIC);
- bt_cb(skb)->retries++;
+ bt_cb(skb)->control.retries++;
control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
control &= __get_sar_mask(chan);
@@ -1460,6 +1736,7 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
control |= __set_reqseq(chan, chan->buffer_seq);
control |= __set_txseq(chan, chan->next_tx_seq);
+ control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
__put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
@@ -1474,11 +1751,11 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
__set_retrans_timer(chan);
- bt_cb(skb)->tx_seq = chan->next_tx_seq;
+ bt_cb(skb)->control.txseq = chan->next_tx_seq;
chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
- if (bt_cb(skb)->retries == 1) {
+ if (bt_cb(skb)->control.retries == 1) {
chan->unacked_frames++;
if (!nsent++)
@@ -1554,7 +1831,7 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
{
struct l2cap_conn *conn = chan->conn;
struct sk_buff **frag;
- int err, sent = 0;
+ int sent = 0;
if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
return -EFAULT;
@@ -1565,14 +1842,17 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
while (len) {
+ struct sk_buff *tmp;
+
count = min_t(unsigned int, conn->mtu, len);
- *frag = chan->ops->alloc_skb(chan, count,
- msg->msg_flags & MSG_DONTWAIT,
- &err);
+ tmp = chan->ops->alloc_skb(chan, count,
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+
+ *frag = tmp;
- if (!*frag)
- return err;
if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
return -EFAULT;
@@ -1581,6 +1861,9 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
sent += count;
len -= count;
+ skb->len += (*frag)->len;
+ skb->data_len += (*frag)->len;
+
frag = &(*frag)->next;
}
@@ -1601,18 +1884,17 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
count = min_t(unsigned int, (conn->mtu - hlen), len);
skb = chan->ops->alloc_skb(chan, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
-
- if (!skb)
- return ERR_PTR(err);
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(skb))
+ return skb;
skb->priority = priority;
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
- lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- put_unaligned_le16(chan->psm, skb_put(skb, 2));
+ lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
+ put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
if (unlikely(err < 0)) {
@@ -1628,25 +1910,24 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
{
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
- int err, count, hlen = L2CAP_HDR_SIZE;
+ int err, count;
struct l2cap_hdr *lh;
BT_DBG("chan %p len %d", chan, (int)len);
- count = min_t(unsigned int, (conn->mtu - hlen), len);
+ count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
- skb = chan->ops->alloc_skb(chan, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
-
- if (!skb)
- return ERR_PTR(err);
+ skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(skb))
+ return skb;
skb->priority = priority;
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
- lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
+ lh->len = cpu_to_le16(len);
err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
if (unlikely(err < 0)) {
@@ -1658,7 +1939,7 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
struct msghdr *msg, size_t len,
- u32 control, u16 sdulen)
+ u16 sdulen)
{
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
@@ -1684,17 +1965,16 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
count = min_t(unsigned int, (conn->mtu - hlen), len);
skb = chan->ops->alloc_skb(chan, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
-
- if (!skb)
- return ERR_PTR(err);
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(skb))
+ return skb;
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
+ __put_control(chan, 0, skb_put(skb, __ctrl_size(chan)));
if (sdulen)
put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
@@ -1708,61 +1988,82 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
if (chan->fcs == L2CAP_FCS_CRC16)
put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
- bt_cb(skb)->retries = 0;
+ bt_cb(skb)->control.retries = 0;
return skb;
}
-static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static int l2cap_segment_sdu(struct l2cap_chan *chan,
+ struct sk_buff_head *seg_queue,
+ struct msghdr *msg, size_t len)
{
struct sk_buff *skb;
- struct sk_buff_head sar_queue;
- u32 control;
- size_t size = 0;
+ u16 sdu_len;
+ size_t pdu_len;
+ int err = 0;
+ u8 sar;
- skb_queue_head_init(&sar_queue);
- control = __set_ctrl_sar(chan, L2CAP_SAR_START);
- skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
- __skb_queue_tail(&sar_queue, skb);
- len -= chan->remote_mps;
- size += chan->remote_mps;
+ /* It is critical that ERTM PDUs fit in a single HCI fragment,
+ * so fragmented skbs are not used. The HCI layer's handling
+ * of fragmented skbs is not compatible with ERTM's queueing.
+ */
- while (len > 0) {
- size_t buflen;
+ /* PDU size is derived from the HCI MTU */
+ pdu_len = chan->conn->mtu;
- if (len > chan->remote_mps) {
- control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
- buflen = chan->remote_mps;
- } else {
- control = __set_ctrl_sar(chan, L2CAP_SAR_END);
- buflen = len;
- }
+ pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
+
+ /* Adjust for largest possible L2CAP overhead. */
+ pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
+
+ /* Remote device may have requested smaller PDUs */
+ pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
+
+ if (len <= pdu_len) {
+ sar = L2CAP_SAR_UNSEGMENTED;
+ sdu_len = 0;
+ pdu_len = len;
+ } else {
+ sar = L2CAP_SAR_START;
+ sdu_len = len;
+ pdu_len -= L2CAP_SDULEN_SIZE;
+ }
+
+ while (len > 0) {
+ skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
- skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
if (IS_ERR(skb)) {
- skb_queue_purge(&sar_queue);
+ __skb_queue_purge(seg_queue);
return PTR_ERR(skb);
}
- __skb_queue_tail(&sar_queue, skb);
- len -= buflen;
- size += buflen;
+ bt_cb(skb)->control.sar = sar;
+ __skb_queue_tail(seg_queue, skb);
+
+ len -= pdu_len;
+ if (sdu_len) {
+ sdu_len = 0;
+ pdu_len += L2CAP_SDULEN_SIZE;
+ }
+
+ if (len <= pdu_len) {
+ sar = L2CAP_SAR_END;
+ pdu_len = len;
+ } else {
+ sar = L2CAP_SAR_CONTINUE;
+ }
}
- skb_queue_splice_tail(&sar_queue, &chan->tx_q);
- if (chan->tx_send_head == NULL)
- chan->tx_send_head = sar_queue.next;
- return size;
+ return err;
}
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
u32 priority)
{
struct sk_buff *skb;
- u32 control;
int err;
+ struct sk_buff_head seg_queue;
/* Connectionless channel */
if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
@@ -1791,42 +2092,47 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
case L2CAP_MODE_ERTM:
case L2CAP_MODE_STREAMING:
- /* Entire SDU fits into one PDU */
- if (len <= chan->remote_mps) {
- control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
- skb = l2cap_create_iframe_pdu(chan, msg, len, control,
- 0);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ /* Check outgoing MTU */
+ if (len > chan->omtu) {
+ err = -EMSGSIZE;
+ break;
+ }
- __skb_queue_tail(&chan->tx_q, skb);
+ __skb_queue_head_init(&seg_queue);
- if (chan->tx_send_head == NULL)
- chan->tx_send_head = skb;
+ /* Do segmentation before calling in to the state machine,
+ * since it's possible to block while waiting for memory
+ * allocation.
+ */
+ err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
- } else {
- /* Segment SDU into multiples PDUs */
- err = l2cap_sar_segment_sdu(chan, msg, len);
- if (err < 0)
- return err;
+ /* The channel could have been closed while segmenting,
+ * check that it is still connected.
+ */
+ if (chan->state != BT_CONNECTED) {
+ __skb_queue_purge(&seg_queue);
+ err = -ENOTCONN;
}
- if (chan->mode == L2CAP_MODE_STREAMING) {
- l2cap_streaming_send(chan);
- err = len;
+ if (err)
break;
- }
- if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
- test_bit(CONN_WAIT_F, &chan->conn_state)) {
- err = len;
- break;
- }
+ if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL)
+ chan->tx_send_head = seg_queue.next;
+ skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
+
+ if (chan->mode == L2CAP_MODE_ERTM)
+ err = l2cap_ertm_send(chan);
+ else
+ l2cap_streaming_send(chan);
- err = l2cap_ertm_send(chan);
if (err >= 0)
err = len;
+ /* If the skbs were not queued for sending, they'll still be in
+ * seg_queue and need to be purged.
+ */
+ __skb_queue_purge(&seg_queue);
break;
default:
@@ -2040,13 +2346,29 @@ static void l2cap_ack_timeout(struct work_struct *work)
l2cap_chan_put(chan);
}
-static inline void l2cap_ertm_init(struct l2cap_chan *chan)
+static inline int l2cap_ertm_init(struct l2cap_chan *chan)
{
+ int err;
+
+ chan->next_tx_seq = 0;
+ chan->expected_tx_seq = 0;
chan->expected_ack_seq = 0;
chan->unacked_frames = 0;
chan->buffer_seq = 0;
chan->num_acked = 0;
chan->frames_sent = 0;
+ chan->last_acked_seq = 0;
+ chan->sdu = NULL;
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
+
+ skb_queue_head_init(&chan->tx_q);
+
+ if (chan->mode != L2CAP_MODE_ERTM)
+ return 0;
+
+ chan->rx_state = L2CAP_RX_STATE_RECV;
+ chan->tx_state = L2CAP_TX_STATE_XMIT;
INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
@@ -2055,6 +2377,11 @@ static inline void l2cap_ertm_init(struct l2cap_chan *chan)
skb_queue_head_init(&chan->srej_q);
INIT_LIST_HEAD(&chan->srej_l);
+ err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
+ if (err < 0)
+ return err;
+
+ return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
}
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@@ -2378,9 +2705,9 @@ done:
chan->remote_mps = size;
rfc.retrans_timeout =
- le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
+ __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
rfc.monitor_timeout =
- le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
+ __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
set_bit(CONF_MODE_DONE, &chan->conf_state);
@@ -2644,10 +2971,10 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
u16 dcid = 0, scid = __le16_to_cpu(req->scid);
__le16 psm = req->psm;
- BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
+ BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
/* Check if we have socket listening on psm */
- pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
+ pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
if (!pchan) {
result = L2CAP_CR_BAD_PSM;
goto sendresp;
@@ -2706,7 +3033,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
if (l2cap_chan_check_security(chan)) {
- if (bt_sk(sk)->defer_setup) {
+ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
__l2cap_state_change(chan, BT_CONNECT2);
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHOR_PEND;
@@ -2848,7 +3175,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
u16 dcid, flags;
u8 rsp[64];
struct l2cap_chan *chan;
- int len;
+ int len, err = 0;
dcid = __le16_to_cpu(req->dcid);
flags = __le16_to_cpu(req->flags);
@@ -2859,8 +3186,6 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
if (!chan)
return -ENOENT;
- l2cap_chan_lock(chan);
-
if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
struct l2cap_cmd_rej_cid rej;
@@ -2915,13 +3240,15 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
l2cap_state_change(chan, BT_CONNECTED);
- chan->next_tx_seq = 0;
- chan->expected_tx_seq = 0;
- skb_queue_head_init(&chan->tx_q);
- if (chan->mode == L2CAP_MODE_ERTM)
- l2cap_ertm_init(chan);
+ if (chan->mode == L2CAP_MODE_ERTM ||
+ chan->mode == L2CAP_MODE_STREAMING)
+ err = l2cap_ertm_init(chan);
+
+ if (err < 0)
+ l2cap_send_disconn_req(chan->conn, chan, -err);
+ else
+ l2cap_chan_ready(chan);
- l2cap_chan_ready(chan);
goto unlock;
}
@@ -2949,7 +3276,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
unlock:
l2cap_chan_unlock(chan);
- return 0;
+ return err;
}
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
@@ -2957,21 +3284,20 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
u16 scid, flags, result;
struct l2cap_chan *chan;
- int len = cmd->len - sizeof(*rsp);
+ int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
+ int err = 0;
scid = __le16_to_cpu(rsp->scid);
flags = __le16_to_cpu(rsp->flags);
result = __le16_to_cpu(rsp->result);
- BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
- scid, flags, result);
+ BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
+ result, len);
chan = l2cap_get_chan_by_scid(conn, scid);
if (!chan)
return 0;
- l2cap_chan_lock(chan);
-
switch (result) {
case L2CAP_CONF_SUCCESS:
l2cap_conf_rfc_get(chan, rsp->data, len);
@@ -3045,18 +3371,19 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
set_default_fcs(chan);
l2cap_state_change(chan, BT_CONNECTED);
- chan->next_tx_seq = 0;
- chan->expected_tx_seq = 0;
- skb_queue_head_init(&chan->tx_q);
- if (chan->mode == L2CAP_MODE_ERTM)
- l2cap_ertm_init(chan);
+ if (chan->mode == L2CAP_MODE_ERTM ||
+ chan->mode == L2CAP_MODE_STREAMING)
+ err = l2cap_ertm_init(chan);
- l2cap_chan_ready(chan);
+ if (err < 0)
+ l2cap_send_disconn_req(chan->conn, chan, -err);
+ else
+ l2cap_chan_ready(chan);
}
done:
l2cap_chan_unlock(chan);
- return 0;
+ return err;
}
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
@@ -3092,11 +3419,13 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
sk->sk_shutdown = SHUTDOWN_MASK;
release_sock(sk);
+ l2cap_chan_hold(chan);
l2cap_chan_del(chan, ECONNRESET);
l2cap_chan_unlock(chan);
chan->ops->close(chan->data);
+ l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock);
@@ -3124,11 +3453,13 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
l2cap_chan_lock(chan);
+ l2cap_chan_hold(chan);
l2cap_chan_del(chan, 0);
l2cap_chan_unlock(chan);
chan->ops->close(chan->data);
+ l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock);
@@ -3265,8 +3596,8 @@ static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
/* Placeholder: Always reject */
rsp.dcid = 0;
rsp.scid = cpu_to_le16(scid);
- rsp.result = L2CAP_CR_NO_MEM;
- rsp.status = L2CAP_CS_NO_INFO;
+ rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
+ rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
sizeof(rsp), &rsp);
@@ -3665,19 +3996,19 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb,
struct sk_buff *next_skb;
int tx_seq_offset, next_tx_seq_offset;
- bt_cb(skb)->tx_seq = tx_seq;
- bt_cb(skb)->sar = sar;
+ bt_cb(skb)->control.txseq = tx_seq;
+ bt_cb(skb)->control.sar = sar;
next_skb = skb_peek(&chan->srej_q);
tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
while (next_skb) {
- if (bt_cb(next_skb)->tx_seq == tx_seq)
+ if (bt_cb(next_skb)->control.txseq == tx_seq)
return -EINVAL;
next_tx_seq_offset = __seq_offset(chan,
- bt_cb(next_skb)->tx_seq, chan->buffer_seq);
+ bt_cb(next_skb)->control.txseq, chan->buffer_seq);
if (next_tx_seq_offset > tx_seq_offset) {
__skb_queue_before(&chan->srej_q, next_skb, skb);
@@ -3800,6 +4131,7 @@ static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
BT_DBG("chan %p, Enter local busy", chan);
set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
+ l2cap_seq_list_clear(&chan->srej_list);
__set_ack_timer(chan);
}
@@ -3848,11 +4180,11 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
int err;
- if (bt_cb(skb)->tx_seq != tx_seq)
+ if (bt_cb(skb)->control.txseq != tx_seq)
break;
skb = skb_dequeue(&chan->srej_q);
- control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
+ control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
err = l2cap_reassemble_sdu(chan, skb, control);
if (err < 0) {
@@ -3892,6 +4224,7 @@ static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
while (tx_seq != chan->expected_tx_seq) {
control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
control |= __set_reqseq(chan, chan->expected_tx_seq);
+ l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
l2cap_send_sframe(chan, control);
new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
@@ -4022,8 +4355,8 @@ expected:
chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
- bt_cb(skb)->tx_seq = tx_seq;
- bt_cb(skb)->sar = sar;
+ bt_cb(skb)->control.txseq = tx_seq;
+ bt_cb(skb)->control.sar = sar;
__skb_queue_tail(&chan->srej_q, skb);
return 0;
}
@@ -4220,6 +4553,8 @@ static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
u16 req_seq;
int len, next_tx_seq_offset, req_seq_offset;
+ __unpack_control(chan, skb);
+
control = __get_control(chan, skb->data);
skb_pull(skb, __ctrl_size(chan));
len = skb->len;
@@ -4295,8 +4630,6 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
return 0;
}
- l2cap_chan_lock(chan);
-
BT_DBG("chan %p, len %d", chan, skb->len);
if (chan->state != BT_CONNECTED)
@@ -4375,7 +4708,7 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
{
struct l2cap_chan *chan;
- chan = l2cap_global_chan_by_psm(0, psm, conn->src);
+ chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
if (!chan)
goto drop;
@@ -4396,11 +4729,12 @@ drop:
return 0;
}
-static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
+static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
+ struct sk_buff *skb)
{
struct l2cap_chan *chan;
- chan = l2cap_global_chan_by_scid(0, cid, conn->src);
+ chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
if (!chan)
goto drop;
@@ -4445,7 +4779,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
break;
case L2CAP_CID_CONN_LESS:
- psm = get_unaligned_le16(skb->data);
+ psm = get_unaligned((__le16 *) skb->data);
skb_pull(skb, 2);
l2cap_conless_channel(conn, psm, skb);
break;
@@ -4540,7 +4874,6 @@ static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
if (encrypt == 0x00) {
if (chan->sec_level == BT_SECURITY_MEDIUM) {
- __clear_chan_timer(chan);
__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
} else if (chan->sec_level == BT_SECURITY_HIGH)
l2cap_chan_close(chan, ECONNREFUSED);
@@ -4561,7 +4894,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
BT_DBG("conn %p", conn);
if (hcon->type == LE_LINK) {
- smp_distribute_keys(conn, 0);
+ if (!status && encrypt)
+ smp_distribute_keys(conn, 0);
cancel_delayed_work(&conn->security_timer);
}
@@ -4591,7 +4925,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
chan->state == BT_CONFIG)) {
struct sock *sk = chan->sk;
- bt_sk(sk)->suspended = false;
+ clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
sk->sk_state_change(sk);
l2cap_check_encryption(chan, encrypt);
@@ -4603,7 +4937,6 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
if (!status) {
l2cap_send_conn_req(chan);
} else {
- __clear_chan_timer(chan);
__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
} else if (chan->state == BT_CONNECT2) {
@@ -4614,7 +4947,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
lock_sock(sk);
if (!status) {
- if (bt_sk(sk)->defer_setup) {
+ if (test_bit(BT_SK_DEFER_SETUP,
+ &bt_sk(sk)->flags)) {
struct sock *parent = bt_sk(sk)->parent;
res = L2CAP_CR_PEND;
stat = L2CAP_CS_AUTHOR_PEND;
@@ -4664,8 +4998,6 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
if (!(flags & ACL_CONT)) {
struct l2cap_hdr *hdr;
- struct l2cap_chan *chan;
- u16 cid;
int len;
if (conn->rx_len) {
@@ -4685,7 +5017,6 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
hdr = (struct l2cap_hdr *) skb->data;
len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
- cid = __le16_to_cpu(hdr->cid);
if (len == skb->len) {
/* Complete frame received */
@@ -4702,23 +5033,6 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
goto drop;
}
- chan = l2cap_get_chan_by_scid(conn, cid);
-
- if (chan && chan->sk) {
- struct sock *sk = chan->sk;
- lock_sock(sk);
-
- if (chan->imtu < len - L2CAP_HDR_SIZE) {
- BT_ERR("Frame exceeding recv MTU (len %d, "
- "MTU %d)", len,
- chan->imtu);
- release_sock(sk);
- l2cap_conn_unreliable(conn, ECOMM);
- goto drop;
- }
- release_sock(sk);
- }
-
/* Allocate skb for the complete frame (with header) */
conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
if (!conn->rx_skb)
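
Context note: the l2cap_core.c hunks above repeatedly replace the old bt_sk(sk)->defer_setup and ->suspended booleans with test_bit()/set_bit() on a shared flags word. A minimal standalone sketch of that conversion pattern using the kernel's generic atomic bitops; the ex_ names and flag bits are illustrative, not the actual Bluetooth definitions.

#include <linux/bitops.h>
#include <linux/types.h>

/* hypothetical flag bits packed into one unsigned long */
enum {
        EX_SK_DEFER_SETUP,      /* accept() completes before config */
        EX_SK_SUSPEND,          /* writes blocked until security is done */
};

struct ex_sock {
        unsigned long flags;    /* replaces several separate bool members */
};

static void ex_set_defer_setup(struct ex_sock *s, bool on)
{
        /* set_bit()/clear_bit() are atomic, so concurrent readers using
         * test_bit() need no extra locking for the flag word itself */
        if (on)
                set_bit(EX_SK_DEFER_SETUP, &s->flags);
        else
                clear_bit(EX_SK_DEFER_SETUP, &s->flags);
}

static bool ex_writes_allowed(struct ex_sock *s)
{
        return !test_bit(EX_SK_SUSPEND, &s->flags);
}
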
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 04e7c172d49..3bb1611b9d4 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -124,7 +124,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
return -EINVAL;
err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
- &la.l2_bdaddr);
+ &la.l2_bdaddr, la.l2_bdaddr_type);
if (err)
return err;
@@ -148,12 +148,16 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
lock_sock(sk);
- if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
- || sk->sk_state != BT_BOUND) {
+ if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) {
+ err = -EINVAL;
+ goto done;
+ }
+
switch (chan->mode) {
case L2CAP_MODE_BASIC:
break;
@@ -320,8 +324,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
case L2CAP_CONNINFO:
if (sk->sk_state != BT_CONNECTED &&
- !(sk->sk_state == BT_CONNECT2 &&
- bt_sk(sk)->defer_setup)) {
+ !(sk->sk_state == BT_CONNECT2 &&
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
err = -ENOTCONN;
break;
}
@@ -375,7 +379,10 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
}
memset(&sec, 0, sizeof(sec));
- sec.level = chan->sec_level;
+ if (chan->conn)
+ sec.level = chan->conn->hcon->sec_level;
+ else
+ sec.level = chan->sec_level;
if (sk->sk_state == BT_CONNECTED)
sec.key_size = chan->conn->hcon->enc_key_size;
@@ -392,7 +399,8 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
break;
}
- if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
+ if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
+ (u32 __user *) optval))
err = -EFAULT;
break;
@@ -594,10 +602,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
/* or for ACL link */
} else if ((sk->sk_state == BT_CONNECT2 &&
- bt_sk(sk)->defer_setup) ||
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) ||
sk->sk_state == BT_CONNECTED) {
if (!l2cap_chan_check_security(chan))
- bt_sk(sk)->suspended = true;
+ set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
else
sk->sk_state_change(sk);
} else {
@@ -616,7 +624,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
break;
}
- bt_sk(sk)->defer_setup = opt;
+ if (opt)
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+ else
+ clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
break;
case BT_FLUSHABLE:
@@ -716,16 +727,13 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
- lock_sock(sk);
-
- if (sk->sk_state != BT_CONNECTED) {
- release_sock(sk);
+ if (sk->sk_state != BT_CONNECTED)
return -ENOTCONN;
- }
+ l2cap_chan_lock(chan);
err = l2cap_chan_send(chan, msg, len, sk->sk_priority);
+ l2cap_chan_unlock(chan);
- release_sock(sk);
return err;
}
@@ -737,7 +745,8 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
lock_sock(sk);
- if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
+ if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP,
+ &bt_sk(sk)->flags)) {
sk->sk_state = BT_CONFIG;
pi->chan->state = BT_CONFIG;
@@ -931,12 +940,19 @@ static void l2cap_sock_state_change_cb(void *data, int state)
}
static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
- unsigned long len, int nb,
- int *err)
+ unsigned long len, int nb)
{
- struct sock *sk = chan->sk;
+ struct sk_buff *skb;
+ int err;
+
+ l2cap_chan_unlock(chan);
+ skb = bt_skb_send_alloc(chan->sk, len, nb, &err);
+ l2cap_chan_lock(chan);
+
+ if (!skb)
+ return ERR_PTR(err);
- return bt_skb_send_alloc(sk, len, nb, err);
+ return skb;
}
static struct l2cap_ops l2cap_chan_ops = {
@@ -952,6 +968,7 @@ static void l2cap_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
+ l2cap_chan_put(l2cap_pi(sk)->chan);
if (l2cap_pi(sk)->rx_busy_skb) {
kfree_skb(l2cap_pi(sk)->rx_busy_skb);
l2cap_pi(sk)->rx_busy_skb = NULL;
@@ -972,7 +989,7 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
struct l2cap_chan *pchan = l2cap_pi(parent)->chan;
sk->sk_type = parent->sk_type;
- bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
+ bt_sk(sk)->flags = bt_sk(parent)->flags;
chan->chan_type = pchan->chan_type;
chan->imtu = pchan->imtu;
@@ -1010,13 +1027,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
} else {
chan->mode = L2CAP_MODE_BASIC;
}
- chan->max_tx = L2CAP_DEFAULT_MAX_TX;
- chan->fcs = L2CAP_FCS_CRC16;
- chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
- chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
- chan->sec_level = BT_SECURITY_LOW;
- chan->flags = 0;
- set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+
+ l2cap_chan_set_defaults(chan);
}
/* Default config options */
@@ -1052,12 +1064,16 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
- chan = l2cap_chan_create(sk);
+ chan = l2cap_chan_create();
if (!chan) {
l2cap_sock_kill(sk);
return NULL;
}
+ l2cap_chan_hold(chan);
+
+ chan->sk = sk;
+
l2cap_pi(sk)->chan = chan;
return sk;
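
Context note: l2cap_sock_alloc_skb_cb() above drops the channel lock around the possibly sleeping bt_skb_send_alloc() call and reports failure through ERR_PTR() instead of a separate *err argument. A rough, self-contained sketch of that unlock/alloc/relock shape, with a generic mutex and kmalloc() standing in for the real channel lock and skb allocator.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct ex_chan {
        struct mutex lock;
};

/* caller holds chan->lock; we must not sleep while holding it */
static void *ex_alloc_buf(struct ex_chan *chan, size_t len)
{
        void *buf;

        mutex_unlock(&chan->lock);
        buf = kmalloc(len, GFP_KERNEL);         /* may sleep */
        mutex_lock(&chan->lock);

        if (!buf)
                return ERR_PTR(-ENOMEM);

        return buf;
}
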
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 4bb03b11112..25d22077607 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -35,10 +35,9 @@
#include <net/bluetooth/smp.h>
bool enable_hs;
-bool enable_le;
#define MGMT_VERSION 1
-#define MGMT_REVISION 0
+#define MGMT_REVISION 1
static const u16 mgmt_commands[] = {
MGMT_OP_READ_INDEX_LIST,
@@ -78,6 +77,7 @@ static const u16 mgmt_commands[] = {
MGMT_OP_CONFIRM_NAME,
MGMT_OP_BLOCK_DEVICE,
MGMT_OP_UNBLOCK_DEVICE,
+ MGMT_OP_SET_DEVICE_ID,
};
static const u16 mgmt_events[] = {
@@ -224,7 +224,7 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
ev = (void *) skb_put(skb, sizeof(*ev));
ev->status = status;
- put_unaligned_le16(cmd, &ev->opcode);
+ ev->opcode = cpu_to_le16(cmd);
err = sock_queue_rcv_skb(sk, skb);
if (err < 0)
@@ -254,7 +254,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
- put_unaligned_le16(cmd, &ev->opcode);
+ ev->opcode = cpu_to_le16(cmd);
ev->status = status;
if (rp)
@@ -275,7 +275,7 @@ static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("sock %p", sk);
rp.version = MGMT_VERSION;
- put_unaligned_le16(MGMT_REVISION, &rp.revision);
+ rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
sizeof(rp));
@@ -285,9 +285,9 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
struct mgmt_rp_read_commands *rp;
- u16 num_commands = ARRAY_SIZE(mgmt_commands);
- u16 num_events = ARRAY_SIZE(mgmt_events);
- u16 *opcode;
+ const u16 num_commands = ARRAY_SIZE(mgmt_commands);
+ const u16 num_events = ARRAY_SIZE(mgmt_events);
+ __le16 *opcode;
size_t rp_size;
int i, err;
@@ -299,8 +299,8 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
if (!rp)
return -ENOMEM;
- put_unaligned_le16(num_commands, &rp->num_commands);
- put_unaligned_le16(num_events, &rp->num_events);
+ rp->num_commands = __constant_cpu_to_le16(num_commands);
+ rp->num_events = __constant_cpu_to_le16(num_events);
for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
put_unaligned_le16(mgmt_commands[i], opcode);
@@ -341,14 +341,14 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
return -ENOMEM;
}
- put_unaligned_le16(count, &rp->num_controllers);
+ rp->num_controllers = cpu_to_le16(count);
i = 0;
list_for_each_entry(d, &hci_dev_list, list) {
if (test_bit(HCI_SETUP, &d->dev_flags))
continue;
- put_unaligned_le16(d->id, &rp->index[i++]);
+ rp->index[i++] = cpu_to_le16(d->id);
BT_DBG("Added hci%u", d->id);
}
@@ -383,10 +383,8 @@ static u32 get_supported_settings(struct hci_dev *hdev)
if (enable_hs)
settings |= MGMT_SETTING_HS;
- if (enable_le) {
- if (hdev->features[4] & LMP_LE)
- settings |= MGMT_SETTING_LE;
- }
+ if (hdev->features[4] & LMP_LE)
+ settings |= MGMT_SETTING_LE;
return settings;
}
@@ -442,9 +440,7 @@ static u16 get_uuid16(u8 *uuid128)
return 0;
}
- memcpy(&val, &uuid128[12], 4);
-
- val = le32_to_cpu(val);
+ val = get_unaligned_le32(&uuid128[12]);
if (val > 0xffff)
return 0;
@@ -479,6 +475,28 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
ptr += (name_len + 2);
}
+ if (hdev->inq_tx_power) {
+ ptr[0] = 2;
+ ptr[1] = EIR_TX_POWER;
+ ptr[2] = (u8) hdev->inq_tx_power;
+
+ eir_len += 3;
+ ptr += 3;
+ }
+
+ if (hdev->devid_source > 0) {
+ ptr[0] = 9;
+ ptr[1] = EIR_DEVICE_ID;
+
+ put_unaligned_le16(hdev->devid_source, ptr + 2);
+ put_unaligned_le16(hdev->devid_vendor, ptr + 4);
+ put_unaligned_le16(hdev->devid_product, ptr + 6);
+ put_unaligned_le16(hdev->devid_version, ptr + 8);
+
+ eir_len += 10;
+ ptr += 10;
+ }
+
memset(uuid16_list, 0, sizeof(uuid16_list));
/* Group all UUID16 types */
@@ -642,8 +660,7 @@ static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
bacpy(&rp.bdaddr, &hdev->bdaddr);
rp.version = hdev->hci_ver;
-
- put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
+ rp.manufacturer = cpu_to_le16(hdev->manufacturer);
rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
rp.current_settings = cpu_to_le32(get_current_settings(hdev));
@@ -840,7 +857,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("request for %s", hdev->name);
- timeout = get_unaligned_le16(&cp->timeout);
+ timeout = __le16_to_cpu(cp->timeout);
if (!cp->val && timeout > 0)
return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
MGMT_STATUS_INVALID_PARAMS);
@@ -1122,8 +1139,8 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
}
if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
- MGMT_STATUS_BUSY);
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
+ MGMT_STATUS_BUSY);
goto failed;
}
@@ -1179,7 +1196,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
hci_dev_lock(hdev);
- if (!enable_le || !(hdev->features[4] & LMP_LE)) {
+ if (!(hdev->features[4] & LMP_LE)) {
err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
MGMT_STATUS_NOT_SUPPORTED);
goto unlock;
@@ -1227,10 +1244,8 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
&hci_cp);
- if (err < 0) {
+ if (err < 0)
mgmt_pending_remove(cmd);
- goto unlock;
- }
unlock:
hci_dev_unlock(hdev);
@@ -1280,10 +1295,8 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
}
cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
- if (!cmd) {
+ if (!cmd)
err = -ENOMEM;
- goto failed;
- }
failed:
hci_dev_unlock(hdev);
@@ -1368,10 +1381,8 @@ update_class:
}
cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
- if (!cmd) {
+ if (!cmd)
err = -ENOMEM;
- goto unlock;
- }
unlock:
hci_dev_unlock(hdev);
@@ -1422,10 +1433,8 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
}
cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
- if (!cmd) {
+ if (!cmd)
err = -ENOMEM;
- goto unlock;
- }
unlock:
hci_dev_unlock(hdev);
@@ -1439,7 +1448,7 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
u16 key_count, expected_len;
int i;
- key_count = get_unaligned_le16(&cp->key_count);
+ key_count = __le16_to_cpu(cp->key_count);
expected_len = sizeof(*cp) + key_count *
sizeof(struct mgmt_link_key_info);
@@ -1512,7 +1521,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- if (cp->addr.type == MGMT_ADDR_BREDR)
+ if (cp->addr.type == BDADDR_BREDR)
err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
else
err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
@@ -1524,7 +1533,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
}
if (cp->disconnect) {
- if (cp->addr.type == MGMT_ADDR_BREDR)
+ if (cp->addr.type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
&cp->addr.bdaddr);
else
@@ -1548,7 +1557,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- put_unaligned_le16(conn->handle, &dc.handle);
+ dc.handle = cpu_to_le16(conn->handle);
dc.reason = 0x13; /* Remote User Terminated Connection */
err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
if (err < 0)
@@ -1584,7 +1593,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- if (cp->addr.type == MGMT_ADDR_BREDR)
+ if (cp->addr.type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
@@ -1601,7 +1610,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- put_unaligned_le16(conn->handle, &dc.handle);
+ dc.handle = cpu_to_le16(conn->handle);
dc.reason = 0x13; /* Remote User Terminated Connection */
err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
@@ -1613,22 +1622,22 @@ failed:
return err;
}
-static u8 link_to_mgmt(u8 link_type, u8 addr_type)
+static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
switch (link_type) {
case LE_LINK:
switch (addr_type) {
case ADDR_LE_DEV_PUBLIC:
- return MGMT_ADDR_LE_PUBLIC;
- case ADDR_LE_DEV_RANDOM:
- return MGMT_ADDR_LE_RANDOM;
+ return BDADDR_LE_PUBLIC;
+
default:
- return MGMT_ADDR_INVALID;
+ /* Fallback to LE Random address type */
+ return BDADDR_LE_RANDOM;
}
- case ACL_LINK:
- return MGMT_ADDR_BREDR;
+
default:
- return MGMT_ADDR_INVALID;
+ /* Fallback to BR/EDR type */
+ return BDADDR_BREDR;
}
}
@@ -1669,13 +1678,13 @@ static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
continue;
bacpy(&rp->addr[i].bdaddr, &c->dst);
- rp->addr[i].type = link_to_mgmt(c->type, c->dst_type);
- if (rp->addr[i].type == MGMT_ADDR_INVALID)
+ rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
+ if (c->type == SCO_LINK || c->type == ESCO_LINK)
continue;
i++;
}
- put_unaligned_le16(i, &rp->conn_count);
+ rp->conn_count = cpu_to_le16(i);
/* Recalculate length in case of filtered SCO connections, etc */
rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
@@ -1836,7 +1845,7 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
struct hci_conn *conn = cmd->user_data;
bacpy(&rp.addr.bdaddr, &conn->dst);
- rp.addr.type = link_to_mgmt(conn->type, conn->dst_type);
+ rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
&rp, sizeof(rp));
@@ -1890,12 +1899,12 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
else
auth_type = HCI_AT_DEDICATED_BONDING_MITM;
- if (cp->addr.type == MGMT_ADDR_BREDR)
- conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, sec_level,
- auth_type);
+ if (cp->addr.type == BDADDR_BREDR)
+ conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
+ cp->addr.type, sec_level, auth_type);
else
- conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, sec_level,
- auth_type);
+ conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
+ cp->addr.type, sec_level, auth_type);
memset(&rp, 0, sizeof(rp));
bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
@@ -1923,7 +1932,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
}
/* For LE, just connecting isn't a proof that the pairing finished */
- if (cp->addr.type == MGMT_ADDR_BREDR)
+ if (cp->addr.type == BDADDR_BREDR)
conn->connect_cfm_cb = pairing_complete_cb;
conn->security_cfm_cb = pairing_complete_cb;
@@ -2000,7 +2009,7 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
goto done;
}
- if (type == MGMT_ADDR_BREDR)
+ if (type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
@@ -2011,7 +2020,7 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
goto done;
}
- if (type == MGMT_ADDR_LE_PUBLIC || type == MGMT_ADDR_LE_RANDOM) {
+ if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) {
/* Continue with pairing via SMP */
err = smp_user_confirm_reply(conn, mgmt_op, passkey);
@@ -2295,6 +2304,12 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
goto failed;
}
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
if (hdev->discovery.state != DISCOVERY_STOPPED) {
err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
MGMT_STATUS_BUSY);
@@ -2381,27 +2396,39 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- if (hdev->discovery.state == DISCOVERY_FINDING) {
- err = hci_cancel_inquiry(hdev);
- if (err < 0)
- mgmt_pending_remove(cmd);
+ switch (hdev->discovery.state) {
+ case DISCOVERY_FINDING:
+ if (test_bit(HCI_INQUIRY, &hdev->flags))
+ err = hci_cancel_inquiry(hdev);
else
- hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
- goto unlock;
- }
+ err = hci_cancel_le_scan(hdev);
- e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_PENDING);
- if (!e) {
- mgmt_pending_remove(cmd);
- err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
- &mgmt_cp->type, sizeof(mgmt_cp->type));
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
- goto unlock;
+ break;
+
+ case DISCOVERY_RESOLVING:
+ e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
+ NAME_PENDING);
+ if (!e) {
+ mgmt_pending_remove(cmd);
+ err = cmd_complete(sk, hdev->id,
+ MGMT_OP_STOP_DISCOVERY, 0,
+ &mgmt_cp->type,
+ sizeof(mgmt_cp->type));
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ goto unlock;
+ }
+
+ bacpy(&cp.bdaddr, &e->data.bdaddr);
+ err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
+ sizeof(cp), &cp);
+
+ break;
+
+ default:
+ BT_DBG("unknown discovery state %u", hdev->discovery.state);
+ err = -EFAULT;
}
- bacpy(&cp.bdaddr, &e->data.bdaddr);
- err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
- &cp);
if (err < 0)
mgmt_pending_remove(cmd);
else
@@ -2501,6 +2528,37 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
return err;
}
+static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_set_device_id *cp = data;
+ int err;
+ __u16 source;
+
+ BT_DBG("%s", hdev->name);
+
+ source = __le16_to_cpu(cp->source);
+
+ if (source > 0x0002)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ hdev->devid_source = source;
+ hdev->devid_vendor = __le16_to_cpu(cp->vendor);
+ hdev->devid_product = __le16_to_cpu(cp->product);
+ hdev->devid_version = __le16_to_cpu(cp->version);
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
+
+ update_eir(hdev);
+
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
@@ -2565,7 +2623,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
u16 key_count, expected_len;
int i;
- key_count = get_unaligned_le16(&cp->key_count);
+ key_count = __le16_to_cpu(cp->key_count);
expected_len = sizeof(*cp) + key_count *
sizeof(struct mgmt_ltk_info);
@@ -2591,7 +2649,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
else
type = HCI_SMP_LTK_SLAVE;
- hci_add_ltk(hdev, &key->addr.bdaddr, key->addr.type,
+ hci_add_ltk(hdev, &key->addr.bdaddr,
+ bdaddr_to_le(key->addr.type),
type, 0, key->authenticated, key->val,
key->enc_size, key->ediv, key->rand);
}
@@ -2601,7 +2660,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
return 0;
}
-struct mgmt_handler {
+static const struct mgmt_handler {
int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len);
bool var_len;
@@ -2647,6 +2706,7 @@ struct mgmt_handler {
{ confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
{ block_device, false, MGMT_BLOCK_DEVICE_SIZE },
{ unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
+ { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
};
@@ -2657,7 +2717,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
struct mgmt_hdr *hdr;
u16 opcode, index, len;
struct hci_dev *hdev = NULL;
- struct mgmt_handler *handler;
+ const struct mgmt_handler *handler;
int err;
BT_DBG("got %zu bytes", msglen);
@@ -2675,9 +2735,9 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
}
hdr = buf;
- opcode = get_unaligned_le16(&hdr->opcode);
- index = get_unaligned_le16(&hdr->index);
- len = get_unaligned_le16(&hdr->len);
+ opcode = __le16_to_cpu(hdr->opcode);
+ index = __le16_to_cpu(hdr->index);
+ len = __le16_to_cpu(hdr->len);
if (len != msglen - sizeof(*hdr)) {
err = -EINVAL;
@@ -2884,7 +2944,8 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
return 0;
}
-int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent)
+int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+ bool persistent)
{
struct mgmt_ev_new_link_key ev;
@@ -2892,7 +2953,7 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persisten
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
- ev.key.addr.type = MGMT_ADDR_BREDR;
+ ev.key.addr.type = BDADDR_BREDR;
ev.key.type = key->type;
memcpy(ev.key.val, key->val, 16);
ev.key.pin_len = key->pin_len;
@@ -2908,7 +2969,7 @@ int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
- ev.key.addr.type = key->bdaddr_type;
+ ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
ev.key.authenticated = key->authenticated;
ev.key.enc_size = key->enc_size;
ev.key.ediv = key->ediv;
@@ -2932,7 +2993,7 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u16 eir_len = 0;
bacpy(&ev->addr.bdaddr, bdaddr);
- ev->addr.type = link_to_mgmt(link_type, addr_type);
+ ev->addr.type = link_to_bdaddr(link_type, addr_type);
ev->flags = __cpu_to_le32(flags);
@@ -2944,7 +3005,7 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
eir_len = eir_append_data(ev->eir, eir_len,
EIR_CLASS_OF_DEV, dev_class, 3);
- put_unaligned_le16(eir_len, &ev->eir_len);
+ ev->eir_len = cpu_to_le16(eir_len);
return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
sizeof(*ev) + eir_len, NULL);
@@ -2995,13 +3056,13 @@ int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
bacpy(&ev.bdaddr, bdaddr);
- ev.type = link_to_mgmt(link_type, addr_type);
+ ev.type = link_to_bdaddr(link_type, addr_type);
err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
sk);
if (sk)
- sock_put(sk);
+ sock_put(sk);
mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
hdev);
@@ -3021,7 +3082,7 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;
bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = link_to_mgmt(link_type, addr_type);
+ rp.addr.type = link_to_bdaddr(link_type, addr_type);
err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
mgmt_status(status), &rp, sizeof(rp));
@@ -3039,7 +3100,7 @@ int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
struct mgmt_ev_connect_failed ev;
bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_mgmt(link_type, addr_type);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.status = mgmt_status(status);
return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
@@ -3050,7 +3111,7 @@ int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
struct mgmt_ev_pin_code_request ev;
bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = MGMT_ADDR_BREDR;
+ ev.addr.type = BDADDR_BREDR;
ev.secure = secure;
return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
@@ -3069,7 +3130,7 @@ int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;
bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = MGMT_ADDR_BREDR;
+ rp.addr.type = BDADDR_BREDR;
err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
mgmt_status(status), &rp, sizeof(rp));
@@ -3091,7 +3152,7 @@ int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;
bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = MGMT_ADDR_BREDR;
+ rp.addr.type = BDADDR_BREDR;
err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
mgmt_status(status), &rp, sizeof(rp));
@@ -3110,9 +3171,9 @@ int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
BT_DBG("%s", hdev->name);
bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_mgmt(link_type, addr_type);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.confirm_hint = confirm_hint;
- put_unaligned_le32(value, &ev.value);
+ ev.value = value;
return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
NULL);
@@ -3126,7 +3187,7 @@ int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
BT_DBG("%s", hdev->name);
bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_mgmt(link_type, addr_type);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
NULL);
@@ -3145,7 +3206,7 @@ static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;
bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = link_to_mgmt(link_type, addr_type);
+ rp.addr.type = link_to_bdaddr(link_type, addr_type);
err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
&rp, sizeof(rp));
@@ -3188,7 +3249,7 @@ int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
struct mgmt_ev_auth_failed ev;
bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_mgmt(link_type, addr_type);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.status = mgmt_status(status);
return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
@@ -3413,10 +3474,10 @@ int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
if (enable && test_and_clear_bit(HCI_LE_ENABLED,
&hdev->dev_flags))
- err = new_settings(hdev, NULL);
+ err = new_settings(hdev, NULL);
- mgmt_pending_foreach(MGMT_OP_SET_LE, hdev,
- cmd_status_rsp, &mgmt_err);
+ mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
+ &mgmt_err);
return err;
}
@@ -3455,7 +3516,7 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
memset(buf, 0, sizeof(buf));
bacpy(&ev->addr.bdaddr, bdaddr);
- ev->addr.type = link_to_mgmt(link_type, addr_type);
+ ev->addr.type = link_to_bdaddr(link_type, addr_type);
ev->rssi = rssi;
if (cfm_name)
ev->flags[0] |= MGMT_DEV_FOUND_CONFIRM_NAME;
@@ -3469,7 +3530,7 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
dev_class, 3);
- put_unaligned_le16(eir_len, &ev->eir_len);
+ ev->eir_len = cpu_to_le16(eir_len);
ev_size = sizeof(*ev) + eir_len;
@@ -3488,13 +3549,13 @@ int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
memset(buf, 0, sizeof(buf));
bacpy(&ev->addr.bdaddr, bdaddr);
- ev->addr.type = link_to_mgmt(link_type, addr_type);
+ ev->addr.type = link_to_bdaddr(link_type, addr_type);
ev->rssi = rssi;
eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
name_len);
- put_unaligned_le16(eir_len, &ev->eir_len);
+ ev->eir_len = cpu_to_le16(eir_len);
return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
sizeof(*ev) + eir_len, NULL);
@@ -3594,6 +3655,3 @@ int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed support");
-
-module_param(enable_le, bool, 0644);
-MODULE_PARM_DESC(enable_le, "Enable Low Energy support");
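
Context note: much of the mgmt.c churn above replaces put_unaligned_le16()/get_unaligned_le16() with cpu_to_le16()/__le16_to_cpu() once the struct members are declared as __le16, so sparse can check the endianness. A small illustrative sketch of the two styles; the struct and field names are made up.

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

struct ex_ev {
        __le16 opcode;          /* typed as wire-endian, naturally aligned */
        u8     status;
} __packed;

static void ex_fill_ev(struct ex_ev *ev, u16 opcode, u8 status)
{
        /* typed field: plain assignment with cpu_to_le16() */
        ev->opcode = cpu_to_le16(opcode);
        ev->status = status;
}

static u16 ex_read_opcode_raw(const void *buf)
{
        /* untyped or unaligned buffer: keep the unaligned accessors */
        return get_unaligned_le16(buf);
}
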
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index a55a43e9f70..e8707debb86 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -260,7 +260,8 @@ static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
if (parent) {
sk->sk_type = parent->sk_type;
- pi->dlc->defer_setup = bt_sk(parent)->defer_setup;
+ pi->dlc->defer_setup = test_bit(BT_SK_DEFER_SETUP,
+ &bt_sk(parent)->flags);
pi->sec_level = rfcomm_pi(parent)->sec_level;
pi->role_switch = rfcomm_pi(parent)->role_switch;
@@ -731,7 +732,11 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
break;
}
- bt_sk(sk)->defer_setup = opt;
+ if (opt)
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+ else
+ clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+
break;
default:
@@ -849,7 +854,8 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
break;
}
- if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
+ if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
+ (u32 __user *) optval))
err = -EFAULT;
break;
@@ -972,7 +978,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
done:
bh_unlock_sock(parent);
- if (bt_sk(parent)->defer_setup)
+ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
parent->sk_state_change(parent);
return result;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index f6ab1290796..cbdd313659a 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -61,8 +61,6 @@ static struct bt_sock_list sco_sk_list = {
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent);
static void sco_chan_del(struct sock *sk, int err);
-static int sco_conn_del(struct hci_conn *conn, int err);
-
static void sco_sock_close(struct sock *sk);
static void sco_sock_kill(struct sock *sk);
@@ -95,12 +93,12 @@ static void sco_sock_clear_timer(struct sock *sk)
}
/* ---- SCO connections ---- */
-static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
+static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
{
struct hci_dev *hdev = hcon->hdev;
struct sco_conn *conn = hcon->sco_data;
- if (conn || status)
+ if (conn)
return conn;
conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
@@ -195,13 +193,14 @@ static int sco_connect(struct sock *sk)
else
type = SCO_LINK;
- hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
+ hcon = hci_connect(hdev, type, dst, BDADDR_BREDR, BT_SECURITY_LOW,
+ HCI_AT_NO_BONDING);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto done;
}
- conn = sco_conn_add(hcon, 0);
+ conn = sco_conn_add(hcon);
if (!conn) {
hci_conn_put(hcon);
err = -ENOMEM;
@@ -233,7 +232,7 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
{
struct sco_conn *conn = sco_pi(sk)->conn;
struct sk_buff *skb;
- int err, count;
+ int err;
/* Check outgoing MTU */
if (len > conn->mtu)
@@ -241,20 +240,18 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
BT_DBG("sk %p len %d", sk, len);
- count = min_t(unsigned int, conn->mtu, len);
- skb = bt_skb_send_alloc(sk, count,
- msg->msg_flags & MSG_DONTWAIT, &err);
+ skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
- if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
+ if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
kfree_skb(skb);
return -EFAULT;
}
hci_send_sco(conn->hcon, skb);
- return count;
+ return len;
}
static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
@@ -277,17 +274,20 @@ drop:
}
/* -------- Socket interface ---------- */
-static struct sock *__sco_get_sock_by_addr(bdaddr_t *ba)
+static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)
{
- struct sock *sk;
struct hlist_node *node;
+ struct sock *sk;
+
+ sk_for_each(sk, node, &sco_sk_list.head) {
+ if (sk->sk_state != BT_LISTEN)
+ continue;
- sk_for_each(sk, node, &sco_sk_list.head)
if (!bacmp(&bt_sk(sk)->src, ba))
- goto found;
- sk = NULL;
-found:
- return sk;
+ return sk;
+ }
+
+ return NULL;
}
/* Find socket listening on source bdaddr.
@@ -466,7 +466,6 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
- bdaddr_t *src = &sa->sco_bdaddr;
int err = 0;
BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr));
@@ -481,17 +480,14 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
goto done;
}
- write_lock(&sco_sk_list.lock);
-
- if (bacmp(src, BDADDR_ANY) && __sco_get_sock_by_addr(src)) {
- err = -EADDRINUSE;
- } else {
- /* Save source address */
- bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
- sk->sk_state = BT_BOUND;
+ if (sk->sk_type != SOCK_SEQPACKET) {
+ err = -EINVAL;
+ goto done;
}
- write_unlock(&sco_sk_list.lock);
+ bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
+
+ sk->sk_state = BT_BOUND;
done:
release_sock(sk);
@@ -537,21 +533,38 @@ done:
static int sco_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
+ bdaddr_t *src = &bt_sk(sk)->src;
int err = 0;
BT_DBG("sk %p backlog %d", sk, backlog);
lock_sock(sk);
- if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
+ if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
+ if (sk->sk_type != SOCK_SEQPACKET) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ write_lock(&sco_sk_list.lock);
+
+ if (__sco_get_sock_listen_by_addr(src)) {
+ err = -EADDRINUSE;
+ goto unlock;
+ }
+
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
+
sk->sk_state = BT_LISTEN;
+unlock:
+ write_unlock(&sco_sk_list.lock);
+
done:
release_sock(sk);
return err;
@@ -923,7 +936,7 @@ int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
if (!status) {
struct sco_conn *conn;
- conn = sco_conn_add(hcon, status);
+ conn = sco_conn_add(hcon);
if (conn)
sco_conn_ready(conn);
} else
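
Context note: sco_sock_listen() above now takes the socket-list write lock and only then checks whether another socket already listens on the same source address (returning -EADDRINUSE), while bind() no longer performs that check. A simplified sketch of the check-then-publish pattern under a single write lock; the list, lock and lookup helper here are illustrative.

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static LIST_HEAD(ex_socks);
static DEFINE_RWLOCK(ex_socks_lock);

struct ex_sock {
        struct list_head node;
        int addr;               /* stand-in for the bound bdaddr */
        bool listening;
};

static struct ex_sock *ex_find_listener(int addr)
{
        struct ex_sock *s;

        list_for_each_entry(s, &ex_socks, node)
                if (s->listening && s->addr == addr)
                        return s;
        return NULL;
}

static int ex_listen(struct ex_sock *s)
{
        int err = 0;

        write_lock(&ex_socks_lock);

        /* check and publish under the same lock so two sockets cannot
         * both win the race for the same address */
        if (ex_find_listener(s->addr))
                err = -EADDRINUSE;
        else
                s->listening = true;

        write_unlock(&ex_socks_lock);
        return err;
}
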
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index deb119875fd..6fc7c4708f3 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -956,7 +956,7 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
HCI_SMP_LTK_SLAVE, 1, authenticated,
enc.ltk, smp->enc_key_size, ediv, ident.rand);
- ident.ediv = cpu_to_le16(ediv);
+ ident.ediv = ediv;
smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident);
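
Context note: the one-line smp.c fix above drops a second byte swap on a value that is already little-endian; on big-endian hosts converting twice would flip the bytes back to CPU order. A tiny hedged illustration with an ex_ struct, not the real SMP definitions.

#include <linux/types.h>

struct ex_master_ident {
        __le16 ediv;
};

static void ex_fill(struct ex_master_ident *out, __le16 ediv_from_wire)
{
        /* correct: the value is already __le16, copy it through unchanged */
        out->ediv = ediv_from_wire;

        /*
         * Wrong (the removed pattern): cpu_to_le16() on an already
         * little-endian value is a no-op on LE CPUs but swaps the bytes
         * again on BE CPUs, corrupting the key identifier.
         */
        /* out->ediv = cpu_to_le16(ediv_from_wire); */
}
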
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index a8bdf740543..e5b7182fa09 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -145,6 +145,12 @@ static void free_fib_info_rcu(struct rcu_head *head)
{
struct fib_info *fi = container_of(head, struct fib_info, rcu);
+ change_nexthops(fi) {
+ if (nexthop_nh->nh_dev)
+ dev_put(nexthop_nh->nh_dev);
+ } endfor_nexthops(fi);
+
+ release_net(fi->fib_net);
if (fi->fib_metrics != (u32 *) dst_default_metrics)
kfree(fi->fib_metrics);
kfree(fi);
@@ -156,13 +162,7 @@ void free_fib_info(struct fib_info *fi)
pr_warn("Freeing alive fib_info %p\n", fi);
return;
}
- change_nexthops(fi) {
- if (nexthop_nh->nh_dev)
- dev_put(nexthop_nh->nh_dev);
- nexthop_nh->nh_dev = NULL;
- } endfor_nexthops(fi);
fib_info_cnt--;
- release_net(fi->fib_net);
call_rcu(&fi->rcu, free_fib_info_rcu);
}
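
Context note: the fib_semantics.c change moves the nexthop dev_put() and release_net() from free_fib_info() into the RCU callback, so readers still walking the fib_info under rcu_read_lock() keep seeing valid pointers until a grace period has passed. A generic sketch of that deferred-teardown pattern; the ex_ types are illustrative.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct ex_dev;
void ex_dev_put(struct ex_dev *dev);    /* drops a reference; defined elsewhere */

struct ex_entry {
        struct ex_dev *dev;             /* referenced resource */
        struct rcu_head rcu;
};

static void ex_entry_free_rcu(struct rcu_head *head)
{
        struct ex_entry *e = container_of(head, struct ex_entry, rcu);

        /* safe now: no rcu_read_lock() reader can still hold e */
        ex_dev_put(e->dev);
        kfree(e);
}

static void ex_entry_free(struct ex_entry *e)
{
        /* do NOT drop e->dev here; readers may still dereference it */
        call_rcu(&e->rcu, ex_entry_free_rcu);
}
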
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ffcb3b01684..98b30d08efe 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3452,6 +3452,7 @@ int __init ip_rt_init(void)
0,
&rt_hash_log,
&rt_hash_mask,
+ 0,
rhash_entries ? 0 : 512 * 1024);
memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
rt_hash_lock_init();
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index bb485fcb077..3ba605f60e4 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3514,6 +3514,7 @@ void __init tcp_init(void)
0,
NULL,
&tcp_hashinfo.ehash_mask,
+ 0,
thash_entries ? 0 : 512 * 1024);
for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) {
INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
@@ -3530,6 +3531,7 @@ void __init tcp_init(void)
0,
&tcp_hashinfo.bhash_size,
NULL,
+ 0,
64 * 1024);
tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index cfa2aa12834..b224eb8bce8 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4555,6 +4555,11 @@ static bool tcp_try_coalesce(struct sock *sk,
if (tcp_hdr(from)->fin)
return false;
+
+ /* It's possible this segment overlaps with a prior segment in the queue */
+ if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
+ return false;
+
if (!skb_try_coalesce(to, from, fragstolen, &delta))
return false;
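
Context note: the added check in tcp_try_coalesce() only merges a segment whose sequence number starts exactly where the previous one ends; for example, a queued segment covering 1000-1499 (end_seq 1500) may absorb one starting at seq 1500, but not an overlapping retransmit starting at 1400. A toy version of the contiguity test with made-up field holders.

#include <linux/types.h>

/* toy stand-in for the skb control-block fields used above */
struct ex_seg {
        u32 seq;        /* first sequence number covered */
        u32 end_seq;    /* one past the last sequence number */
};

static bool ex_can_coalesce(const struct ex_seg *to, const struct ex_seg *from)
{
        /* only append data that is exactly contiguous; overlapping or
         * gapped segments keep their own skb */
        return from->seq == to->end_seq;
}
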
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 609397ee78f..eaca73644e7 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2192,26 +2192,16 @@ void __init udp_table_init(struct udp_table *table, const char *name)
{
unsigned int i;
- if (!CONFIG_BASE_SMALL)
- table->hash = alloc_large_system_hash(name,
- 2 * sizeof(struct udp_hslot),
- uhash_entries,
- 21, /* one slot per 2 MB */
- 0,
- &table->log,
- &table->mask,
- 64 * 1024);
- /*
- * Make sure hash table has the minimum size
- */
- if (CONFIG_BASE_SMALL || table->mask < UDP_HTABLE_SIZE_MIN - 1) {
- table->hash = kmalloc(UDP_HTABLE_SIZE_MIN *
- 2 * sizeof(struct udp_hslot), GFP_KERNEL);
- if (!table->hash)
- panic(name);
- table->log = ilog2(UDP_HTABLE_SIZE_MIN);
- table->mask = UDP_HTABLE_SIZE_MIN - 1;
- }
+ table->hash = alloc_large_system_hash(name,
+ 2 * sizeof(struct udp_hslot),
+ uhash_entries,
+ 21, /* one slot per 2 MB */
+ 0,
+ &table->log,
+ &table->mask,
+ UDP_HTABLE_SIZE_MIN,
+ 64 * 1024);
+
table->hash2 = table->hash + (table->mask + 1);
for (i = 0; i <= table->mask; i++) {
INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
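
Context note: the route.c, tcp.c and udp.c hunks above all pass one extra argument to alloc_large_system_hash(): a lower bound on the table size (UDP_HTABLE_SIZE_MIN for UDP, 0 where no minimum is needed), which lets udp_table_init() drop its hand-rolled small-table fallback. Roughly, the allocator now clamps the entry count like the hypothetical helper below; this is an illustration, not the kernel's actual implementation.

/* hypothetical clamp mirroring the low/high limit arguments */
static unsigned long ex_clamp_hash_entries(unsigned long requested,
                                           unsigned long low_limit,
                                           unsigned long high_limit)
{
        if (requested < low_limit)
                requested = low_limit;          /* e.g. UDP_HTABLE_SIZE_MIN */
        if (high_limit && requested > high_limit)
                requested = high_limit;
        return requested;
}
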
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 5b7053c5873..7cf07158805 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -421,16 +421,22 @@ static void sta_tx_agg_session_timer_expired(unsigned long data)
struct tid_ampdu_tx *tid_tx;
unsigned long timeout;
- tid_tx = rcu_dereference_protected_tid_tx(sta, *ptid);
- if (!tid_tx)
+ rcu_read_lock();
+ tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[*ptid]);
+ if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+ rcu_read_unlock();
return;
+ }
timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
if (time_is_after_jiffies(timeout)) {
mod_timer(&tid_tx->session_timer, timeout);
+ rcu_read_unlock();
return;
}
+ rcu_read_unlock();
+
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid);
#endif
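
Context note: sta_tx_agg_session_timer_expired() above runs in timer context where the mutex that normally protects tid_tx is not held, so it switches from rcu_dereference_protected() to a plain rcu_read_lock()/rcu_dereference() pair and bails out early if the session is being torn down. A generic sketch of RCU-protected access from an old-style timer handler; the ex_ types are illustrative.

#include <linux/rcupdate.h>

struct ex_session {
        unsigned long state;
};

struct ex_sta {
        struct ex_session __rcu *session;
};

static void ex_session_timer(unsigned long data)
{
        struct ex_sta *sta = (struct ex_sta *)data;
        struct ex_session *s;

        rcu_read_lock();
        s = rcu_dereference(sta->session);
        if (!s) {
                /* torn down since the timer was armed */
                rcu_read_unlock();
                return;
        }
        /* ... inspect s->state, possibly re-arm the timer ... */
        rcu_read_unlock();
}
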
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index ea0122dbd2b..7ed433c66d6 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -509,6 +509,7 @@ IEEE80211_IF_FILE(dot11MeshHWMPRannInterval,
u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC);
IEEE80211_IF_FILE(dot11MeshForwarding, u.mesh.mshcfg.dot11MeshForwarding, DEC);
IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC);
+IEEE80211_IF_FILE(ht_opmode, u.mesh.mshcfg.ht_opmode, DEC);
#endif
#define DEBUGFS_ADD_MODE(name, mode) \
@@ -608,6 +609,7 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
MESHPARAMS_ADD(dot11MeshHWMPRannInterval);
MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol);
MESHPARAMS_ADD(rssi_threshold);
+ MESHPARAMS_ADD(ht_opmode);
#undef MESHPARAMS_ADD
}
#endif
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 3ad33a82462..33d9d0c3e3d 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -163,6 +163,11 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
sizeof(struct ieee80211_ht_operation));
pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
sband->ht_cap.cap);
+ /*
+ * Note: According to 802.11n-2009 9.13.3.1, HT Protection
+ * field and RIFS Mode are reserved in IBSS mode, therefore
+ * keep them at 0
+ */
pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap,
chan, channel_type, 0);
}
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 856237c5c1f..d4c19a7773d 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -206,8 +206,10 @@ static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
sdata->vif.hw_queue[i] = IEEE80211_INVAL_HW_QUEUE;
- else
+ else if (local->hw.queues >= IEEE80211_NUM_ACS)
sdata->vif.hw_queue[i] = i;
+ else
+ sdata->vif.hw_queue[i] = 0;
}
sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE;
}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index b70f7f09da6..f5548e95325 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -596,6 +596,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
local->hw.offchannel_tx_hw_queue = IEEE80211_INVAL_HW_QUEUE;
local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
+ local->hw.radiotap_mcs_details = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
+ IEEE80211_RADIOTAP_MCS_HAVE_GI |
+ IEEE80211_RADIOTAP_MCS_HAVE_BW;
local->user_power_level = -1;
wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 0675a2fec6a..2913113c583 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -109,8 +109,10 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
/* Disallow HT40+/- mismatch */
if (ie->ht_operation &&
- local->_oper_channel_type > NL80211_CHAN_HT20 &&
- sta_channel_type > NL80211_CHAN_HT20 &&
+ (local->_oper_channel_type == NL80211_CHAN_HT40MINUS ||
+ local->_oper_channel_type == NL80211_CHAN_HT40PLUS) &&
+ (sta_channel_type == NL80211_CHAN_HT40MINUS ||
+ sta_channel_type == NL80211_CHAN_HT40PLUS) &&
local->_oper_channel_type != sta_channel_type)
goto mismatch;
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 27e0c2f0679..9b59658e865 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -603,7 +603,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
hopcount, ttl, cpu_to_le32(lifetime),
cpu_to_le32(metric), cpu_to_le32(preq_id),
sdata);
- ifmsh->mshstats.fwded_mcast++;
+ if (!is_multicast_ether_addr(da))
+ ifmsh->mshstats.fwded_unicast++;
+ else
+ ifmsh->mshstats.fwded_mcast++;
ifmsh->mshstats.fwded_frames++;
}
}
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 8cc8461b48a..60ef235c9d9 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -105,15 +105,15 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
return sta;
}
-/** mesh_set_ht_prot_mode - set correct HT protection mode
+/*
+ * mesh_set_ht_prot_mode - set correct HT protection mode
*
- * Section 9.23.3.5 of IEEE 80211s standard describes the protection rules for
- * HT mesh STA in a MBSS. Three HT protection modes are supported for now,
- * non-HT mixed mode, 20MHz-protection and no-protection mode. non-HT mixed
- * mode is selected if any non-HT peers are present in our MBSS.
- * 20MHz-protection mode is selected if all peers in our 20/40MHz MBSS support
- * HT and atleast one HT20 peer is present. Otherwise no-protection mode is
- * selected.
+ * Section 9.23.3.5 of IEEE 802.11-2012 describes the protection rules for HT
+ * mesh STA in a MBSS. Three HT protection modes are supported for now, non-HT
+ * mixed mode, 20MHz-protection and no-protection mode. non-HT mixed mode is
+ * selected if any non-HT peers are present in our MBSS. 20MHz-protection mode
+ * is selected if all peers in our 20/40MHz MBSS support HT and at least one
+ * HT20 peer is present. Otherwise no-protection mode is selected.
*/
static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
{
@@ -128,21 +128,22 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
rcu_read_lock();
list_for_each_entry_rcu(sta, &local->sta_list, list) {
- if (sdata == sta->sdata &&
- sta->plink_state == NL80211_PLINK_ESTAB) {
- switch (sta->ch_type) {
- case NL80211_CHAN_NO_HT:
- mpl_dbg("mesh_plink %pM: nonHT sta (%pM) is present",
- sdata->vif.addr, sta->sta.addr);
- non_ht_sta = true;
- goto out;
- case NL80211_CHAN_HT20:
- mpl_dbg("mesh_plink %pM: HT20 sta (%pM) is present",
- sdata->vif.addr, sta->sta.addr);
- ht20_sta = true;
- default:
- break;
- }
+ if (sdata != sta->sdata ||
+ sta->plink_state != NL80211_PLINK_ESTAB)
+ continue;
+
+ switch (sta->ch_type) {
+ case NL80211_CHAN_NO_HT:
+ mpl_dbg("mesh_plink %pM: nonHT sta (%pM) is present",
+ sdata->vif.addr, sta->sta.addr);
+ non_ht_sta = true;
+ goto out;
+ case NL80211_CHAN_HT20:
+ mpl_dbg("mesh_plink %pM: HT20 sta (%pM) is present",
+ sdata->vif.addr, sta->sta.addr);
+ ht20_sta = true;
+ default:
+ break;
}
}
out:
@@ -346,6 +347,15 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,
sta = sta_info_get(sdata, addr);
if (!sta) {
+ /* Userspace handles peer allocation when security is enabled */
+ if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
+ cfg80211_notify_new_peer_candidate(sdata->dev, addr,
+ elems->ie_start,
+ elems->total_len,
+ GFP_ATOMIC);
+ return NULL;
+ }
+
sta = mesh_plink_alloc(sdata, addr);
if (!sta)
return NULL;
@@ -387,15 +397,6 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
{
struct sta_info *sta;
- /* Userspace handles peer allocation when security is enabled */
- if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
- cfg80211_notify_new_peer_candidate(sdata->dev, hw_addr,
- elems->ie_start,
- elems->total_len,
- GFP_KERNEL);
- return;
- }
-
rcu_read_lock();
sta = mesh_peer_init(sdata, hw_addr, elems);
if (!sta)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 8257a09eeed..7bcecf73aaf 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -204,14 +204,14 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
if (status->flag & RX_FLAG_HT) {
rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
- *pos++ = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
- IEEE80211_RADIOTAP_MCS_HAVE_GI |
- IEEE80211_RADIOTAP_MCS_HAVE_BW;
+ *pos++ = local->hw.radiotap_mcs_details;
*pos = 0;
if (status->flag & RX_FLAG_SHORT_GI)
*pos |= IEEE80211_RADIOTAP_MCS_SGI;
if (status->flag & RX_FLAG_40MHZ)
*pos |= IEEE80211_RADIOTAP_MCS_BW_40;
+ if (status->flag & RX_FLAG_HT_GF)
+ *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
pos++;
*pos++ = status->rate_idx;
}
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 7aa31bbfaa3..c04d401dae9 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -92,6 +92,7 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
int keylen, int keyidx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
unsigned int hdrlen;
u8 *newhdr;
@@ -104,6 +105,13 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
hdrlen = ieee80211_hdrlen(hdr->frame_control);
newhdr = skb_push(skb, WEP_IV_LEN);
memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen);
+
+ /* the HW only needs room for the IV, but not the actual IV */
+ if (info->control.hw_key &&
+ (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
+ return newhdr + hdrlen;
+
+ skb_set_network_header(skb, skb_network_offset(skb) + WEP_IV_LEN);
ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen);
return newhdr + hdrlen;
}
@@ -313,14 +321,15 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_key_conf *hw_key = info->control.hw_key;
- if (!info->control.hw_key) {
+ if (!hw_key) {
if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key,
tx->key->conf.keylen,
tx->key->conf.keyidx))
return -1;
- } else if (info->control.hw_key->flags &
- IEEE80211_KEY_FLAG_GENERATE_IV) {
+ } else if ((hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+ (hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
if (!ieee80211_wep_add_iv(tx->local, skb,
tx->key->conf.keylen,
tx->key->conf.keyidx))
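
After this change wep_encrypt_skb() distinguishes three cases: no hardware key (full software WEP), GENERATE_IV (software writes the IV, hardware encrypts), and the new PUT_IV_SPACE (software only reserves room for the IV). Below is a compact decision helper mirroring that split; the flag values are illustrative only, standing in for the real IEEE80211_KEY_FLAG_* constants.

#include <stdio.h>

#define KEY_FLAG_GENERATE_IV  0x01   /* illustrative values only */
#define KEY_FLAG_PUT_IV_SPACE 0x02

enum wep_action { WEP_SOFTWARE, WEP_ADD_IV, WEP_IV_SPACE_ONLY, WEP_NOTHING };

/* hw_key_flags < 0 means "no hardware key", matching !info->control.hw_key */
static enum wep_action wep_tx_action(int hw_key_flags)
{
    if (hw_key_flags < 0)
        return WEP_SOFTWARE;                    /* encrypt in software */
    if (hw_key_flags & KEY_FLAG_PUT_IV_SPACE)
        return WEP_IV_SPACE_ONLY;               /* reserve 4 bytes, no IV */
    if (hw_key_flags & KEY_FLAG_GENERATE_IV)
        return WEP_ADD_IV;                      /* write IV, HW encrypts */
    return WEP_NOTHING;                         /* HW does everything */
}

int main(void)
{
    printf("%d %d %d %d\n",
           wep_tx_action(-1),
           wep_tx_action(KEY_FLAG_GENERATE_IV),
           wep_tx_action(KEY_FLAG_PUT_IV_SPACE),
           wep_tx_action(0));
    return 0;
}
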
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 0ae23c60968..bdb53aba888 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -183,7 +183,8 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
u8 *pos;
if (info->control.hw_key &&
- !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
+ !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
+ !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
/* hwaccel - with no need for software-generated IV */
return 0;
}
@@ -202,8 +203,14 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
pos = skb_push(skb, TKIP_IV_LEN);
memmove(pos, pos + TKIP_IV_LEN, hdrlen);
+ skb_set_network_header(skb, skb_network_offset(skb) + TKIP_IV_LEN);
pos += hdrlen;
+ /* the HW only needs room for the IV, but not the actual IV */
+ if (info->control.hw_key &&
+ (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
+ return 0;
+
/* Increase IV for the frame */
spin_lock_irqsave(&key->u.tkip.txlock, flags);
key->u.tkip.tx.iv16++;
@@ -422,6 +429,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
pos = skb_push(skb, CCMP_HDR_LEN);
memmove(pos, pos + CCMP_HDR_LEN, hdrlen);
+ skb_set_network_header(skb, skb_network_offset(skb) + CCMP_HDR_LEN);
/* the HW only needs room for the IV, but not the actual IV */
if (info->control.hw_key &&
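
The TKIP and CCMP hunks share the same buffer manipulation: make room for the IV in front of the payload, move the 802.11 header, and bump the network-header offset by the IV length so later code still finds the payload. The flat-buffer sketch below produces the same layout, though it shifts the payload back where the kernel uses skb_push() and moves the header forward; the helper name is hypothetical.

#include <stdio.h>
#include <string.h>

/* Open an iv_len-byte gap right after the first hdr_len bytes of buf.
 * buf must have at least len + iv_len bytes of storage.
 * Returns the new total length. */
static size_t make_iv_room(unsigned char *buf, size_t len,
                           size_t hdr_len, size_t iv_len)
{
    /* shift everything after the header back by iv_len ... */
    memmove(buf + hdr_len + iv_len, buf + hdr_len, len - hdr_len);
    /* ... and clear the gap that will hold the IV (or stay empty
     * when the hardware only wants the space, cf. PUT_IV_SPACE). */
    memset(buf + hdr_len, 0, iv_len);
    return len + iv_len;
}

int main(void)
{
    unsigned char frame[32] = "HDRHDRHDRpayload";
    size_t newlen = make_iv_room(frame, 16, 9, 8);

    printf("new length %zu, payload now at %.7s\n",
           newlen, frame + 9 + 8);
    return 0;
}
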
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 3192c3f589e..9f6ce011d35 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -97,7 +97,7 @@ int nfc_dev_down(struct nfc_dev *dev)
goto error;
}
- if (dev->polling || dev->activated_target_idx != NFC_TARGET_IDX_NONE) {
+ if (dev->polling || dev->active_target) {
rc = -EBUSY;
goto error;
}
@@ -183,11 +183,27 @@ error:
return rc;
}
+static struct nfc_target *nfc_find_target(struct nfc_dev *dev, u32 target_idx)
+{
+ int i;
+
+ if (dev->n_targets == 0)
+ return NULL;
+
+ for (i = 0; i < dev->n_targets ; i++) {
+ if (dev->targets[i].idx == target_idx)
+ return &dev->targets[i];
+ }
+
+ return NULL;
+}
+
int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
{
int rc = 0;
u8 *gb;
size_t gb_len;
+ struct nfc_target *target;
pr_debug("dev_name=%s comm %d\n", dev_name(&dev->dev), comm_mode);
@@ -212,9 +228,15 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
goto error;
}
- rc = dev->ops->dep_link_up(dev, target_index, comm_mode, gb, gb_len);
+ target = nfc_find_target(dev, target_index);
+ if (target == NULL) {
+ rc = -ENOTCONN;
+ goto error;
+ }
+
+ rc = dev->ops->dep_link_up(dev, target, comm_mode, gb, gb_len);
if (!rc)
- dev->activated_target_idx = target_index;
+ dev->active_target = target;
error:
device_unlock(&dev->dev);
@@ -250,7 +272,7 @@ int nfc_dep_link_down(struct nfc_dev *dev)
rc = dev->ops->dep_link_down(dev);
if (!rc) {
dev->dep_link_up = false;
- dev->activated_target_idx = NFC_TARGET_IDX_NONE;
+ dev->active_target = NULL;
nfc_llcp_mac_is_down(dev);
nfc_genl_dep_link_down_event(dev);
}
@@ -282,6 +304,7 @@ EXPORT_SYMBOL(nfc_dep_link_is_up);
int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
{
int rc;
+ struct nfc_target *target;
pr_debug("dev_name=%s target_idx=%u protocol=%u\n",
dev_name(&dev->dev), target_idx, protocol);
@@ -293,9 +316,20 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
goto error;
}
- rc = dev->ops->activate_target(dev, target_idx, protocol);
+ if (dev->active_target) {
+ rc = -EBUSY;
+ goto error;
+ }
+
+ target = nfc_find_target(dev, target_idx);
+ if (target == NULL) {
+ rc = -ENOTCONN;
+ goto error;
+ }
+
+ rc = dev->ops->activate_target(dev, target, protocol);
if (!rc) {
- dev->activated_target_idx = target_idx;
+ dev->active_target = target;
if (dev->ops->check_presence)
mod_timer(&dev->check_pres_timer, jiffies +
@@ -327,11 +361,21 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx)
goto error;
}
+ if (dev->active_target == NULL) {
+ rc = -ENOTCONN;
+ goto error;
+ }
+
+ if (dev->active_target->idx != target_idx) {
+ rc = -ENOTCONN;
+ goto error;
+ }
+
if (dev->ops->check_presence)
del_timer_sync(&dev->check_pres_timer);
- dev->ops->deactivate_target(dev, target_idx);
- dev->activated_target_idx = NFC_TARGET_IDX_NONE;
+ dev->ops->deactivate_target(dev, dev->active_target);
+ dev->active_target = NULL;
error:
device_unlock(&dev->dev);
@@ -365,13 +409,13 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
goto error;
}
- if (dev->activated_target_idx == NFC_TARGET_IDX_NONE) {
+ if (dev->active_target == NULL) {
rc = -ENOTCONN;
kfree_skb(skb);
goto error;
}
- if (target_idx != dev->activated_target_idx) {
+ if (dev->active_target->idx != target_idx) {
rc = -EADDRNOTAVAIL;
kfree_skb(skb);
goto error;
@@ -380,7 +424,8 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
if (dev->ops->check_presence)
del_timer_sync(&dev->check_pres_timer);
- rc = dev->ops->data_exchange(dev, target_idx, skb, cb, cb_context);
+ rc = dev->ops->data_exchange(dev, dev->active_target, skb, cb,
+ cb_context);
if (!rc && dev->ops->check_presence)
mod_timer(&dev->check_pres_timer, jiffies +
@@ -456,6 +501,9 @@ EXPORT_SYMBOL(nfc_alloc_recv_skb);
* The device driver must call this function when one or many nfc targets
* are found. After calling this function, the device driver must stop
* polling for targets.
+ * IMPORTANT: this function must not be called from an atomic context.
+ * In addition, it must not be called from a context that would prevent the
+ * NFC Core from calling other nfc ops entry points concurrently.
*/
int nfc_targets_found(struct nfc_dev *dev,
struct nfc_target *targets, int n_targets)
@@ -469,7 +517,7 @@ int nfc_targets_found(struct nfc_dev *dev,
for (i = 0; i < n_targets; i++)
targets[i].idx = dev->target_next_idx++;
- spin_lock_bh(&dev->targets_lock);
+ device_lock(&dev->dev);
dev->targets_generation++;
@@ -479,12 +527,12 @@ int nfc_targets_found(struct nfc_dev *dev,
if (!dev->targets) {
dev->n_targets = 0;
- spin_unlock_bh(&dev->targets_lock);
+ device_unlock(&dev->dev);
return -ENOMEM;
}
dev->n_targets = n_targets;
- spin_unlock_bh(&dev->targets_lock);
+ device_unlock(&dev->dev);
nfc_genl_targets_found(dev);
@@ -492,6 +540,18 @@ int nfc_targets_found(struct nfc_dev *dev,
}
EXPORT_SYMBOL(nfc_targets_found);
+/**
+ * nfc_target_lost - inform that an activated target went out of field
+ *
+ * @dev: The nfc device that had the activated target in field
+ * @target_idx: the nfc index of the target
+ *
+ * The device driver must call this function when the activated target
+ * goes out of the field.
+ * IMPORTANT: this function must not be called from an atomic context.
+ * In addition, it must not be called from a context that would prevent the
+ * NFC Core from calling other nfc ops entry points concurrently.
+ */
int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
{
struct nfc_target *tg;
@@ -499,7 +559,7 @@ int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
pr_debug("dev_name %s n_target %d\n", dev_name(&dev->dev), target_idx);
- spin_lock_bh(&dev->targets_lock);
+ device_lock(&dev->dev);
for (i = 0; i < dev->n_targets; i++) {
tg = &dev->targets[i];
@@ -508,13 +568,13 @@ int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
}
if (i == dev->n_targets) {
- spin_unlock_bh(&dev->targets_lock);
+ device_unlock(&dev->dev);
return -EINVAL;
}
dev->targets_generation++;
dev->n_targets--;
- dev->activated_target_idx = NFC_TARGET_IDX_NONE;
+ dev->active_target = NULL;
if (dev->n_targets) {
memcpy(&dev->targets[i], &dev->targets[i + 1],
@@ -524,7 +584,7 @@ int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
dev->targets = NULL;
}
- spin_unlock_bh(&dev->targets_lock);
+ device_unlock(&dev->dev);
nfc_genl_target_lost(dev, target_idx);
@@ -556,15 +616,16 @@ static void nfc_check_pres_work(struct work_struct *work)
device_lock(&dev->dev);
- if (dev->activated_target_idx != NFC_TARGET_IDX_NONE &&
- timer_pending(&dev->check_pres_timer) == 0) {
- rc = dev->ops->check_presence(dev, dev->activated_target_idx);
+ if (dev->active_target && timer_pending(&dev->check_pres_timer) == 0) {
+ rc = dev->ops->check_presence(dev, dev->active_target);
if (!rc) {
mod_timer(&dev->check_pres_timer, jiffies +
msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
} else {
- nfc_target_lost(dev, dev->activated_target_idx);
- dev->activated_target_idx = NFC_TARGET_IDX_NONE;
+ u32 active_target_idx = dev->active_target->idx;
+ device_unlock(&dev->dev);
+ nfc_target_lost(dev, active_target_idx);
+ return;
}
}
@@ -637,14 +698,12 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
dev->tx_headroom = tx_headroom;
dev->tx_tailroom = tx_tailroom;
- spin_lock_init(&dev->targets_lock);
nfc_genl_data_init(&dev->genl_data);
+
/* first generation must not be 0 */
dev->targets_generation = 1;
- dev->activated_target_idx = NFC_TARGET_IDX_NONE;
-
if (ops->check_presence) {
char name[32];
init_timer(&dev->check_pres_timer);
@@ -662,7 +721,6 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
}
}
-
return dev;
}
EXPORT_SYMBOL(nfc_allocate_device);
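
The core now resolves a target index to a struct nfc_target pointer once (nfc_find_target()) and keeps dev->active_target instead of a bare index, so activation can fail early with -EBUSY or -ENOTCONN. A self-contained sketch of the same lookup-and-activate bookkeeping over a plain array; the structures and names are simplified stand-ins, only the error codes mirror the hunk.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct target { unsigned int idx; };

struct dev {
    struct target *targets;
    int n_targets;
    struct target *active_target;   /* replaces activated_target_idx */
};

static struct target *find_target(struct dev *dev, unsigned int idx)
{
    int i;

    for (i = 0; i < dev->n_targets; i++)
        if (dev->targets[i].idx == idx)
            return &dev->targets[i];
    return NULL;
}

static int activate_target(struct dev *dev, unsigned int idx)
{
    struct target *t;

    if (dev->active_target)
        return -EBUSY;              /* one active target at a time */

    t = find_target(dev, idx);
    if (!t)
        return -ENOTCONN;           /* unknown index */

    dev->active_target = t;
    return 0;
}

int main(void)
{
    struct target tgts[] = { { 3 }, { 7 } };
    struct dev dev = { tgts, 2, NULL };

    printf("%d %d %d\n", activate_target(&dev, 7),   /* 0 */
                         activate_target(&dev, 3),   /* -EBUSY */
                         find_target(&dev, 9) == NULL);
    return 0;
}
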
diff --git a/net/nfc/hci/Kconfig b/net/nfc/hci/Kconfig
index 17213a6362b..fd67f51d18e 100644
--- a/net/nfc/hci/Kconfig
+++ b/net/nfc/hci/Kconfig
@@ -9,6 +9,7 @@ config NFC_HCI
config NFC_SHDLC
depends on NFC_HCI
+ select CRC_CCITT
bool "SHDLC link layer for HCI based NFC drivers"
default n
---help---
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index 86fd00d5a09..e1a640d2b58 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -235,13 +235,6 @@ static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
targets->hci_reader_gate = gate;
r = nfc_targets_found(hdev->ndev, targets, 1);
- if (r < 0)
- goto exit;
-
- kfree(hdev->targets);
- hdev->targets = targets;
- targets = NULL;
- hdev->target_count = 1;
exit:
kfree(targets);
@@ -258,11 +251,6 @@ void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
switch (event) {
case NFC_HCI_EVT_TARGET_DISCOVERED:
- if (hdev->poll_started == false) {
- r = -EPROTO;
- goto exit;
- }
-
if (skb->len < 1) { /* no status data? */
r = -EPROTO;
goto exit;
@@ -496,74 +484,42 @@ static int hci_dev_down(struct nfc_dev *nfc_dev)
static int hci_start_poll(struct nfc_dev *nfc_dev, u32 protocols)
{
struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
- int r;
if (hdev->ops->start_poll)
- r = hdev->ops->start_poll(hdev, protocols);
+ return hdev->ops->start_poll(hdev, protocols);
else
- r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ return nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
- if (r == 0)
- hdev->poll_started = true;
-
- return r;
}
static void hci_stop_poll(struct nfc_dev *nfc_dev)
{
struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
- if (hdev->poll_started) {
- nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
- NFC_HCI_EVT_END_OPERATION, NULL, 0);
- hdev->poll_started = false;
- }
-}
-
-static struct nfc_target *hci_find_target(struct nfc_hci_dev *hdev,
- u32 target_idx)
-{
- int i;
- if (hdev->poll_started == false || hdev->targets == NULL)
- return NULL;
-
- for (i = 0; i < hdev->target_count; i++) {
- if (hdev->targets[i].idx == target_idx)
- return &hdev->targets[i];
- }
-
- return NULL;
+ nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_EVT_END_OPERATION, NULL, 0);
}
-static int hci_activate_target(struct nfc_dev *nfc_dev, u32 target_idx,
- u32 protocol)
+static int hci_activate_target(struct nfc_dev *nfc_dev,
+ struct nfc_target *target, u32 protocol)
{
- struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
-
- if (hci_find_target(hdev, target_idx) == NULL)
- return -ENOMEDIUM;
-
return 0;
}
-static void hci_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx)
+static void hci_deactivate_target(struct nfc_dev *nfc_dev,
+ struct nfc_target *target)
{
}
-static int hci_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
+static int hci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target,
struct sk_buff *skb, data_exchange_cb_t cb,
void *cb_context)
{
struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
int r;
- struct nfc_target *target;
struct sk_buff *res_skb = NULL;
- pr_debug("target_idx=%d\n", target_idx);
-
- target = hci_find_target(hdev, target_idx);
- if (target == NULL)
- return -ENOMEDIUM;
+ pr_debug("target_idx=%d\n", target->idx);
switch (target->hci_reader_gate) {
case NFC_HCI_RF_READER_A_GATE:
@@ -605,7 +561,18 @@ static int hci_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
return 0;
}
-struct nfc_ops hci_nfc_ops = {
+static int hci_check_presence(struct nfc_dev *nfc_dev,
+ struct nfc_target *target)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+ if (hdev->ops->check_presence)
+ return hdev->ops->check_presence(hdev, target);
+
+ return 0;
+}
+
+static struct nfc_ops hci_nfc_ops = {
.dev_up = hci_dev_up,
.dev_down = hci_dev_down,
.start_poll = hci_start_poll,
@@ -613,6 +580,7 @@ struct nfc_ops hci_nfc_ops = {
.activate_target = hci_activate_target,
.deactivate_target = hci_deactivate_target,
.data_exchange = hci_data_exchange,
+ .check_presence = hci_check_presence,
};
struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
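
hci_check_presence() is a thin wrapper: it forwards to the driver's optional check_presence hook and reports success when the hook is absent, the same pattern nfc_shdlc_check_presence() follows below. A minimal sketch of that optional-callback delegation, using a hypothetical ops structure:

#include <stddef.h>
#include <stdio.h>

struct target { unsigned int idx; };

/* Hypothetical driver ops: check_presence is optional. */
struct driver_ops {
    int (*check_presence)(void *priv, struct target *t);
};

/* Core-side wrapper: delegate if the driver implements the hook,
 * otherwise report "still present" (0) so the core keeps polling. */
static int core_check_presence(const struct driver_ops *ops, void *priv,
                               struct target *t)
{
    if (ops->check_presence)
        return ops->check_presence(priv, t);
    return 0;
}

static int fake_check(void *priv, struct target *t)
{
    (void)priv;
    printf("driver asked about target %u\n", t->idx);
    return 0;
}

int main(void)
{
    struct driver_ops with = { .check_presence = fake_check };
    struct driver_ops without = { .check_presence = NULL };
    struct target t = { 3 };

    printf("%d %d\n", core_check_presence(&with, NULL, &t),
                      core_check_presence(&without, NULL, &t));
    return 0;
}
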
diff --git a/net/nfc/hci/shdlc.c b/net/nfc/hci/shdlc.c
index 923bdf7c26d..5665dc6d893 100644
--- a/net/nfc/hci/shdlc.c
+++ b/net/nfc/hci/shdlc.c
@@ -816,6 +816,17 @@ static int nfc_shdlc_data_exchange(struct nfc_hci_dev *hdev,
return -EPERM;
}
+static int nfc_shdlc_check_presence(struct nfc_hci_dev *hdev,
+ struct nfc_target *target)
+{
+ struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
+
+ if (shdlc->ops->check_presence)
+ return shdlc->ops->check_presence(shdlc, target);
+
+ return 0;
+}
+
static struct nfc_hci_ops shdlc_ops = {
.open = nfc_shdlc_open,
.close = nfc_shdlc_close,
@@ -825,6 +836,7 @@ static struct nfc_hci_ops shdlc_ops = {
.target_from_gate = nfc_shdlc_target_from_gate,
.complete_target_discovered = nfc_shdlc_complete_target_discovered,
.data_exchange = nfc_shdlc_data_exchange,
+ .check_presence = nfc_shdlc_check_presence,
};
struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops,
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
index 11a3b7d98dc..bf8ae4f0b90 100644
--- a/net/nfc/llcp/commands.c
+++ b/net/nfc/llcp/commands.c
@@ -488,7 +488,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
memcpy(skb_put(pdu, frag_len), msg_ptr, frag_len);
- skb_queue_head(&sock->tx_queue, pdu);
+ skb_queue_tail(&sock->tx_queue, pdu);
lock_sock(sk);
@@ -502,7 +502,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
kfree(msg_data);
- return 0;
+ return len;
}
int nfc_llcp_send_rr(struct nfc_llcp_sock *sock)
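
Two behavioural fixes in nfc_llcp_send_i_frame(): fragments are now appended with skb_queue_tail() so they leave in order, and the function returns the number of bytes queued instead of 0, which is what a sendmsg() path is expected to hand back. The small user-space loop below shows why callers depend on that return value; the socket setup is omitted and the send() loop works on any stream socket.

#include <stdio.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Send the whole buffer, handling short writes: this only works if the
 * protocol's sendmsg() reports how many bytes it actually queued. */
static ssize_t send_all(int fd, const char *buf, size_t len)
{
    size_t off = 0;

    while (off < len) {
        ssize_t n = send(fd, buf + off, len - off, 0);
        if (n < 0)
            return -1;      /* caller checks errno */
        off += (size_t)n;
    }
    return (ssize_t)off;
}

int main(void)
{
    /* fd would come from socket()/connect(); -1 keeps the demo harmless. */
    if (send_all(-1, "hello", 5) < 0)
        perror("send_all");
    return 0;
}
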
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index 92988aa620d..42994fac26d 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -448,6 +448,8 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
{
struct nfc_llcp_sock *sock, *llcp_sock, *n;
+ pr_debug("ssap dsap %d %d\n", ssap, dsap);
+
if (ssap == 0 && dsap == 0)
return NULL;
@@ -783,6 +785,7 @@ static void nfc_llcp_recv_disc(struct nfc_llcp_local *local,
static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
{
struct nfc_llcp_sock *llcp_sock;
+ struct sock *sk;
u8 dsap, ssap;
dsap = nfc_llcp_dsap(skb);
@@ -801,10 +804,14 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
}
llcp_sock->dsap = ssap;
+ sk = &llcp_sock->sk;
nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
skb->len - LLCP_HEADER_SIZE);
+ sk->sk_state = LLCP_CONNECTED;
+ sk->sk_state_change(sk);
+
nfc_llcp_sock_put(llcp_sock);
}
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index c13e02ebdef..3f339b19d14 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -27,6 +27,42 @@
#include "../nfc.h"
#include "llcp.h"
+static int sock_wait_state(struct sock *sk, int state, unsigned long timeo)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int err = 0;
+
+ pr_debug("sk %p", sk);
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ while (sk->sk_state != state) {
+ if (!timeo) {
+ err = -EINPROGRESS;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
+ break;
+ }
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ err = sock_error(sk);
+ if (err)
+ break;
+ }
+
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sk), &wait);
+ return err;
+}
+
static struct proto llcp_sock_proto = {
.name = "NFC_LLCP",
.owner = THIS_MODULE,
@@ -304,11 +340,24 @@ static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
mask |= POLLERR;
if (!skb_queue_empty(&sk->sk_receive_queue))
- mask |= POLLIN;
+ mask |= POLLIN | POLLRDNORM;
if (sk->sk_state == LLCP_CLOSED)
mask |= POLLHUP;
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ mask |= POLLRDHUP | POLLIN | POLLRDNORM;
+
+ if (sk->sk_shutdown == SHUTDOWN_MASK)
+ mask |= POLLHUP;
+
+ if (sock_writeable(sk))
+ mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+ else
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+
+ pr_debug("mask 0x%x\n", mask);
+
return mask;
}
@@ -462,9 +511,13 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
if (ret)
goto put_dev;
- sk->sk_state = LLCP_CONNECTED;
+ ret = sock_wait_state(sk, LLCP_CONNECTED,
+ sock_sndtimeo(sk, flags & O_NONBLOCK));
+ if (ret)
+ goto put_dev;
release_sock(sk);
+
return 0;
put_dev:
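
llcp_sock_connect() now blocks in sock_wait_state() until the connection completes or the send timeout expires, and llcp_sock_poll() reports POLLOUT/POLLRDNORM/POLLRDHUP like other stream sockets. The user-space pattern this enables is the usual non-blocking connect followed by poll(); the sketch below shows that pattern on a generic socket, with the address setup deliberately left out since it depends on the address family.

#define _GNU_SOURCE           /* for POLLRDHUP on glibc */
#include <errno.h>
#include <poll.h>
#include <stdio.h>

/* Wait up to timeout_ms for a non-blocking connect() to finish,
 * then distinguish writable, hung-up, and timed-out outcomes. */
static int wait_connected(int fd, int timeout_ms)
{
    struct pollfd pfd = { .fd = fd, .events = POLLOUT | POLLRDHUP };
    int rc = poll(&pfd, 1, timeout_ms);

    if (rc < 0)
        return -errno;
    if (rc == 0)
        return -ETIMEDOUT;
    if (pfd.revents & (POLLHUP | POLLERR | POLLRDHUP))
        return -ECONNRESET;
    return 0;                       /* POLLOUT: ready to send */
}

int main(void)
{
    /* fd would come from socket() + a non-blocking connect();
     * -1 keeps the demo self-contained and exercises the timeout path. */
    printf("wait_connected: %d\n", wait_connected(-1, 100));
    return 0;
}
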
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 8737c2089fd..d560e6f1307 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -436,16 +436,16 @@ static void nci_stop_poll(struct nfc_dev *nfc_dev)
msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}
-static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
- __u32 protocol)
+static int nci_activate_target(struct nfc_dev *nfc_dev,
+ struct nfc_target *target, __u32 protocol)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
struct nci_rf_discover_select_param param;
- struct nfc_target *target = NULL;
+ struct nfc_target *nci_target = NULL;
int i;
int rc = 0;
- pr_debug("target_idx %d, protocol 0x%x\n", target_idx, protocol);
+ pr_debug("target_idx %d, protocol 0x%x\n", target->idx, protocol);
if ((atomic_read(&ndev->state) != NCI_W4_HOST_SELECT) &&
(atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
@@ -459,25 +459,25 @@ static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
}
for (i = 0; i < ndev->n_targets; i++) {
- if (ndev->targets[i].idx == target_idx) {
- target = &ndev->targets[i];
+ if (ndev->targets[i].idx == target->idx) {
+ nci_target = &ndev->targets[i];
break;
}
}
- if (!target) {
+ if (!nci_target) {
pr_err("unable to find the selected target\n");
return -EINVAL;
}
- if (!(target->supported_protocols & (1 << protocol))) {
+ if (!(nci_target->supported_protocols & (1 << protocol))) {
pr_err("target does not support the requested protocol 0x%x\n",
protocol);
return -EINVAL;
}
if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
- param.rf_discovery_id = target->logical_idx;
+ param.rf_discovery_id = nci_target->logical_idx;
if (protocol == NFC_PROTO_JEWEL)
param.rf_protocol = NCI_RF_PROTOCOL_T1T;
@@ -501,11 +501,12 @@ static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
return rc;
}
-static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
+static void nci_deactivate_target(struct nfc_dev *nfc_dev,
+ struct nfc_target *target)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
- pr_debug("target_idx %d\n", target_idx);
+ pr_debug("target_idx %d\n", target->idx);
if (!ndev->target_active_prot) {
pr_err("unable to deactivate target, no active target\n");
@@ -520,14 +521,14 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
}
}
-static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
+static int nci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target,
struct sk_buff *skb,
data_exchange_cb_t cb, void *cb_context)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
int rc;
- pr_debug("target_idx %d, len %d\n", target_idx, skb->len);
+ pr_debug("target_idx %d, len %d\n", target->idx, skb->len);
if (!ndev->target_active_prot) {
pr_err("unable to exchange data, no active target\n");
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
index a0bc326308a..76c48c5324f 100644
--- a/net/nfc/nci/data.c
+++ b/net/nfc/nci/data.c
@@ -49,7 +49,7 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
if (cb) {
ndev->data_exchange_cb = NULL;
- ndev->data_exchange_cb_context = 0;
+ ndev->data_exchange_cb_context = NULL;
/* forward skb to nfc core */
cb(cb_context, skb, err);
@@ -200,10 +200,10 @@ static void nci_add_rx_data_frag(struct nci_dev *ndev,
pr_err("error adding room for accumulated rx data\n");
kfree_skb(skb);
- skb = 0;
+ skb = NULL;
kfree_skb(ndev->rx_data_reassembly);
- ndev->rx_data_reassembly = 0;
+ ndev->rx_data_reassembly = NULL;
err = -ENOMEM;
goto exit;
@@ -216,7 +216,7 @@ static void nci_add_rx_data_frag(struct nci_dev *ndev,
/* third, free old reassembly */
kfree_skb(ndev->rx_data_reassembly);
- ndev->rx_data_reassembly = 0;
+ ndev->rx_data_reassembly = NULL;
}
if (pbf == NCI_PBF_CONT) {
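
The data.c hunks consistently reset freed pointers to NULL rather than 0, which reads as a pointer assignment and keeps later reassembly checks from acting on a dangling pointer. A user-space sketch of the same free-and-reset idiom, assuming nothing beyond the standard library:

#include <stdio.h>
#include <stdlib.h>

struct reassembly {
    unsigned char *buf;   /* stands in for rx_data_reassembly */
    size_t len;
};

/* Drop any partial buffer and leave the struct in a safe state:
 * freeing and NULLing together means a repeated drop is a no-op. */
static void reassembly_drop(struct reassembly *r)
{
    free(r->buf);         /* free(NULL) is defined as a no-op */
    r->buf = NULL;
    r->len = 0;
}

int main(void)
{
    struct reassembly r = { malloc(16), 16 };

    reassembly_drop(&r);
    reassembly_drop(&r);  /* safe: buf is already NULL */
    printf("buf=%p len=%zu\n", (void *)r.buf, r.len);
    return 0;
}
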
diff --git a/net/nfc/nci/lib.c b/net/nfc/nci/lib.c
index 6a63e5eb483..6b7fd26c68d 100644
--- a/net/nfc/nci/lib.c
+++ b/net/nfc/nci/lib.c
@@ -31,6 +31,7 @@
#include <linux/errno.h>
#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
/* NCI status codes to Unix errno mapping */
int nci_to_errno(__u8 code)
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index 99e1632e6aa..cb2646179e5 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -497,7 +497,7 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
/* drop partial rx data packet */
if (ndev->rx_data_reassembly) {
kfree_skb(ndev->rx_data_reassembly);
- ndev->rx_data_reassembly = 0;
+ ndev->rx_data_reassembly = NULL;
}
/* complete the data exchange transaction, if exists */
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index f1829f6ae9c..581d419083a 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -33,7 +33,7 @@ static struct genl_multicast_group nfc_genl_event_mcgrp = {
.name = NFC_GENL_MCAST_EVENT_NAME,
};
-struct genl_family nfc_genl_family = {
+static struct genl_family nfc_genl_family = {
.id = GENL_ID_GENERATE,
.hdrsize = 0,
.name = NFC_GENL_NAME,
@@ -128,7 +128,7 @@ static int nfc_genl_dump_targets(struct sk_buff *skb,
cb->args[1] = (long) dev;
}
- spin_lock_bh(&dev->targets_lock);
+ device_lock(&dev->dev);
cb->seq = dev->targets_generation;
@@ -141,7 +141,7 @@ static int nfc_genl_dump_targets(struct sk_buff *skb,
i++;
}
- spin_unlock_bh(&dev->targets_lock);
+ device_unlock(&dev->dev);
cb->args[0] = i;
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index 7d589a81942..3dd4232ae66 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -84,7 +84,7 @@ static inline int nfc_llcp_set_remote_gb(struct nfc_dev *dev,
return 0;
}
-static inline u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, u8 *gb_len)
+static inline u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *gb_len)
{
*gb_len = 0;
return NULL;
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 2fcfe0993ca..884801ac4dd 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -45,7 +45,7 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
return chan;
}
-int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
+bool cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
{
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 39f2538a46f..a87d4355297 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -664,7 +664,7 @@ void wiphy_unregister(struct wiphy *wiphy)
mutex_lock(&rdev->devlist_mtx);
__count = rdev->opencount;
mutex_unlock(&rdev->devlist_mtx);
- __count == 0;}));
+ __count == 0; }));
mutex_lock(&rdev->devlist_mtx);
BUG_ON(!list_empty(&rdev->netdev_list));
@@ -776,7 +776,7 @@ static struct device_type wiphy_type = {
.name = "wlan",
};
-static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
+static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
unsigned long state,
void *ndev)
{
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 3ac2dd00d71..8523f387867 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -445,8 +445,6 @@ int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev, int freq,
enum nl80211_channel_type channel_type);
-u16 cfg80211_calculate_bitrate(struct rate_info *rate);
-
int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
const u8 *rates, unsigned int n_rates,
u32 *mask);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index b67b1114e25..206465dc0ca 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1179,6 +1179,27 @@ static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
wdev->iftype == NL80211_IFTYPE_P2P_GO;
}
+static bool nl80211_valid_channel_type(struct genl_info *info,
+ enum nl80211_channel_type *channel_type)
+{
+ enum nl80211_channel_type tmp;
+
+ if (!info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE])
+ return false;
+
+ tmp = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
+ if (tmp != NL80211_CHAN_NO_HT &&
+ tmp != NL80211_CHAN_HT20 &&
+ tmp != NL80211_CHAN_HT40PLUS &&
+ tmp != NL80211_CHAN_HT40MINUS)
+ return false;
+
+ if (channel_type)
+ *channel_type = tmp;
+
+ return true;
+}
+
static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev,
struct genl_info *info)
@@ -1193,15 +1214,9 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
if (!nl80211_can_set_dev_channel(wdev))
return -EOPNOTSUPP;
- if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
- channel_type = nla_get_u32(info->attrs[
- NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
- if (channel_type != NL80211_CHAN_NO_HT &&
- channel_type != NL80211_CHAN_HT20 &&
- channel_type != NL80211_CHAN_HT40PLUS &&
- channel_type != NL80211_CHAN_HT40MINUS)
- return -EINVAL;
- }
+ if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
+ !nl80211_valid_channel_type(info, &channel_type))
+ return -EINVAL;
freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
@@ -2410,10 +2425,16 @@ static int parse_station_flags(struct genl_info *info,
return -EINVAL;
}
- for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++)
- if (flags[flag])
+ for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++) {
+ if (flags[flag]) {
params->sta_flags_set |= (1<<flag);
+ /* no longer support new API additions in old API */
+ if (flag > NL80211_STA_FLAG_MAX_OLD_API)
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -4912,12 +4933,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
enum nl80211_channel_type channel_type;
- channel_type = nla_get_u32(
- info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
- if (channel_type != NL80211_CHAN_NO_HT &&
- channel_type != NL80211_CHAN_HT20 &&
- channel_type != NL80211_CHAN_HT40MINUS &&
- channel_type != NL80211_CHAN_HT40PLUS)
+ if (!nl80211_valid_channel_type(info, &channel_type))
return -EINVAL;
if (channel_type != NL80211_CHAN_NO_HT &&
@@ -5485,15 +5501,9 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
!(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL))
return -EOPNOTSUPP;
- if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
- channel_type = nla_get_u32(
- info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
- if (channel_type != NL80211_CHAN_NO_HT &&
- channel_type != NL80211_CHAN_HT20 &&
- channel_type != NL80211_CHAN_HT40PLUS &&
- channel_type != NL80211_CHAN_HT40MINUS)
- return -EINVAL;
- }
+ if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
+ !nl80211_valid_channel_type(info, &channel_type))
+ return -EINVAL;
freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
chan = rdev_freq_to_chan(rdev, freq, channel_type);
@@ -5764,12 +5774,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
}
if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
- channel_type = nla_get_u32(
- info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
- if (channel_type != NL80211_CHAN_NO_HT &&
- channel_type != NL80211_CHAN_HT20 &&
- channel_type != NL80211_CHAN_HT40PLUS &&
- channel_type != NL80211_CHAN_HT40MINUS)
+ if (!nl80211_valid_channel_type(info, &channel_type))
return -EINVAL;
channel_type_valid = true;
}
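
The nl80211.c change factors four copies of the same attribute check into nl80211_valid_channel_type(), which validates the value and optionally writes it through an out-parameter. A stand-alone sketch of that validate-and-return-by-pointer shape, with an illustrative enum in place of nl80211_channel_type:

#include <stdbool.h>
#include <stdio.h>

enum chan_type { CHAN_NO_HT, CHAN_HT20, CHAN_HT40PLUS, CHAN_HT40MINUS };

/* Validate a raw u32 from userspace; store it only when valid and
 * only when the caller asked for it (out may be NULL). */
static bool valid_channel_type(unsigned int raw, enum chan_type *out)
{
    switch (raw) {
    case CHAN_NO_HT:
    case CHAN_HT20:
    case CHAN_HT40PLUS:
    case CHAN_HT40MINUS:
        if (out)
            *out = (enum chan_type)raw;
        return true;
    default:
        return false;
    }
}

int main(void)
{
    enum chan_type ct;

    if (valid_channel_type(2, &ct))
        printf("accepted type %d\n", ct);
    if (!valid_channel_type(42, NULL))
        printf("rejected 42\n");
    return 0;
}
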
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 1cd255892a4..55d99466bab 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -879,7 +879,7 @@ u16 cfg80211_calculate_bitrate(struct rate_info *rate)
return rate->legacy;
/* the formula below does only work for MCS values smaller than 32 */
- if (rate->mcs >= 32)
+ if (WARN_ON_ONCE(rate->mcs >= 32))
return 0;
modulation = rate->mcs & 7;