Diffstat (limited to 'net/bluetooth')
-rw-r--r--  net/bluetooth/bnep/core.c       13
-rw-r--r--  net/bluetooth/cmtp/core.c       13
-rw-r--r--  net/bluetooth/hci_conn.c       182
-rw-r--r--  net/bluetooth/hci_core.c       561
-rw-r--r--  net/bluetooth/hci_event.c      444
-rw-r--r--  net/bluetooth/hci_sock.c        21
-rw-r--r--  net/bluetooth/hci_sysfs.c      131
-rw-r--r--  net/bluetooth/hidp/core.c      157
-rw-r--r--  net/bluetooth/l2cap_core.c    1378
-rw-r--r--  net/bluetooth/l2cap_sock.c     168
-rw-r--r--  net/bluetooth/mgmt.c          1724
-rw-r--r--  net/bluetooth/rfcomm/core.c     65
-rw-r--r--  net/bluetooth/rfcomm/sock.c      2
-rw-r--r--  net/bluetooth/rfcomm/tty.c      45
-rw-r--r--  net/bluetooth/sco.c              4
-rw-r--r--  net/bluetooth/smp.c             35
16 files changed, 3159 insertions(+), 1784 deletions(-)
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 1eea8208b2c..42d53b85a80 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -65,15 +65,13 @@ static DECLARE_RWSEM(bnep_session_sem);
static struct bnep_session *__bnep_get_session(u8 *dst)
{
struct bnep_session *s;
- struct list_head *p;
BT_DBG("");
- list_for_each(p, &bnep_session_list) {
- s = list_entry(p, struct bnep_session, list);
+ list_for_each_entry(s, &bnep_session_list, list)
if (!compare_ether_addr(dst, s->eh.h_source))
return s;
- }
+
return NULL;
}
@@ -665,17 +663,14 @@ static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s)
int bnep_get_connlist(struct bnep_connlist_req *req)
{
- struct list_head *p;
+ struct bnep_session *s;
int err = 0, n = 0;
down_read(&bnep_session_sem);
- list_for_each(p, &bnep_session_list) {
- struct bnep_session *s;
+ list_for_each_entry(s, &bnep_session_list, list) {
struct bnep_conninfo ci;
- s = list_entry(p, struct bnep_session, list);
-
__bnep_copy_ci(&ci, s);
if (copy_to_user(req->ci, &ci, sizeof(ci))) {
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 5a6e634f7fc..6c9c1fd601c 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -53,15 +53,13 @@ static LIST_HEAD(cmtp_session_list);
static struct cmtp_session *__cmtp_get_session(bdaddr_t *bdaddr)
{
struct cmtp_session *session;
- struct list_head *p;
BT_DBG("");
- list_for_each(p, &cmtp_session_list) {
- session = list_entry(p, struct cmtp_session, list);
+ list_for_each_entry(session, &cmtp_session_list, list)
if (!bacmp(bdaddr, &session->bdaddr))
return session;
- }
+
return NULL;
}
@@ -432,19 +430,16 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
int cmtp_get_connlist(struct cmtp_connlist_req *req)
{
- struct list_head *p;
+ struct cmtp_session *session;
int err = 0, n = 0;
BT_DBG("");
down_read(&cmtp_session_sem);
- list_for_each(p, &cmtp_session_list) {
- struct cmtp_session *session;
+ list_for_each_entry(session, &cmtp_session_list, list) {
struct cmtp_conninfo ci;
- session = list_entry(p, struct cmtp_session, list);
-
__cmtp_copy_session(session, &ci);
if (copy_to_user(req->ci, &ci, sizeof(ci))) {
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index c1c597e3e19..401d8ea266a 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -123,7 +123,7 @@ static void hci_acl_connect_cancel(struct hci_conn *conn)
BT_DBG("%p", conn);
- if (conn->hdev->hci_ver < 2)
+ if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
return;
bacpy(&cp.bdaddr, &conn->dst);
@@ -275,9 +275,10 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status)
}
}
-static void hci_conn_timeout(unsigned long arg)
+static void hci_conn_timeout(struct work_struct *work)
{
- struct hci_conn *conn = (void *) arg;
+ struct hci_conn *conn = container_of(work, struct hci_conn,
+ disc_work.work);
struct hci_dev *hdev = conn->hdev;
__u8 reason;
@@ -311,6 +312,42 @@ static void hci_conn_timeout(unsigned long arg)
hci_dev_unlock(hdev);
}
+/* Enter sniff mode */
+static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+
+ BT_DBG("conn %p mode %d", conn, conn->mode);
+
+ if (test_bit(HCI_RAW, &hdev->flags))
+ return;
+
+ if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
+ return;
+
+ if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
+ return;
+
+ if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
+ struct hci_cp_sniff_subrate cp;
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.max_latency = cpu_to_le16(0);
+ cp.min_remote_timeout = cpu_to_le16(0);
+ cp.min_local_timeout = cpu_to_le16(0);
+ hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
+ }
+
+ if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
+ struct hci_cp_sniff_mode cp;
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
+ cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
+ cp.attempt = cpu_to_le16(4);
+ cp.timeout = cpu_to_le16(1);
+ hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
+ }
+}
+
static void hci_conn_idle(unsigned long arg)
{
struct hci_conn *conn = (void *) arg;
@@ -325,12 +362,8 @@ static void hci_conn_auto_accept(unsigned long arg)
struct hci_conn *conn = (void *) arg;
struct hci_dev *hdev = conn->hdev;
- hci_dev_lock(hdev);
-
hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
&conn->dst);
-
- hci_dev_unlock(hdev);
}
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
@@ -374,7 +407,9 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
skb_queue_head_init(&conn->data_q);
- setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
+	INIT_LIST_HEAD(&conn->chan_list);
+
+ INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
(unsigned long) conn);
@@ -383,8 +418,6 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
hci_dev_hold(hdev);
- tasklet_disable(&hdev->tx_task);
-
hci_conn_hash_add(hdev, conn);
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
@@ -393,8 +426,6 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
hci_conn_init_sysfs(conn);
- tasklet_enable(&hdev->tx_task);
-
return conn;
}
@@ -406,7 +437,7 @@ int hci_conn_del(struct hci_conn *conn)
del_timer(&conn->idle_timer);
- del_timer(&conn->disc_timer);
+ cancel_delayed_work_sync(&conn->disc_work);
del_timer(&conn->auto_accept_timer);
@@ -430,14 +461,13 @@ int hci_conn_del(struct hci_conn *conn)
}
}
- tasklet_disable(&hdev->tx_task);
+
+ hci_chan_list_flush(conn);
hci_conn_hash_del(hdev, conn);
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
- tasklet_enable(&hdev->tx_task);
-
skb_queue_purge(&conn->data_q);
hci_conn_put_device(conn);
@@ -453,16 +483,13 @@ int hci_conn_del(struct hci_conn *conn)
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
int use_src = bacmp(src, BDADDR_ANY);
- struct hci_dev *hdev = NULL;
- struct list_head *p;
+ struct hci_dev *hdev = NULL, *d;
BT_DBG("%s -> %s", batostr(src), batostr(dst));
read_lock_bh(&hci_dev_list_lock);
- list_for_each(p, &hci_dev_list) {
- struct hci_dev *d = list_entry(p, struct hci_dev, list);
-
+ list_for_each_entry(d, &hci_dev_list, list) {
if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
continue;
@@ -766,60 +793,18 @@ timer:
jiffies + msecs_to_jiffies(hdev->idle_timeout));
}
-/* Enter sniff mode */
-void hci_conn_enter_sniff_mode(struct hci_conn *conn)
-{
- struct hci_dev *hdev = conn->hdev;
-
- BT_DBG("conn %p mode %d", conn, conn->mode);
-
- if (test_bit(HCI_RAW, &hdev->flags))
- return;
-
- if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
- return;
-
- if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
- return;
-
- if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
- struct hci_cp_sniff_subrate cp;
- cp.handle = cpu_to_le16(conn->handle);
- cp.max_latency = cpu_to_le16(0);
- cp.min_remote_timeout = cpu_to_le16(0);
- cp.min_local_timeout = cpu_to_le16(0);
- hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
- }
-
- if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
- struct hci_cp_sniff_mode cp;
- cp.handle = cpu_to_le16(conn->handle);
- cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
- cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
- cp.attempt = cpu_to_le16(4);
- cp.timeout = cpu_to_le16(1);
- hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
- }
-}
-
/* Drop all connection on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct list_head *p;
+ struct hci_conn *c;
BT_DBG("hdev %s", hdev->name);
- p = h->list.next;
- while (p != &h->list) {
- struct hci_conn *c;
-
- c = list_entry(p, struct hci_conn, list);
- p = p->next;
-
+ list_for_each_entry_rcu(c, &h->list, list) {
c->state = BT_CLOSED;
- hci_proto_disconn_cfm(c, 0x16);
+ hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
hci_conn_del(c);
}
}
@@ -855,10 +840,10 @@ EXPORT_SYMBOL(hci_conn_put_device);
int hci_get_conn_list(void __user *arg)
{
+ register struct hci_conn *c;
struct hci_conn_list_req req, *cl;
struct hci_conn_info *ci;
struct hci_dev *hdev;
- struct list_head *p;
int n = 0, size, err;
if (copy_from_user(&req, arg, sizeof(req)))
@@ -881,11 +866,8 @@ int hci_get_conn_list(void __user *arg)
ci = cl->conn_info;
- hci_dev_lock_bh(hdev);
- list_for_each(p, &hdev->conn_hash.list) {
- register struct hci_conn *c;
- c = list_entry(p, struct hci_conn, list);
-
+ hci_dev_lock(hdev);
+ list_for_each_entry(c, &hdev->conn_hash.list, list) {
bacpy(&(ci + n)->bdaddr, &c->dst);
(ci + n)->handle = c->handle;
(ci + n)->type = c->type;
@@ -895,7 +877,7 @@ int hci_get_conn_list(void __user *arg)
if (++n >= req.conn_num)
break;
}
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
cl->dev_id = hdev->id;
cl->conn_num = n;
@@ -919,7 +901,7 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
if (copy_from_user(&req, arg, sizeof(req)))
return -EFAULT;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
if (conn) {
bacpy(&ci.bdaddr, &conn->dst);
@@ -929,7 +911,7 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
ci.state = conn->state;
ci.link_mode = conn->link_mode;
}
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
if (!conn)
return -ENOENT;
@@ -945,14 +927,60 @@ int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
if (copy_from_user(&req, arg, sizeof(req)))
return -EFAULT;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
if (conn)
req.type = conn->auth_type;
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
if (!conn)
return -ENOENT;
return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}
+
+struct hci_chan *hci_chan_create(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+ struct hci_chan *chan;
+
+ BT_DBG("%s conn %p", hdev->name, conn);
+
+ chan = kzalloc(sizeof(struct hci_chan), GFP_ATOMIC);
+ if (!chan)
+ return NULL;
+
+ chan->conn = conn;
+ skb_queue_head_init(&chan->data_q);
+
+ list_add_rcu(&chan->list, &conn->chan_list);
+
+ return chan;
+}
+
+int hci_chan_del(struct hci_chan *chan)
+{
+ struct hci_conn *conn = chan->conn;
+ struct hci_dev *hdev = conn->hdev;
+
+ BT_DBG("%s conn %p chan %p", hdev->name, conn, chan);
+
+ list_del_rcu(&chan->list);
+
+ synchronize_rcu();
+
+ skb_queue_purge(&chan->data_q);
+ kfree(chan);
+
+ return 0;
+}
+
+void hci_chan_list_flush(struct hci_conn *conn)
+{
+ struct hci_chan *chan;
+
+ BT_DBG("conn %p", conn);
+
+ list_for_each_entry_rcu(chan, &conn->chan_list, list)
+ hci_chan_del(chan);
+}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index be84ae33ae3..d6382dbb7b7 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1,6 +1,7 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
+ Copyright (C) 2011 ProFUSION Embedded Systems
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -54,11 +55,13 @@
#define AUTO_OFF_TIMEOUT 2000
-static void hci_cmd_task(unsigned long arg);
-static void hci_rx_task(unsigned long arg);
-static void hci_tx_task(unsigned long arg);
+int enable_hs;
-static DEFINE_RWLOCK(hci_task_lock);
+static void hci_rx_work(struct work_struct *work);
+static void hci_cmd_work(struct work_struct *work);
+static void hci_tx_work(struct work_struct *work);
+
+static DEFINE_MUTEX(hci_task_lock);
/* HCI device list */
LIST_HEAD(hci_dev_list);
@@ -207,7 +210,7 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
skb->dev = (void *) hdev;
skb_queue_tail(&hdev->cmd_q, skb);
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
skb_queue_purge(&hdev->driver_init);
@@ -228,18 +231,6 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
/* Read Buffer Size (ACL mtu, max pkt, etc.) */
hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
-#if 0
- /* Host buffer size */
- {
- struct hci_cp_host_buffer_size cp;
- cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
- cp.sco_mtu = HCI_MAX_SCO_SIZE;
- cp.acl_max_pkt = cpu_to_le16(0xffff);
- cp.sco_max_pkt = cpu_to_le16(0xffff);
- hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
- }
-#endif
-
/* Read BD Address */
hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
@@ -319,8 +310,7 @@ static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
* Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
- struct hci_dev *hdev = NULL;
- struct list_head *p;
+ struct hci_dev *hdev = NULL, *d;
BT_DBG("%d", index);
@@ -328,8 +318,7 @@ struct hci_dev *hci_dev_get(int index)
return NULL;
read_lock(&hci_dev_list_lock);
- list_for_each(p, &hci_dev_list) {
- struct hci_dev *d = list_entry(p, struct hci_dev, list);
+ list_for_each_entry(d, &hci_dev_list, list) {
if (d->id == index) {
hdev = hci_dev_hold(d);
break;
@@ -445,14 +434,14 @@ int hci_inquiry(void __user *arg)
if (!hdev)
return -ENODEV;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
inquiry_cache_empty(hdev) ||
ir.flags & IREQ_CACHE_FLUSH) {
inquiry_cache_flush(hdev);
do_inquiry = 1;
}
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
timeo = ir.length * msecs_to_jiffies(2000);
@@ -474,9 +463,9 @@ int hci_inquiry(void __user *arg)
goto done;
}
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
BT_DBG("num_rsp %d", ir.num_rsp);
@@ -523,8 +512,9 @@ int hci_dev_open(__u16 dev)
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
set_bit(HCI_RAW, &hdev->flags);
- /* Treat all non BR/EDR controllers as raw devices for now */
- if (hdev->dev_type != HCI_BREDR)
+ /* Treat all non BR/EDR controllers as raw devices if
+ enable_hs is not set */
+ if (hdev->dev_type != HCI_BREDR && !enable_hs)
set_bit(HCI_RAW, &hdev->flags);
if (hdev->open(hdev)) {
@@ -551,13 +541,16 @@ int hci_dev_open(__u16 dev)
hci_dev_hold(hdev);
set_bit(HCI_UP, &hdev->flags);
hci_notify(hdev, HCI_DEV_UP);
- if (!test_bit(HCI_SETUP, &hdev->flags))
- mgmt_powered(hdev->id, 1);
+ if (!test_bit(HCI_SETUP, &hdev->flags)) {
+ hci_dev_lock(hdev);
+ mgmt_powered(hdev, 1);
+ hci_dev_unlock(hdev);
+ }
} else {
/* Init failed, cleanup */
- tasklet_kill(&hdev->rx_task);
- tasklet_kill(&hdev->tx_task);
- tasklet_kill(&hdev->cmd_task);
+ flush_work(&hdev->tx_work);
+ flush_work(&hdev->cmd_work);
+ flush_work(&hdev->rx_work);
skb_queue_purge(&hdev->cmd_q);
skb_queue_purge(&hdev->rx_q);
@@ -593,14 +586,25 @@ static int hci_dev_do_close(struct hci_dev *hdev)
return 0;
}
- /* Kill RX and TX tasks */
- tasklet_kill(&hdev->rx_task);
- tasklet_kill(&hdev->tx_task);
+ /* Flush RX and TX works */
+ flush_work(&hdev->tx_work);
+ flush_work(&hdev->rx_work);
+
+ if (hdev->discov_timeout > 0) {
+ cancel_delayed_work(&hdev->discov_off);
+ hdev->discov_timeout = 0;
+ }
+
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+ cancel_delayed_work(&hdev->power_off);
- hci_dev_lock_bh(hdev);
+ if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
+ cancel_delayed_work(&hdev->service_cache);
+
+ hci_dev_lock(hdev);
inquiry_cache_flush(hdev);
hci_conn_hash_flush(hdev);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_notify(hdev, HCI_DEV_DOWN);
@@ -617,8 +621,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
clear_bit(HCI_INIT, &hdev->flags);
}
- /* Kill cmd task */
- tasklet_kill(&hdev->cmd_task);
+ /* flush cmd work */
+ flush_work(&hdev->cmd_work);
/* Drop queues */
skb_queue_purge(&hdev->rx_q);
@@ -636,7 +640,9 @@ static int hci_dev_do_close(struct hci_dev *hdev)
* and no tasks are scheduled. */
hdev->close(hdev);
- mgmt_powered(hdev->id, 0);
+ hci_dev_lock(hdev);
+ mgmt_powered(hdev, 0);
+ hci_dev_unlock(hdev);
/* Clear flags */
hdev->flags = 0;
@@ -670,7 +676,6 @@ int hci_dev_reset(__u16 dev)
return -ENODEV;
hci_req_lock(hdev);
- tasklet_disable(&hdev->tx_task);
if (!test_bit(HCI_UP, &hdev->flags))
goto done;
@@ -679,10 +684,10 @@ int hci_dev_reset(__u16 dev)
skb_queue_purge(&hdev->rx_q);
skb_queue_purge(&hdev->cmd_q);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
inquiry_cache_flush(hdev);
hci_conn_hash_flush(hdev);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
if (hdev->flush)
hdev->flush(hdev);
@@ -695,7 +700,6 @@ int hci_dev_reset(__u16 dev)
msecs_to_jiffies(HCI_INIT_TIMEOUT));
done:
- tasklet_enable(&hdev->tx_task);
hci_req_unlock(hdev);
hci_dev_put(hdev);
return ret;
@@ -794,9 +798,9 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
int hci_get_dev_list(void __user *arg)
{
+ struct hci_dev *hdev;
struct hci_dev_list_req *dl;
struct hci_dev_req *dr;
- struct list_head *p;
int n = 0, size, err;
__u16 dev_num;
@@ -815,12 +819,9 @@ int hci_get_dev_list(void __user *arg)
dr = dl->dev_req;
read_lock_bh(&hci_dev_list_lock);
- list_for_each(p, &hci_dev_list) {
- struct hci_dev *hdev;
-
- hdev = list_entry(p, struct hci_dev, list);
-
- hci_del_off_timer(hdev);
+ list_for_each_entry(hdev, &hci_dev_list, list) {
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+ cancel_delayed_work(&hdev->power_off);
if (!test_bit(HCI_MGMT, &hdev->flags))
set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -855,7 +856,8 @@ int hci_get_dev_info(void __user *arg)
if (!hdev)
return -ENODEV;
- hci_del_off_timer(hdev);
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+ cancel_delayed_work_sync(&hdev->power_off);
if (!test_bit(HCI_MGMT, &hdev->flags))
set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -912,6 +914,7 @@ struct hci_dev *hci_alloc_dev(void)
if (!hdev)
return NULL;
+ hci_init_sysfs(hdev);
skb_queue_head_init(&hdev->driver_init);
return hdev;
@@ -938,39 +941,41 @@ static void hci_power_on(struct work_struct *work)
return;
if (test_bit(HCI_AUTO_OFF, &hdev->flags))
- mod_timer(&hdev->off_timer,
- jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
+ schedule_delayed_work(&hdev->power_off,
+ msecs_to_jiffies(AUTO_OFF_TIMEOUT));
if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
- mgmt_index_added(hdev->id);
+ mgmt_index_added(hdev);
}
static void hci_power_off(struct work_struct *work)
{
- struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ power_off.work);
BT_DBG("%s", hdev->name);
+ clear_bit(HCI_AUTO_OFF, &hdev->flags);
+
hci_dev_close(hdev->id);
}
-static void hci_auto_off(unsigned long data)
+static void hci_discov_off(struct work_struct *work)
{
- struct hci_dev *hdev = (struct hci_dev *) data;
+ struct hci_dev *hdev;
+ u8 scan = SCAN_PAGE;
+
+ hdev = container_of(work, struct hci_dev, discov_off.work);
BT_DBG("%s", hdev->name);
- clear_bit(HCI_AUTO_OFF, &hdev->flags);
+ hci_dev_lock(hdev);
- queue_work(hdev->workqueue, &hdev->power_off);
-}
+ hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
-void hci_del_off_timer(struct hci_dev *hdev)
-{
- BT_DBG("%s", hdev->name);
+ hdev->discov_timeout = 0;
- clear_bit(HCI_AUTO_OFF, &hdev->flags);
- del_timer(&hdev->off_timer);
+ hci_dev_unlock(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
@@ -1007,16 +1012,11 @@ int hci_link_keys_clear(struct hci_dev *hdev)
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
- struct list_head *p;
-
- list_for_each(p, &hdev->link_keys) {
- struct link_key *k;
-
- k = list_entry(p, struct link_key, list);
+ struct link_key *k;
+ list_for_each_entry(k, &hdev->link_keys, list)
if (bacmp(bdaddr, &k->bdaddr) == 0)
return k;
- }
return NULL;
}
@@ -1138,7 +1138,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
persistent = hci_persistent_key(hdev, conn, type, old_key_type);
- mgmt_new_key(hdev->id, key, persistent);
+ mgmt_new_link_key(hdev, key, persistent);
if (!persistent) {
list_del(&key->list);
@@ -1181,7 +1181,7 @@ int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
memcpy(id->rand, rand, sizeof(id->rand));
if (new_key)
- mgmt_new_key(hdev->id, key, old_key_type);
+ mgmt_new_link_key(hdev, key, old_key_type);
return 0;
}
@@ -1209,7 +1209,7 @@ static void hci_cmd_timer(unsigned long arg)
BT_ERR("%s command tx timeout", hdev->name);
atomic_set(&hdev->cmd_cnt, 1);
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
@@ -1279,16 +1279,11 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
bdaddr_t *bdaddr)
{
- struct list_head *p;
-
- list_for_each(p, &hdev->blacklist) {
- struct bdaddr_list *b;
-
- b = list_entry(p, struct bdaddr_list, list);
+ struct bdaddr_list *b;
+ list_for_each_entry(b, &hdev->blacklist, list)
if (bacmp(bdaddr, &b->bdaddr) == 0)
return b;
- }
return NULL;
}
@@ -1327,31 +1322,30 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
list_add(&entry->list, &hdev->blacklist);
- return mgmt_device_blocked(hdev->id, bdaddr);
+ return mgmt_device_blocked(hdev, bdaddr);
}
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct bdaddr_list *entry;
- if (bacmp(bdaddr, BDADDR_ANY) == 0) {
+ if (bacmp(bdaddr, BDADDR_ANY) == 0)
return hci_blacklist_clear(hdev);
- }
entry = hci_blacklist_lookup(hdev, bdaddr);
- if (!entry) {
+ if (!entry)
return -ENOENT;
- }
list_del(&entry->list);
kfree(entry);
- return mgmt_device_unblocked(hdev->id, bdaddr);
+ return mgmt_device_unblocked(hdev, bdaddr);
}
-static void hci_clear_adv_cache(unsigned long arg)
+static void hci_clear_adv_cache(struct work_struct *work)
{
- struct hci_dev *hdev = (void *) arg;
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ adv_work.work);
hci_dev_lock(hdev);
@@ -1425,7 +1419,7 @@ int hci_add_adv_entry(struct hci_dev *hdev,
int hci_register_dev(struct hci_dev *hdev)
{
struct list_head *head = &hci_dev_list, *p;
- int i, id = 0;
+ int i, id, error;
BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
hdev->bus, hdev->owner);
@@ -1433,6 +1427,11 @@ int hci_register_dev(struct hci_dev *hdev)
if (!hdev->open || !hdev->close || !hdev->destruct)
return -EINVAL;
+ /* Do not allow HCI_AMP devices to register at index 0,
+ * so the index can be used as the AMP controller ID.
+ */
+ id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
+
write_lock_bh(&hci_dev_list_lock);
/* Find first available device id */
@@ -1444,12 +1443,13 @@ int hci_register_dev(struct hci_dev *hdev)
sprintf(hdev->name, "hci%d", id);
hdev->id = id;
- list_add(&hdev->list, head);
+ list_add_tail(&hdev->list, head);
atomic_set(&hdev->refcnt, 1);
- spin_lock_init(&hdev->lock);
+ mutex_init(&hdev->lock);
hdev->flags = 0;
+ hdev->dev_flags = 0;
hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
hdev->esco_type = (ESCO_HV1);
hdev->link_mode = (HCI_LM_ACCEPT);
@@ -1459,9 +1459,10 @@ int hci_register_dev(struct hci_dev *hdev)
hdev->sniff_max_interval = 800;
hdev->sniff_min_interval = 80;
- tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
- tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
- tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
+ INIT_WORK(&hdev->rx_work, hci_rx_work);
+ INIT_WORK(&hdev->cmd_work, hci_cmd_work);
+ INIT_WORK(&hdev->tx_work, hci_tx_work);
+
skb_queue_head_init(&hdev->rx_q);
skb_queue_head_init(&hdev->cmd_q);
@@ -1479,6 +1480,8 @@ int hci_register_dev(struct hci_dev *hdev)
hci_conn_hash_init(hdev);
+ INIT_LIST_HEAD(&hdev->mgmt_pending);
+
INIT_LIST_HEAD(&hdev->blacklist);
INIT_LIST_HEAD(&hdev->uuids);
@@ -1488,12 +1491,12 @@ int hci_register_dev(struct hci_dev *hdev)
INIT_LIST_HEAD(&hdev->remote_oob_data);
INIT_LIST_HEAD(&hdev->adv_entries);
- setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
- (unsigned long) hdev);
+ INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
INIT_WORK(&hdev->power_on, hci_power_on);
- INIT_WORK(&hdev->power_off, hci_power_off);
- setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
+ INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
+
+ INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
@@ -1501,11 +1504,16 @@ int hci_register_dev(struct hci_dev *hdev)
write_unlock_bh(&hci_dev_list_lock);
- hdev->workqueue = create_singlethread_workqueue(hdev->name);
- if (!hdev->workqueue)
- goto nomem;
+ hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1);
+ if (!hdev->workqueue) {
+ error = -ENOMEM;
+ goto err;
+ }
- hci_register_sysfs(hdev);
+ error = hci_add_sysfs(hdev);
+ if (error < 0)
+ goto err_wqueue;
hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
@@ -1518,23 +1526,25 @@ int hci_register_dev(struct hci_dev *hdev)
set_bit(HCI_AUTO_OFF, &hdev->flags);
set_bit(HCI_SETUP, &hdev->flags);
- queue_work(hdev->workqueue, &hdev->power_on);
+ schedule_work(&hdev->power_on);
hci_notify(hdev, HCI_DEV_REG);
return id;
-nomem:
+err_wqueue:
+ destroy_workqueue(hdev->workqueue);
+err:
write_lock_bh(&hci_dev_list_lock);
list_del(&hdev->list);
write_unlock_bh(&hci_dev_list_lock);
- return -ENOMEM;
+ return error;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
-int hci_unregister_dev(struct hci_dev *hdev)
+void hci_unregister_dev(struct hci_dev *hdev)
{
int i;
@@ -1550,8 +1560,15 @@ int hci_unregister_dev(struct hci_dev *hdev)
kfree_skb(hdev->reassembly[i]);
if (!test_bit(HCI_INIT, &hdev->flags) &&
- !test_bit(HCI_SETUP, &hdev->flags))
- mgmt_index_removed(hdev->id);
+ !test_bit(HCI_SETUP, &hdev->flags)) {
+ hci_dev_lock(hdev);
+ mgmt_index_removed(hdev);
+ hci_dev_unlock(hdev);
+ }
+
+ /* mgmt_index_removed should take care of emptying the
+ * pending list */
+ BUG_ON(!list_empty(&hdev->mgmt_pending));
hci_notify(hdev, HCI_DEV_UNREG);
@@ -1560,24 +1577,21 @@ int hci_unregister_dev(struct hci_dev *hdev)
rfkill_destroy(hdev->rfkill);
}
- hci_unregister_sysfs(hdev);
+ hci_del_sysfs(hdev);
- hci_del_off_timer(hdev);
- del_timer(&hdev->adv_timer);
+ cancel_delayed_work_sync(&hdev->adv_work);
destroy_workqueue(hdev->workqueue);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
hci_blacklist_clear(hdev);
hci_uuids_clear(hdev);
hci_link_keys_clear(hdev);
hci_remote_oob_data_clear(hdev);
hci_adv_entries_clear(hdev);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
__hci_dev_put(hdev);
-
- return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
@@ -1613,9 +1627,8 @@ int hci_recv_frame(struct sk_buff *skb)
/* Time stamp */
__net_timestamp(skb);
- /* Queue frame for rx task */
skb_queue_tail(&hdev->rx_q, skb);
- tasklet_schedule(&hdev->rx_task);
+ queue_work(hdev->workqueue, &hdev->rx_work);
return 0;
}
@@ -1798,14 +1811,14 @@ int hci_register_proto(struct hci_proto *hp)
if (hp->id >= HCI_MAX_PROTO)
return -EINVAL;
- write_lock_bh(&hci_task_lock);
+ mutex_lock(&hci_task_lock);
if (!hci_proto[hp->id])
hci_proto[hp->id] = hp;
else
err = -EEXIST;
- write_unlock_bh(&hci_task_lock);
+ mutex_unlock(&hci_task_lock);
return err;
}
@@ -1820,14 +1833,14 @@ int hci_unregister_proto(struct hci_proto *hp)
if (hp->id >= HCI_MAX_PROTO)
return -EINVAL;
- write_lock_bh(&hci_task_lock);
+ mutex_lock(&hci_task_lock);
if (hci_proto[hp->id])
hci_proto[hp->id] = NULL;
else
err = -ENOENT;
- write_unlock_bh(&hci_task_lock);
+ mutex_unlock(&hci_task_lock);
return err;
}
@@ -1912,7 +1925,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
hdev->init_last_cmd = opcode;
skb_queue_tail(&hdev->cmd_q, skb);
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
return 0;
}
@@ -1948,23 +1961,18 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
hdr->dlen = cpu_to_le16(len);
}
-void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
+static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
+ struct sk_buff *skb, __u16 flags)
{
struct hci_dev *hdev = conn->hdev;
struct sk_buff *list;
- BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
-
- skb->dev = (void *) hdev;
- bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
- hci_add_acl_hdr(skb, conn->handle, flags);
-
list = skb_shinfo(skb)->frag_list;
if (!list) {
/* Non fragmented */
BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
- skb_queue_tail(&conn->data_q, skb);
+ skb_queue_tail(queue, skb);
} else {
/* Fragmented */
BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
@@ -1972,9 +1980,9 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
skb_shinfo(skb)->frag_list = NULL;
/* Queue all fragments atomically */
- spin_lock_bh(&conn->data_q.lock);
+ spin_lock_bh(&queue->lock);
- __skb_queue_tail(&conn->data_q, skb);
+ __skb_queue_tail(queue, skb);
flags &= ~ACL_START;
flags |= ACL_CONT;
@@ -1987,13 +1995,27 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
- __skb_queue_tail(&conn->data_q, skb);
+ __skb_queue_tail(queue, skb);
} while (list);
- spin_unlock_bh(&conn->data_q.lock);
+ spin_unlock_bh(&queue->lock);
}
+}
+
+void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
+{
+ struct hci_conn *conn = chan->conn;
+ struct hci_dev *hdev = conn->hdev;
+
+ BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
+
+ skb->dev = (void *) hdev;
+ bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
+ hci_add_acl_hdr(skb, conn->handle, flags);
- tasklet_schedule(&hdev->tx_task);
+ hci_queue_acl(conn, &chan->data_q, skb, flags);
+
+ queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
@@ -2016,7 +2038,7 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
skb_queue_tail(&conn->data_q, skb);
- tasklet_schedule(&hdev->tx_task);
+ queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
@@ -2026,16 +2048,15 @@ EXPORT_SYMBOL(hci_send_sco);
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct hci_conn *conn = NULL;
+ struct hci_conn *conn = NULL, *c;
int num = 0, min = ~0;
- struct list_head *p;
/* We don't have to lock device here. Connections are always
* added and removed with TX task disabled. */
- list_for_each(p, &h->list) {
- struct hci_conn *c;
- c = list_entry(p, struct hci_conn, list);
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
if (c->type != type || skb_queue_empty(&c->data_q))
continue;
@@ -2053,6 +2074,8 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
break;
}
+ rcu_read_unlock();
+
if (conn) {
int cnt, q;
@@ -2084,27 +2107,159 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct list_head *p;
- struct hci_conn *c;
+ struct hci_conn *c;
BT_ERR("%s link tx timeout", hdev->name);
+ rcu_read_lock();
+
/* Kill stalled connections */
- list_for_each(p, &h->list) {
- c = list_entry(p, struct hci_conn, list);
+ list_for_each_entry_rcu(c, &h->list, list) {
if (c->type == type && c->sent) {
BT_ERR("%s killing stalled connection %s",
hdev->name, batostr(&c->dst));
hci_acl_disconn(c, 0x13);
}
}
+
+ rcu_read_unlock();
}
-static inline void hci_sched_acl(struct hci_dev *hdev)
+static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
+ int *quote)
{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_chan *chan = NULL;
+ int num = 0, min = ~0, cur_prio = 0;
struct hci_conn *conn;
+ int cnt, q, conn_num = 0;
+
+ BT_DBG("%s", hdev->name);
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(conn, &h->list, list) {
+ struct hci_chan *tmp;
+
+ if (conn->type != type)
+ continue;
+
+ if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+ continue;
+
+ conn_num++;
+
+ list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
+ struct sk_buff *skb;
+
+ if (skb_queue_empty(&tmp->data_q))
+ continue;
+
+ skb = skb_peek(&tmp->data_q);
+ if (skb->priority < cur_prio)
+ continue;
+
+ if (skb->priority > cur_prio) {
+ num = 0;
+ min = ~0;
+ cur_prio = skb->priority;
+ }
+
+ num++;
+
+ if (conn->sent < min) {
+ min = conn->sent;
+ chan = tmp;
+ }
+ }
+
+ if (hci_conn_num(hdev, type) == conn_num)
+ break;
+ }
+
+ rcu_read_unlock();
+
+ if (!chan)
+ return NULL;
+
+ switch (chan->conn->type) {
+ case ACL_LINK:
+ cnt = hdev->acl_cnt;
+ break;
+ case SCO_LINK:
+ case ESCO_LINK:
+ cnt = hdev->sco_cnt;
+ break;
+ case LE_LINK:
+ cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
+ break;
+ default:
+ cnt = 0;
+ BT_ERR("Unknown link type");
+ }
+
+ q = cnt / num;
+ *quote = q ? q : 1;
+ BT_DBG("chan %p quote %d", chan, *quote);
+ return chan;
+}
+
+static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *conn;
+ int num = 0;
+
+ BT_DBG("%s", hdev->name);
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(conn, &h->list, list) {
+ struct hci_chan *chan;
+
+ if (conn->type != type)
+ continue;
+
+ if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+ continue;
+
+ num++;
+
+ list_for_each_entry_rcu(chan, &conn->chan_list, list) {
+ struct sk_buff *skb;
+
+ if (chan->sent) {
+ chan->sent = 0;
+ continue;
+ }
+
+ if (skb_queue_empty(&chan->data_q))
+ continue;
+
+ skb = skb_peek(&chan->data_q);
+ if (skb->priority >= HCI_PRIO_MAX - 1)
+ continue;
+
+ skb->priority = HCI_PRIO_MAX - 1;
+
+ BT_DBG("chan %p skb %p promoted to %d", chan, skb,
+ skb->priority);
+ }
+
+ if (hci_conn_num(hdev, type) == num)
+ break;
+ }
+
+ rcu_read_unlock();
+
+}
+
+static inline void hci_sched_acl(struct hci_dev *hdev)
+{
+ struct hci_chan *chan;
struct sk_buff *skb;
int quote;
+ unsigned int cnt;
BT_DBG("%s", hdev->name);
@@ -2118,19 +2273,35 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
hci_link_tx_to(hdev, ACL_LINK);
}
- while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
- while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
- BT_DBG("skb %p len %d", skb, skb->len);
+ cnt = hdev->acl_cnt;
+
+ while (hdev->acl_cnt &&
+ (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
+ u32 priority = (skb_peek(&chan->data_q))->priority;
+ while (quote-- && (skb = skb_peek(&chan->data_q))) {
+ BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+ skb->len, skb->priority);
+
+ /* Stop if priority has changed */
+ if (skb->priority < priority)
+ break;
- hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
+ skb = skb_dequeue(&chan->data_q);
+
+ hci_conn_enter_active_mode(chan->conn,
+ bt_cb(skb)->force_active);
hci_send_frame(skb);
hdev->acl_last_tx = jiffies;
hdev->acl_cnt--;
- conn->sent++;
+ chan->sent++;
+ chan->conn->sent++;
}
}
+
+ if (cnt != hdev->acl_cnt)
+ hci_prio_recalculate(hdev, ACL_LINK);
}
/* Schedule SCO */
@@ -2182,9 +2353,9 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
static inline void hci_sched_le(struct hci_dev *hdev)
{
- struct hci_conn *conn;
+ struct hci_chan *chan;
struct sk_buff *skb;
- int quote, cnt;
+ int quote, cnt, tmp;
BT_DBG("%s", hdev->name);
@@ -2200,29 +2371,43 @@ static inline void hci_sched_le(struct hci_dev *hdev)
}
cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
- while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
- while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
- BT_DBG("skb %p len %d", skb, skb->len);
+ tmp = cnt;
+ while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
+ u32 priority = (skb_peek(&chan->data_q))->priority;
+ while (quote-- && (skb = skb_peek(&chan->data_q))) {
+ BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+ skb->len, skb->priority);
+
+ /* Stop if priority has changed */
+ if (skb->priority < priority)
+ break;
+
+ skb = skb_dequeue(&chan->data_q);
hci_send_frame(skb);
hdev->le_last_tx = jiffies;
cnt--;
- conn->sent++;
+ chan->sent++;
+ chan->conn->sent++;
}
}
+
if (hdev->le_pkts)
hdev->le_cnt = cnt;
else
hdev->acl_cnt = cnt;
+
+ if (cnt != tmp)
+ hci_prio_recalculate(hdev, LE_LINK);
}
-static void hci_tx_task(unsigned long arg)
+static void hci_tx_work(struct work_struct *work)
{
- struct hci_dev *hdev = (struct hci_dev *) arg;
+ struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
struct sk_buff *skb;
- read_lock(&hci_task_lock);
+ mutex_lock(&hci_task_lock);
BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
hdev->sco_cnt, hdev->le_cnt);
@@ -2241,7 +2426,7 @@ static void hci_tx_task(unsigned long arg)
while ((skb = skb_dequeue(&hdev->raw_q)))
hci_send_frame(skb);
- read_unlock(&hci_task_lock);
+ mutex_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */
@@ -2270,7 +2455,7 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
if (conn) {
register struct hci_proto *hp;
- hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
+ hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
/* Send to upper protocol */
hp = hci_proto[HCI_PROTO_L2CAP];
@@ -2322,14 +2507,14 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
kfree_skb(skb);
}
-static void hci_rx_task(unsigned long arg)
+static void hci_rx_work(struct work_struct *work)
{
- struct hci_dev *hdev = (struct hci_dev *) arg;
+ struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
struct sk_buff *skb;
BT_DBG("%s", hdev->name);
- read_lock(&hci_task_lock);
+ mutex_lock(&hci_task_lock);
while ((skb = skb_dequeue(&hdev->rx_q))) {
if (atomic_read(&hdev->promisc)) {
@@ -2355,6 +2540,7 @@ static void hci_rx_task(unsigned long arg)
/* Process frame */
switch (bt_cb(skb)->pkt_type) {
case HCI_EVENT_PKT:
+ BT_DBG("%s Event packet", hdev->name);
hci_event_packet(hdev, skb);
break;
@@ -2374,12 +2560,12 @@ static void hci_rx_task(unsigned long arg)
}
}
- read_unlock(&hci_task_lock);
+ mutex_unlock(&hci_task_lock);
}
-static void hci_cmd_task(unsigned long arg)
+static void hci_cmd_work(struct work_struct *work)
{
- struct hci_dev *hdev = (struct hci_dev *) arg;
+ struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
struct sk_buff *skb;
BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
@@ -2403,7 +2589,38 @@ static void hci_cmd_task(unsigned long arg)
jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
} else {
skb_queue_head(&hdev->cmd_q, skb);
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
}
}
+
+int hci_do_inquiry(struct hci_dev *hdev, u8 length)
+{
+ /* General inquiry access code (GIAC) */
+ u8 lap[3] = { 0x33, 0x8b, 0x9e };
+ struct hci_cp_inquiry cp;
+
+ BT_DBG("%s", hdev->name);
+
+ if (test_bit(HCI_INQUIRY, &hdev->flags))
+ return -EINPROGRESS;
+
+ memset(&cp, 0, sizeof(cp));
+ memcpy(&cp.lap, lap, sizeof(cp.lap));
+ cp.length = length;
+
+ return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
+}
+
+int hci_cancel_inquiry(struct hci_dev *hdev)
+{
+ BT_DBG("%s", hdev->name);
+
+ if (!test_bit(HCI_INQUIRY, &hdev->flags))
+ return -EPERM;
+
+ return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+}
+
+module_param(enable_hs, bool, 0644);
+MODULE_PARM_DESC(enable_hs, "Enable High Speed");
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 643a41b76e2..fc5338fc2a6 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -55,12 +55,18 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s status 0x%x", hdev->name, status);
- if (status)
+ if (status) {
+ hci_dev_lock(hdev);
+ mgmt_stop_discovery_failed(hdev, status);
+ hci_dev_unlock(hdev);
return;
+ }
+
+ clear_bit(HCI_INQUIRY, &hdev->flags);
- if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
- test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 0);
+ hci_dev_lock(hdev);
+ mgmt_discovering(hdev, 0);
+ hci_dev_unlock(hdev);
hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
@@ -76,10 +82,6 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
if (status)
return;
- if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
- test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 0);
-
hci_conn_check_pending(hdev);
}
@@ -192,6 +194,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
clear_bit(HCI_RESET, &hdev->flags);
hci_req_complete(hdev, HCI_OP_RESET, status);
+
+ hdev->dev_flags = 0;
}
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -205,13 +209,15 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
if (!sent)
return;
+ hci_dev_lock(hdev);
+
if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_set_local_name_complete(hdev->id, sent, status);
+ mgmt_set_local_name_complete(hdev, sent, status);
- if (status)
- return;
+ if (status == 0)
+ memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
- memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
+ hci_dev_unlock(hdev);
}
static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -274,7 +280,8 @@ static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ __u8 param, status = *((__u8 *) skb->data);
+ int old_pscan, old_iscan;
void *sent;
BT_DBG("%s status 0x%x", hdev->name, status);
@@ -283,28 +290,40 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
if (!sent)
return;
- if (!status) {
- __u8 param = *((__u8 *) sent);
- int old_pscan, old_iscan;
-
- old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
- old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
+ param = *((__u8 *) sent);
- if (param & SCAN_INQUIRY) {
- set_bit(HCI_ISCAN, &hdev->flags);
- if (!old_iscan)
- mgmt_discoverable(hdev->id, 1);
- } else if (old_iscan)
- mgmt_discoverable(hdev->id, 0);
+ hci_dev_lock(hdev);
- if (param & SCAN_PAGE) {
- set_bit(HCI_PSCAN, &hdev->flags);
- if (!old_pscan)
- mgmt_connectable(hdev->id, 1);
- } else if (old_pscan)
- mgmt_connectable(hdev->id, 0);
+ if (status != 0) {
+ mgmt_write_scan_failed(hdev, param, status);
+ hdev->discov_timeout = 0;
+ goto done;
}
+ old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
+ old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
+
+ if (param & SCAN_INQUIRY) {
+ set_bit(HCI_ISCAN, &hdev->flags);
+ if (!old_iscan)
+ mgmt_discoverable(hdev, 1);
+ if (hdev->discov_timeout > 0) {
+ int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
+ queue_delayed_work(hdev->workqueue, &hdev->discov_off,
+ to);
+ }
+ } else if (old_iscan)
+ mgmt_discoverable(hdev, 0);
+
+ if (param & SCAN_PAGE) {
+ set_bit(HCI_PSCAN, &hdev->flags);
+ if (!old_pscan)
+ mgmt_connectable(hdev, 1);
+ } else if (old_pscan)
+ mgmt_connectable(hdev, 0);
+
+done:
+ hci_dev_unlock(hdev);
hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
}
@@ -359,11 +378,8 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
- if (hdev->notify) {
- tasklet_disable(&hdev->tx_task);
+ if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
- tasklet_enable(&hdev->tx_task);
- }
}
static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
@@ -390,11 +406,8 @@ static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb
BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
- if (hdev->notify) {
- tasklet_disable(&hdev->tx_task);
+ if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
- tasklet_enable(&hdev->tx_task);
- }
}
static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
@@ -481,7 +494,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
/* CSR 1.1 dongles does not accept any bitfield so don't try to set
* any event mask for pre 1.2 devices */
- if (hdev->lmp_ver <= 1)
+ if (hdev->hci_ver < BLUETOOTH_VER_1_2)
return;
events[4] |= 0x01; /* Flow Specification Complete */
@@ -545,7 +558,7 @@ static void hci_setup(struct hci_dev *hdev)
{
hci_setup_event_mask(hdev);
- if (hdev->hci_ver > 1)
+ if (hdev->hci_ver > BLUETOOTH_VER_1_1)
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
if (hdev->features[6] & LMP_SIMPLE_PAIR) {
@@ -700,6 +713,21 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
}
+static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ hdev->flow_ctl_mode = rp->mode;
+
+ hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
+}
+
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_buffer_size *rp = (void *) skb->data;
@@ -739,6 +767,28 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
}
+static void hci_cc_read_data_block_size(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_data_block_size *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
+ hdev->block_len = __le16_to_cpu(rp->block_len);
+ hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
+
+ hdev->block_cnt = hdev->num_blocks;
+
+ BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
+ hdev->block_cnt, hdev->block_len);
+
+ hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
+}
+
static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
@@ -748,6 +798,30 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
}
+static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ hdev->amp_status = rp->amp_status;
+ hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
+ hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
+ hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
+ hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
+ hdev->amp_type = rp->amp_type;
+ hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
+ hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
+ hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
+ hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
+
+ hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
+}
+
static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -804,19 +878,24 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ hci_dev_lock(hdev);
+
if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);
+ mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
if (rp->status != 0)
- return;
+ goto unlock;
cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
if (!cp)
- return;
+ goto unlock;
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
if (conn)
conn->pin_length = cp->pin_len;
+
+unlock:
+ hci_dev_unlock(hdev);
}
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -825,10 +904,15 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ hci_dev_lock(hdev);
+
if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
+ mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
rp->status);
+
+ hci_dev_unlock(hdev);
}
+
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -855,9 +939,13 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ hci_dev_lock(hdev);
+
if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
+ mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr,
rp->status);
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
@@ -867,9 +955,44 @@ static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ hci_dev_lock(hdev);
+
if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
+ mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
rp->status);
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ hci_dev_lock(hdev);
+
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr,
+ rp->status);
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ hci_dev_lock(hdev);
+
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
+ rp->status);
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
@@ -879,8 +1002,17 @@ static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
BT_DBG("%s status 0x%x", hdev->name, rp->status);
- mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash,
+ hci_dev_lock(hdev);
+ mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
rp->randomizer, rp->status);
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
}
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
@@ -899,13 +1031,19 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
return;
if (cp->enable == 0x01) {
- del_timer(&hdev->adv_timer);
+ set_bit(HCI_LE_SCAN, &hdev->dev_flags);
+
+ cancel_delayed_work_sync(&hdev->adv_work);
hci_dev_lock(hdev);
hci_adv_entries_clear(hdev);
hci_dev_unlock(hdev);
} else if (cp->enable == 0x00) {
- mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
+ clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
+
+ cancel_delayed_work_sync(&hdev->adv_work);
+		queue_delayed_work(hdev->workqueue, &hdev->adv_work,
+					ADV_CLEAR_TIMEOUT);
}
}
@@ -955,12 +1093,18 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
if (status) {
hci_req_complete(hdev, HCI_OP_INQUIRY, status);
hci_conn_check_pending(hdev);
+ hci_dev_lock(hdev);
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_start_discovery_failed(hdev, status);
+ hci_dev_unlock(hdev);
return;
}
- if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags) &&
- test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 1);
+ set_bit(HCI_INQUIRY, &hdev->flags);
+
+ hci_dev_lock(hdev);
+ mgmt_discovering(hdev, 1);
+ hci_dev_unlock(hdev);
}
static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
@@ -1339,13 +1483,16 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff
BT_DBG("%s status %d", hdev->name, status);
- if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
- test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 0);
-
hci_req_complete(hdev, HCI_OP_INQUIRY, status);
hci_conn_check_pending(hdev);
+
+ if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+ return;
+
+ hci_dev_lock(hdev);
+ mgmt_discovering(hdev, 0);
+ hci_dev_unlock(hdev);
}
static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1361,12 +1508,6 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
hci_dev_lock(hdev);
- if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
-
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 1);
- }
-
for (; num_rsp; num_rsp--, info++) {
bacpy(&data.bdaddr, &info->bdaddr);
data.pscan_rep_mode = info->pscan_rep_mode;
@@ -1377,8 +1518,8 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
data.rssi = 0x00;
data.ssp_mode = 0x00;
hci_inquiry_cache_update(hdev, &data);
- mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, 0,
- NULL);
+ mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
+ info->dev_class, 0, NULL);
}
hci_dev_unlock(hdev);
@@ -1412,7 +1553,8 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
conn->state = BT_CONFIG;
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
- mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
+ mgmt_connected(hdev, &ev->bdaddr, conn->type,
+ conn->dst_type);
} else
conn->state = BT_CONNECTED;
@@ -1434,7 +1576,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
}
/* Set packet type for incoming connection */
- if (!conn->out && hdev->hci_ver < 3) {
+ if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
struct hci_cp_change_conn_ptype cp;
cp.handle = ev->handle;
cp.pkt_type = cpu_to_le16(conn->pkt_type);
@@ -1444,7 +1586,8 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
} else {
conn->state = BT_CLOSED;
if (conn->type == ACL_LINK)
- mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
+ mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
+ conn->dst_type, ev->status);
}
if (conn->type == ACL_LINK)
@@ -1531,7 +1674,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
struct hci_cp_reject_conn_req cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
- cp.reason = 0x0f;
+ cp.reason = HCI_ERROR_REJ_BAD_ADDR;
hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}
}
@@ -1543,24 +1686,27 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
BT_DBG("%s status %d", hdev->name, ev->status);
- if (ev->status) {
- mgmt_disconnect_failed(hdev->id);
- return;
- }
-
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
if (!conn)
goto unlock;
- conn->state = BT_CLOSED;
+ if (ev->status == 0)
+ conn->state = BT_CLOSED;
- if (conn->type == ACL_LINK || conn->type == LE_LINK)
- mgmt_disconnected(hdev->id, &conn->dst);
+ if (conn->type == ACL_LINK || conn->type == LE_LINK) {
+ if (ev->status != 0)
+ mgmt_disconnect_failed(hdev, &conn->dst, ev->status);
+ else
+ mgmt_disconnected(hdev, &conn->dst, conn->type,
+ conn->dst_type);
+ }
- hci_proto_disconn_cfm(conn, ev->reason);
- hci_conn_del(conn);
+ if (ev->status == 0) {
+ hci_proto_disconn_cfm(conn, ev->reason);
+ hci_conn_del(conn);
+ }
unlock:
hci_dev_unlock(hdev);
@@ -1588,7 +1734,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
conn->sec_level = conn->pending_sec_level;
}
} else {
- mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
+ mgmt_auth_failed(hdev, &conn->dst, ev->status);
}
clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
@@ -1643,7 +1789,7 @@ static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb
hci_dev_lock(hdev);
if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
- mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name);
+ mgmt_remote_name(hdev, &ev->bdaddr, ev->name);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (!conn)
@@ -1894,10 +2040,22 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_cc_read_bd_addr(hdev, skb);
break;
+ case HCI_OP_READ_DATA_BLOCK_SIZE:
+ hci_cc_read_data_block_size(hdev, skb);
+ break;
+
case HCI_OP_WRITE_CA_TIMEOUT:
hci_cc_write_ca_timeout(hdev, skb);
break;
+ case HCI_OP_READ_FLOW_CONTROL_MODE:
+ hci_cc_read_flow_control_mode(hdev, skb);
+ break;
+
+ case HCI_OP_READ_LOCAL_AMP_INFO:
+ hci_cc_read_local_amp_info(hdev, skb);
+ break;
+
case HCI_OP_DELETE_STORED_LINK_KEY:
hci_cc_delete_stored_link_key(hdev, skb);
break;
@@ -1942,6 +2100,17 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_cc_user_confirm_neg_reply(hdev, skb);
break;
+ case HCI_OP_USER_PASSKEY_REPLY:
+ hci_cc_user_passkey_reply(hdev, skb);
+ break;
+
+ case HCI_OP_USER_PASSKEY_NEG_REPLY:
+ hci_cc_user_passkey_neg_reply(hdev, skb);
+ break;
+
+ case HCI_OP_LE_SET_SCAN_PARAM:
+ hci_cc_le_set_scan_param(hdev, skb);
+ break;
+
case HCI_OP_LE_SET_SCAN_ENABLE:
hci_cc_le_set_scan_enable(hdev, skb);
break;
@@ -1969,7 +2138,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
if (ev->ncmd) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
}
@@ -2029,7 +2198,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
case HCI_OP_DISCONNECT:
if (ev->status != 0)
- mgmt_disconnect_failed(hdev->id);
+ mgmt_disconnect_failed(hdev, NULL, ev->status);
break;
case HCI_OP_LE_CREATE_CONN:
@@ -2051,7 +2220,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
}
@@ -2096,8 +2265,6 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
return;
}
- tasklet_disable(&hdev->tx_task);
-
for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
struct hci_conn *conn;
__u16 handle, count;
@@ -2106,34 +2273,43 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
count = get_unaligned_le16(ptr++);
conn = hci_conn_hash_lookup_handle(hdev, handle);
- if (conn) {
- conn->sent -= count;
-
- if (conn->type == ACL_LINK) {
+ if (!conn)
+ continue;
+
+ conn->sent -= count;
+
+ switch (conn->type) {
+ case ACL_LINK:
+ hdev->acl_cnt += count;
+ if (hdev->acl_cnt > hdev->acl_pkts)
+ hdev->acl_cnt = hdev->acl_pkts;
+ break;
+
+ case LE_LINK:
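+ /* Controllers without a dedicated LE buffer pool report
+ * le_pkts == 0 and share the ACL buffers, so credit the
+ * ACL counters in that case */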
+ if (hdev->le_pkts) {
+ hdev->le_cnt += count;
+ if (hdev->le_cnt > hdev->le_pkts)
+ hdev->le_cnt = hdev->le_pkts;
+ } else {
hdev->acl_cnt += count;
if (hdev->acl_cnt > hdev->acl_pkts)
hdev->acl_cnt = hdev->acl_pkts;
- } else if (conn->type == LE_LINK) {
- if (hdev->le_pkts) {
- hdev->le_cnt += count;
- if (hdev->le_cnt > hdev->le_pkts)
- hdev->le_cnt = hdev->le_pkts;
- } else {
- hdev->acl_cnt += count;
- if (hdev->acl_cnt > hdev->acl_pkts)
- hdev->acl_cnt = hdev->acl_pkts;
- }
- } else {
- hdev->sco_cnt += count;
- if (hdev->sco_cnt > hdev->sco_pkts)
- hdev->sco_cnt = hdev->sco_pkts;
}
+ break;
+
+ case SCO_LINK:
+ hdev->sco_cnt += count;
+ if (hdev->sco_cnt > hdev->sco_pkts)
+ hdev->sco_cnt = hdev->sco_pkts;
+ break;
+
+ default:
+ BT_ERR("Unknown type %d conn %p", conn->type, conn);
+ break;
}
}
- tasklet_schedule(&hdev->tx_task);
-
- tasklet_enable(&hdev->tx_task);
+ queue_work(hdev->workqueue, &hdev->tx_work);
}
static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -2194,7 +2370,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
else
secure = 0;
- mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure);
+ mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
}
unlock:
@@ -2363,12 +2539,6 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
hci_dev_lock(hdev);
- if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
-
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 1);
- }
-
if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
struct inquiry_info_with_rssi_and_pscan_mode *info;
info = (void *) (skb->data + 1);
@@ -2383,7 +2553,7 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
data.rssi = info->rssi;
data.ssp_mode = 0x00;
hci_inquiry_cache_update(hdev, &data);
- mgmt_device_found(hdev->id, &info->bdaddr,
+ mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
info->dev_class, info->rssi,
NULL);
}
@@ -2400,7 +2570,7 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
data.rssi = info->rssi;
data.ssp_mode = 0x00;
hci_inquiry_cache_update(hdev, &data);
- mgmt_device_found(hdev->id, &info->bdaddr,
+ mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
info->dev_class, info->rssi,
NULL);
}
@@ -2531,12 +2701,6 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
if (!num_rsp)
return;
- if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
-
- if (test_bit(HCI_MGMT, &hdev->flags))
- mgmt_discovering(hdev->id, 1);
- }
-
hci_dev_lock(hdev);
for (; num_rsp; num_rsp--, info++) {
@@ -2549,8 +2713,8 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
data.rssi = info->rssi;
data.ssp_mode = 0x01;
hci_inquiry_cache_update(hdev, &data);
- mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class,
- info->rssi, info->data);
+ mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
+ info->dev_class, info->rssi, info->data);
}
hci_dev_unlock(hdev);
@@ -2614,7 +2778,7 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
struct hci_cp_io_capability_neg_reply cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
- cp.reason = 0x18; /* Pairing not allowed */
+ cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
sizeof(cp), &cp);
@@ -2706,13 +2870,28 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
}
confirm:
- mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey,
+ mgmt_user_confirm_request(hdev, &ev->bdaddr, ev->passkey,
confirm_hint);
unlock:
hci_dev_unlock(hdev);
}
+static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_user_passkey_req *ev = (void *) skb->data;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ if (test_bit(HCI_MGMT, &hdev->flags))
+ mgmt_user_passkey_request(hdev, &ev->bdaddr);
+
+ hci_dev_unlock(hdev);
+}
+
static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
@@ -2732,7 +2911,7 @@ static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_
* event gets always produced as initiator and is also mapped to
* the mgmt_auth_failed event */
if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
- mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
+ mgmt_auth_failed(hdev, &conn->dst, ev->status);
hci_conn_put(conn);
@@ -2813,14 +2992,15 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
}
if (ev->status) {
- mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
+ mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
+ conn->dst_type, ev->status);
hci_proto_connect_cfm(conn, ev->status);
conn->state = BT_CLOSED;
hci_conn_del(conn);
goto unlock;
}
- mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
+ mgmt_connected(hdev, &ev->bdaddr, conn->type, conn->dst_type);
conn->sec_level = BT_SECURITY_LOW;
conn->handle = __le16_to_cpu(ev->handle);
@@ -3051,6 +3231,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
hci_user_confirm_request_evt(hdev, skb);
break;
+ case HCI_EV_USER_PASSKEY_REQUEST:
+ hci_user_passkey_request_evt(hdev, skb);
+ break;
+
case HCI_EV_SIMPLE_PAIR_COMPLETE:
hci_simple_pair_complete_evt(hdev, skb);
break;
@@ -3104,5 +3288,5 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
kfree_skb(skb);
}
-module_param(enable_le, bool, 0444);
+module_param(enable_le, bool, 0644);
MODULE_PARM_DESC(enable_le, "Enable LE support");
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index f6afe3d76a6..189a667c293 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -188,11 +188,11 @@ static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
return -EFAULT;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
err = hci_blacklist_add(hdev, &bdaddr);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
return err;
}
@@ -205,11 +205,11 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
return -EFAULT;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
err = hci_blacklist_del(hdev, &bdaddr);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
return err;
}
@@ -343,8 +343,11 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
if (haddr.hci_channel > HCI_CHANNEL_CONTROL)
return -EINVAL;
- if (haddr.hci_channel == HCI_CHANNEL_CONTROL && !enable_mgmt)
- return -EINVAL;
+ if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
+ if (!enable_mgmt)
+ return -EINVAL;
+ set_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags);
+ }
lock_sock(sk);
@@ -535,10 +538,10 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
skb_queue_tail(&hdev->raw_q, skb);
- tasklet_schedule(&hdev->tx_task);
+ queue_work(hdev->workqueue, &hdev->tx_work);
} else {
skb_queue_tail(&hdev->cmd_q, skb);
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
} else {
if (!capable(CAP_NET_RAW)) {
@@ -547,7 +550,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
}
skb_queue_tail(&hdev->raw_q, skb);
- tasklet_schedule(&hdev->tx_task);
+ queue_work(hdev->workqueue, &hdev->tx_work);
}
err = len;
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 22f1a6c8703..db6af705f8f 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -88,11 +88,35 @@ static struct device_type bt_link = {
.release = bt_link_release,
};
-static void add_conn(struct work_struct *work)
+/*
+ * The rfcomm tty device will possibly retain even when conn
+ * is down, and sysfs doesn't support moving a zombie device,
+ * so we should move the device before the conn device is destroyed.
+ */
+static int __match_tty(struct device *dev, void *data)
+{
+ return !strncmp(dev_name(dev), "rfcomm", 6);
+}
+
+void hci_conn_init_sysfs(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+
+ BT_DBG("conn %p", conn);
+
+ conn->dev.type = &bt_link;
+ conn->dev.class = bt_class;
+ conn->dev.parent = &hdev->dev;
+
+ device_initialize(&conn->dev);
+}
+
+void hci_conn_add_sysfs(struct hci_conn *conn)
{
- struct hci_conn *conn = container_of(work, struct hci_conn, work_add);
struct hci_dev *hdev = conn->hdev;
+ BT_DBG("conn %p", conn);
+
dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
dev_set_drvdata(&conn->dev, conn);
@@ -105,19 +129,8 @@ static void add_conn(struct work_struct *work)
hci_dev_hold(hdev);
}
-/*
- * The rfcomm tty device will possibly retain even when conn
- * is down, and sysfs doesn't support move zombie device,
- * so we should move the device before conn device is destroyed.
- */
-static int __match_tty(struct device *dev, void *data)
-{
- return !strncmp(dev_name(dev), "rfcomm", 6);
-}
-
-static void del_conn(struct work_struct *work)
+void hci_conn_del_sysfs(struct hci_conn *conn)
{
- struct hci_conn *conn = container_of(work, struct hci_conn, work_del);
struct hci_dev *hdev = conn->hdev;
if (!device_is_registered(&conn->dev))
@@ -139,36 +152,6 @@ static void del_conn(struct work_struct *work)
hci_dev_put(hdev);
}
-void hci_conn_init_sysfs(struct hci_conn *conn)
-{
- struct hci_dev *hdev = conn->hdev;
-
- BT_DBG("conn %p", conn);
-
- conn->dev.type = &bt_link;
- conn->dev.class = bt_class;
- conn->dev.parent = &hdev->dev;
-
- device_initialize(&conn->dev);
-
- INIT_WORK(&conn->work_add, add_conn);
- INIT_WORK(&conn->work_del, del_conn);
-}
-
-void hci_conn_add_sysfs(struct hci_conn *conn)
-{
- BT_DBG("conn %p", conn);
-
- queue_work(conn->hdev->workqueue, &conn->work_add);
-}
-
-void hci_conn_del_sysfs(struct hci_conn *conn)
-{
- BT_DBG("conn %p", conn);
-
- queue_work(conn->hdev->workqueue, &conn->work_del);
-}
-
static inline char *host_bustostr(int bus)
{
switch (bus) {
@@ -402,7 +385,7 @@ static int inquiry_cache_show(struct seq_file *f, void *p)
struct inquiry_cache *cache = &hdev->inq_cache;
struct inquiry_entry *e;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
for (e = cache->list; e; e = e->next) {
struct inquiry_data *data = &e->data;
@@ -415,7 +398,7 @@ static int inquiry_cache_show(struct seq_file *f, void *p)
data->rssi, data->ssp_mode, e->timestamp);
}
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
return 0;
}
@@ -435,19 +418,14 @@ static const struct file_operations inquiry_cache_fops = {
static int blacklist_show(struct seq_file *f, void *p)
{
struct hci_dev *hdev = f->private;
- struct list_head *l;
+ struct bdaddr_list *b;
- hci_dev_lock_bh(hdev);
-
- list_for_each(l, &hdev->blacklist) {
- struct bdaddr_list *b;
-
- b = list_entry(l, struct bdaddr_list, list);
+ hci_dev_lock(hdev);
+ list_for_each_entry(b, &hdev->blacklist, list)
seq_printf(f, "%s\n", batostr(&b->bdaddr));
- }
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
return 0;
}
@@ -484,19 +462,14 @@ static void print_bt_uuid(struct seq_file *f, u8 *uuid)
static int uuids_show(struct seq_file *f, void *p)
{
struct hci_dev *hdev = f->private;
- struct list_head *l;
-
- hci_dev_lock_bh(hdev);
-
- list_for_each(l, &hdev->uuids) {
- struct bt_uuid *uuid;
+ struct bt_uuid *uuid;
- uuid = list_entry(l, struct bt_uuid, list);
+ hci_dev_lock(hdev);
+ list_for_each_entry(uuid, &hdev->uuids, list)
print_bt_uuid(f, uuid->uuid);
- }
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
return 0;
}
@@ -517,11 +490,11 @@ static int auto_accept_delay_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
hdev->auto_accept_delay = val;
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
return 0;
}
@@ -530,11 +503,11 @@ static int auto_accept_delay_get(void *data, u64 *val)
{
struct hci_dev *hdev = data;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
*val = hdev->auto_accept_delay;
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
return 0;
}
@@ -542,22 +515,28 @@ static int auto_accept_delay_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
auto_accept_delay_set, "%llu\n");
-int hci_register_sysfs(struct hci_dev *hdev)
+void hci_init_sysfs(struct hci_dev *hdev)
+{
+ struct device *dev = &hdev->dev;
+
+ dev->type = &bt_host;
+ dev->class = bt_class;
+
+ dev_set_drvdata(dev, hdev);
+ device_initialize(dev);
+}
+
+int hci_add_sysfs(struct hci_dev *hdev)
{
struct device *dev = &hdev->dev;
int err;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- dev->type = &bt_host;
- dev->class = bt_class;
dev->parent = hdev->parent;
-
dev_set_name(dev, "%s", hdev->name);
- dev_set_drvdata(dev, hdev);
-
- err = device_register(dev);
+ err = device_add(dev);
if (err < 0)
return err;
@@ -581,7 +560,7 @@ int hci_register_sysfs(struct hci_dev *hdev)
return 0;
}
-void hci_unregister_sysfs(struct hci_dev *hdev)
+void hci_del_sysfs(struct hci_dev *hdev)
{
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 075a3e920ca..d478be11d56 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -81,24 +81,20 @@ static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 };
static struct hidp_session *__hidp_get_session(bdaddr_t *bdaddr)
{
struct hidp_session *session;
- struct list_head *p;
BT_DBG("");
- list_for_each(p, &hidp_session_list) {
- session = list_entry(p, struct hidp_session, list);
+ list_for_each_entry(session, &hidp_session_list, list) {
if (!bacmp(bdaddr, &session->bdaddr))
return session;
}
+
return NULL;
}
static void __hidp_link_session(struct hidp_session *session)
{
- __module_get(THIS_MODULE);
list_add(&session->list, &hidp_session_list);
-
- hci_conn_hold_device(session->conn);
}
static void __hidp_unlink_session(struct hidp_session *session)
@@ -106,7 +102,6 @@ static void __hidp_unlink_session(struct hidp_session *session)
hci_conn_put_device(session->conn);
list_del(&session->list);
- module_put(THIS_MODULE);
}
static void __hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci)
@@ -255,6 +250,9 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
BT_DBG("session %p data %p size %d", session, data, size);
+ if (atomic_read(&session->terminate))
+ return -EIO;
+
skb = alloc_skb(size + 1, GFP_ATOMIC);
if (!skb) {
BT_ERR("Can't allocate memory for new frame");
@@ -329,6 +327,7 @@ static int hidp_get_raw_report(struct hid_device *hid,
struct sk_buff *skb;
size_t len;
int numbered_reports = hid->report_enum[report_type].numbered;
+ int ret;
switch (report_type) {
case HID_FEATURE_REPORT:
@@ -352,8 +351,9 @@ static int hidp_get_raw_report(struct hid_device *hid,
session->waiting_report_number = numbered_reports ? report_number : -1;
set_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
data[0] = report_number;
- if (hidp_send_ctrl_message(hid->driver_data, report_type, data, 1))
- goto err_eio;
+ ret = hidp_send_ctrl_message(hid->driver_data, report_type, data, 1);
+ if (ret)
+ goto err;
/* Wait for the return of the report. The returned report
gets put in session->report_return. */
@@ -365,11 +365,13 @@ static int hidp_get_raw_report(struct hid_device *hid,
5*HZ);
if (res == 0) {
/* timeout */
- goto err_eio;
+ ret = -EIO;
+ goto err;
}
if (res < 0) {
/* signal */
- goto err_restartsys;
+ ret = -ERESTARTSYS;
+ goto err;
}
}
@@ -390,14 +392,10 @@ static int hidp_get_raw_report(struct hid_device *hid,
return len;
-err_restartsys:
- clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
- mutex_unlock(&session->report_mutex);
- return -ERESTARTSYS;
-err_eio:
+err:
clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
mutex_unlock(&session->report_mutex);
- return -EIO;
+ return ret;
}
static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count,
@@ -422,11 +420,10 @@ static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, s
/* Set up our wait, and send the report request to the device. */
set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
- if (hidp_send_ctrl_message(hid->driver_data, report_type,
- data, count)) {
- ret = -ENOMEM;
+ ret = hidp_send_ctrl_message(hid->driver_data, report_type, data,
+ count);
+ if (ret)
goto err;
- }
/* Wait for the ACK from the device. */
while (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) {
@@ -496,10 +493,9 @@ static void hidp_process_handshake(struct hidp_session *session,
case HIDP_HSHK_ERR_INVALID_REPORT_ID:
case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST:
case HIDP_HSHK_ERR_INVALID_PARAMETER:
- if (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags)) {
- clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+ if (test_and_clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags))
wake_up_interruptible(&session->report_queue);
- }
+
/* FIXME: Call into SET_ GET_ handlers here */
break;
@@ -520,10 +516,8 @@ static void hidp_process_handshake(struct hidp_session *session,
}
/* Wake up the waiting thread. */
- if (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) {
- clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
+ if (test_and_clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags))
wake_up_interruptible(&session->report_queue);
- }
}
static void hidp_process_hid_control(struct hidp_session *session,
@@ -663,25 +657,32 @@ static int hidp_send_frame(struct socket *sock, unsigned char *data, int len)
return kernel_sendmsg(sock, &msg, &iv, 1, len);
}
-static void hidp_process_transmit(struct hidp_session *session)
+static void hidp_process_intr_transmit(struct hidp_session *session)
{
struct sk_buff *skb;
BT_DBG("session %p", session);
- while ((skb = skb_dequeue(&session->ctrl_transmit))) {
- if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) {
- skb_queue_head(&session->ctrl_transmit, skb);
+ while ((skb = skb_dequeue(&session->intr_transmit))) {
+ if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) {
+ skb_queue_head(&session->intr_transmit, skb);
break;
}
hidp_set_timer(session);
kfree_skb(skb);
}
+}
- while ((skb = skb_dequeue(&session->intr_transmit))) {
- if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) {
- skb_queue_head(&session->intr_transmit, skb);
+static void hidp_process_ctrl_transmit(struct hidp_session *session)
+{
+ struct sk_buff *skb;
+
+ BT_DBG("session %p", session);
+
+ while ((skb = skb_dequeue(&session->ctrl_transmit))) {
+ if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) {
+ skb_queue_head(&session->ctrl_transmit, skb);
break;
}
@@ -700,6 +701,7 @@ static int hidp_session(void *arg)
BT_DBG("session %p", session);
+ __module_get(THIS_MODULE);
set_user_nice(current, -15);
init_waitqueue_entry(&ctrl_wait, current);
@@ -714,23 +716,25 @@ static int hidp_session(void *arg)
intr_sk->sk_state != BT_CONNECTED)
break;
- while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
+ while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
skb_orphan(skb);
if (!skb_linearize(skb))
- hidp_recv_ctrl_frame(session, skb);
+ hidp_recv_intr_frame(session, skb);
else
kfree_skb(skb);
}
- while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
+ hidp_process_intr_transmit(session);
+
+ while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
skb_orphan(skb);
if (!skb_linearize(skb))
- hidp_recv_intr_frame(session, skb);
+ hidp_recv_ctrl_frame(session, skb);
else
kfree_skb(skb);
}
- hidp_process_transmit(session);
+ hidp_process_ctrl_transmit(session);
schedule();
set_current_state(TASK_INTERRUPTIBLE);
@@ -739,6 +743,10 @@ static int hidp_session(void *arg)
remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
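+ /* Unblock anyone still waiting in hidp_get_raw_report() or
+ * hidp_output_raw_report() before the session goes away */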
+ clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
+ clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+ wake_up_interruptible(&session->report_queue);
+
down_write(&hidp_session_sem);
hidp_del_timer(session);
@@ -772,34 +780,37 @@ static int hidp_session(void *arg)
kfree(session->rd_data);
kfree(session);
+ module_put_and_exit(0);
return 0;
}
-static struct device *hidp_get_device(struct hidp_session *session)
+static struct hci_conn *hidp_get_connection(struct hidp_session *session)
{
bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src;
bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst;
- struct device *device = NULL;
+ struct hci_conn *conn;
struct hci_dev *hdev;
hdev = hci_get_route(dst, src);
if (!hdev)
return NULL;
- session->conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
- if (session->conn)
- device = &session->conn->dev;
+ hci_dev_lock(hdev);
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
+ if (conn)
+ hci_conn_hold_device(conn);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
- return device;
+ return conn;
}
static int hidp_setup_input(struct hidp_session *session,
struct hidp_connadd_req *req)
{
struct input_dev *input;
- int err, i;
+ int i;
input = input_allocate_device();
if (!input)
@@ -842,17 +853,10 @@ static int hidp_setup_input(struct hidp_session *session,
input->relbit[0] |= BIT_MASK(REL_WHEEL);
}
- input->dev.parent = hidp_get_device(session);
+ input->dev.parent = &session->conn->dev;
input->event = hidp_input_event;
- err = input_register_device(input);
- if (err < 0) {
- input_free_device(input);
- session->input = NULL;
- return err;
- }
-
return 0;
}
@@ -949,7 +953,7 @@ static int hidp_setup_hid(struct hidp_session *session,
strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64);
strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64);
- hid->dev.parent = hidp_get_device(session);
+ hid->dev.parent = &session->conn->dev;
hid->ll_driver = &hidp_hid_driver;
hid->hid_get_raw_report = hidp_get_raw_report;
@@ -976,18 +980,20 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst))
return -ENOTUNIQ;
- session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL);
- if (!session)
- return -ENOMEM;
-
BT_DBG("rd_data %p rd_size %d", req->rd_data, req->rd_size);
down_write(&hidp_session_sem);
s = __hidp_get_session(&bt_sk(ctrl_sock->sk)->dst);
if (s && s->state == BT_CONNECTED) {
- err = -EEXIST;
- goto failed;
+ up_write(&hidp_session_sem);
+ return -EEXIST;
+ }
+
+ session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL);
+ if (!session) {
+ up_write(&hidp_session_sem);
+ return -ENOMEM;
}
bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst);
@@ -1003,6 +1009,12 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
session->intr_sock = intr_sock;
session->state = BT_CONNECTED;
+ session->conn = hidp_get_connection(session);
+ if (!session->conn) {
+ err = -ENOTCONN;
+ goto failed;
+ }
+
setup_timer(&session->timer, hidp_idle_timeout, (unsigned long)session);
skb_queue_head_init(&session->ctrl_transmit);
@@ -1015,9 +1027,11 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID);
session->idle_to = req->idle_to;
+ __hidp_link_session(session);
+
if (req->rd_size > 0) {
err = hidp_setup_hid(session, req);
- if (err && err != -ENODEV)
+ if (err)
goto purge;
}
@@ -1027,8 +1041,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
goto purge;
}
- __hidp_link_session(session);
-
hidp_set_timer(session);
if (session->hid) {
@@ -1054,7 +1066,11 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
!session->waiting_for_startup);
}
- err = hid_add_device(session->hid);
+ if (session->hid)
+ err = hid_add_device(session->hid);
+ else
+ err = input_register_device(session->input);
+
if (err < 0) {
atomic_inc(&session->terminate);
wake_up_process(session->task);
@@ -1077,8 +1093,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
unlink:
hidp_del_timer(session);
- __hidp_unlink_session(session);
-
if (session->input) {
input_unregister_device(session->input);
session->input = NULL;
@@ -1093,6 +1107,8 @@ unlink:
session->rd_data = NULL;
purge:
+ __hidp_unlink_session(session);
+
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
@@ -1134,19 +1150,16 @@ int hidp_del_connection(struct hidp_conndel_req *req)
int hidp_get_connlist(struct hidp_connlist_req *req)
{
- struct list_head *p;
+ struct hidp_session *session;
int err = 0, n = 0;
BT_DBG("");
down_read(&hidp_session_sem);
- list_for_each(p, &hidp_session_list) {
- struct hidp_session *session;
+ list_for_each_entry(session, &hidp_session_list, list) {
struct hidp_conninfo ci;
- session = list_entry(p, struct hidp_session, list);
-
__hidp_copy_session(session, &ci);
if (copy_to_user(req->ci, &ci, sizeof(ci))) {
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 17b5b1cd965..ffa2f6b8408 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3,6 +3,7 @@
Copyright (C) 2000-2001 Qualcomm Incorporated
Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Copyright (C) 2010 Google Inc.
+ Copyright (C) 2011 ProFUSION Embedded Systems
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -59,7 +60,7 @@
int disable_ertm;
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
-static u8 l2cap_fixed_chan[8] = { 0x02, };
+static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);
@@ -89,25 +90,36 @@ static inline void chan_put(struct l2cap_chan *c)
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
{
- struct l2cap_chan *c;
+ struct l2cap_chan *c, *r = NULL;
- list_for_each_entry(c, &conn->chan_l, list) {
- if (c->dcid == cid)
- return c;
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &conn->chan_l, list) {
+ if (c->dcid == cid) {
+ r = c;
+ break;
+ }
}
- return NULL;
+ rcu_read_unlock();
+ return r;
}
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
- struct l2cap_chan *c;
+ struct l2cap_chan *c, *r = NULL;
- list_for_each_entry(c, &conn->chan_l, list) {
- if (c->scid == cid)
- return c;
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &conn->chan_l, list) {
+ if (c->scid == cid) {
+ r = c;
+ break;
+ }
}
- return NULL;
+
+ rcu_read_unlock();
+ return r;
}
/* Find channel with given SCID.
@@ -116,34 +128,36 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 ci
{
struct l2cap_chan *c;
- read_lock(&conn->chan_lock);
c = __l2cap_get_chan_by_scid(conn, cid);
if (c)
- bh_lock_sock(c->sk);
- read_unlock(&conn->chan_lock);
+ lock_sock(c->sk);
return c;
}
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
{
- struct l2cap_chan *c;
+ struct l2cap_chan *c, *r = NULL;
- list_for_each_entry(c, &conn->chan_l, list) {
- if (c->ident == ident)
- return c;
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &conn->chan_l, list) {
+ if (c->ident == ident) {
+ r = c;
+ break;
+ }
}
- return NULL;
+
+ rcu_read_unlock();
+ return r;
}
static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
{
struct l2cap_chan *c;
- read_lock(&conn->chan_lock);
c = __l2cap_get_chan_by_ident(conn, ident);
if (c)
- bh_lock_sock(c->sk);
- read_unlock(&conn->chan_lock);
+ lock_sock(c->sk);
return c;
}
@@ -153,12 +167,9 @@ static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
list_for_each_entry(c, &chan_list, global_l) {
if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
- goto found;
+ return c;
}
-
- c = NULL;
-found:
- return c;
+ return NULL;
}
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
@@ -217,45 +228,65 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
return 0;
}
-static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
+static void l2cap_set_timer(struct l2cap_chan *chan, struct delayed_work *work, long timeout)
{
- BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
+ BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
- if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
- chan_hold(chan);
+ cancel_delayed_work_sync(work);
+
+ schedule_delayed_work(work, timeout);
}
-static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
+static void l2cap_clear_timer(struct delayed_work *work)
{
- BT_DBG("chan %p state %d", chan, chan->state);
+ cancel_delayed_work_sync(work);
+}
- if (timer_pending(timer) && del_timer(timer))
- chan_put(chan);
+static char *state_to_string(int state)
+{
+ switch(state) {
+ case BT_CONNECTED:
+ return "BT_CONNECTED";
+ case BT_OPEN:
+ return "BT_OPEN";
+ case BT_BOUND:
+ return "BT_BOUND";
+ case BT_LISTEN:
+ return "BT_LISTEN";
+ case BT_CONNECT:
+ return "BT_CONNECT";
+ case BT_CONNECT2:
+ return "BT_CONNECT2";
+ case BT_CONFIG:
+ return "BT_CONFIG";
+ case BT_DISCONN:
+ return "BT_DISCONN";
+ case BT_CLOSED:
+ return "BT_CLOSED";
+ }
+
+ return "invalid state";
}
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
+ BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
+ state_to_string(state));
+
chan->state = state;
chan->ops->state_change(chan->data, state);
}
-static void l2cap_chan_timeout(unsigned long arg)
+static void l2cap_chan_timeout(struct work_struct *work)
{
- struct l2cap_chan *chan = (struct l2cap_chan *) arg;
+ struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
+ chan_timer.work);
struct sock *sk = chan->sk;
int reason;
BT_DBG("chan %p state %d", chan, chan->state);
- bh_lock_sock(sk);
-
- if (sock_owned_by_user(sk)) {
- /* sk is owned by user. Try again later */
- __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
- bh_unlock_sock(sk);
- chan_put(chan);
- return;
- }
+ lock_sock(sk);
if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
reason = ECONNREFUSED;
@@ -267,7 +298,7 @@ static void l2cap_chan_timeout(unsigned long arg)
l2cap_chan_close(chan, reason);
- bh_unlock_sock(sk);
+ release_sock(sk);
chan->ops->close(chan->data);
chan_put(chan);
@@ -287,12 +318,14 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
list_add(&chan->global_l, &chan_list);
write_unlock_bh(&chan_list_lock);
- setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
+ INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
chan->state = BT_OPEN;
atomic_set(&chan->refcnt, 1);
+ BT_DBG("sk %p chan %p", sk, chan);
+
return chan;
}
@@ -305,12 +338,12 @@ void l2cap_chan_destroy(struct l2cap_chan *chan)
chan_put(chan);
}
-static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
chan->psm, chan->dcid);
- conn->disc_reason = 0x13;
+ conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
chan->conn = conn;
@@ -337,9 +370,16 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
chan->omtu = L2CAP_DEFAULT_MTU;
}
+ chan->local_id = L2CAP_BESTEFFORT_ID;
+ chan->local_stype = L2CAP_SERV_BESTEFFORT;
+ chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
+ chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
+ chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
+ chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
+
chan_hold(chan);
- list_add(&chan->list, &conn->chan_l);
+ list_add_rcu(&chan->list, &conn->chan_l);
}
/* Delete channel.
@@ -356,9 +396,9 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
if (conn) {
/* Delete from channel list */
- write_lock_bh(&conn->chan_lock);
- list_del(&chan->list);
- write_unlock_bh(&conn->chan_lock);
+ list_del_rcu(&chan->list);
+ synchronize_rcu();
+
chan_put(chan);
chan->conn = NULL;
@@ -508,7 +548,7 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
}
/* Service level security */
-static inline int l2cap_check_security(struct l2cap_chan *chan)
+int l2cap_chan_check_security(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
__u8 auth_type;
@@ -556,34 +596,58 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
flags = ACL_START;
bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
+ skb->priority = HCI_PRIO_MAX;
+
+ hci_send_acl(conn->hchan, skb, flags);
+}
+
+static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ struct hci_conn *hcon = chan->conn->hcon;
+ u16 flags;
- hci_send_acl(conn->hcon, skb, flags);
+ BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
+ skb->priority);
+
+ if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
+ lmp_no_flush_capable(hcon->hdev))
+ flags = ACL_START_NO_FLUSH;
+ else
+ flags = ACL_START;
+
+ bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+ hci_send_acl(chan->conn->hchan, skb, flags);
}
-static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
+static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
{
struct sk_buff *skb;
struct l2cap_hdr *lh;
struct l2cap_conn *conn = chan->conn;
- int count, hlen = L2CAP_HDR_SIZE + 2;
- u8 flags;
+ int count, hlen;
if (chan->state != BT_CONNECTED)
return;
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ hlen = L2CAP_EXT_HDR_SIZE;
+ else
+ hlen = L2CAP_ENH_HDR_SIZE;
+
if (chan->fcs == L2CAP_FCS_CRC16)
- hlen += 2;
+ hlen += L2CAP_FCS_SIZE;
- BT_DBG("chan %p, control 0x%2.2x", chan, control);
+ BT_DBG("chan %p, control 0x%8.8x", chan, control);
count = min_t(unsigned int, conn->mtu, hlen);
- control |= L2CAP_CTRL_FRAME_TYPE;
+
+ control |= __set_sframe(chan);
if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
- control |= L2CAP_CTRL_FINAL;
+ control |= __set_ctrl_final(chan);
if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
- control |= L2CAP_CTRL_POLL;
+ control |= __set_ctrl_poll(chan);
skb = bt_skb_alloc(count, GFP_ATOMIC);
if (!skb)
@@ -592,32 +656,27 @@ static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
- put_unaligned_le16(control, skb_put(skb, 2));
+
+ __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
if (chan->fcs == L2CAP_FCS_CRC16) {
- u16 fcs = crc16(0, (u8 *)lh, count - 2);
- put_unaligned_le16(fcs, skb_put(skb, 2));
+ u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
+ put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
}
- if (lmp_no_flush_capable(conn->hcon->hdev))
- flags = ACL_START_NO_FLUSH;
- else
- flags = ACL_START;
-
- bt_cb(skb)->force_active = chan->force_active;
-
- hci_send_acl(chan->conn->hcon, skb, flags);
+ skb->priority = HCI_PRIO_MAX;
+ l2cap_do_send(chan, skb);
}
-static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
+static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
{
if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- control |= L2CAP_SUPER_RCV_NOT_READY;
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
set_bit(CONN_RNR_SENT, &chan->conn_state);
} else
- control |= L2CAP_SUPER_RCV_READY;
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
- control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ control |= __set_reqseq(chan, chan->buffer_seq);
l2cap_send_sframe(chan, control);
}
@@ -635,7 +694,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
return;
- if (l2cap_check_security(chan) &&
+ if (l2cap_chan_check_security(chan) &&
__l2cap_no_conn_pending(chan)) {
struct l2cap_conn_req req;
req.scid = cpu_to_le16(chan->scid);
@@ -654,7 +713,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);
- mod_timer(&conn->info_timer, jiffies +
+ schedule_delayed_work(&conn->info_work,
msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
l2cap_send_cmd(conn, conn->info_ident,
@@ -706,13 +765,13 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c
/* ---- L2CAP connections ---- */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
- struct l2cap_chan *chan, *tmp;
+ struct l2cap_chan *chan;
BT_DBG("conn %p", conn);
- read_lock(&conn->chan_lock);
+ rcu_read_lock();
- list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
+ list_for_each_entry_rcu(chan, &conn->chan_l, list) {
struct sock *sk = chan->sk;
bh_lock_sock(sk);
@@ -725,7 +784,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
if (chan->state == BT_CONNECT) {
struct l2cap_conn_req req;
- if (!l2cap_check_security(chan) ||
+ if (!l2cap_chan_check_security(chan) ||
!__l2cap_no_conn_pending(chan)) {
bh_unlock_sock(sk);
continue;
@@ -736,9 +795,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
&chan->conf_state)) {
/* l2cap_chan_close() calls list_del(chan)
* so release the lock */
- read_unlock(&conn->chan_lock);
l2cap_chan_close(chan, ECONNRESET);
- read_lock(&conn->chan_lock);
bh_unlock_sock(sk);
continue;
}
@@ -758,7 +815,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
rsp.scid = cpu_to_le16(chan->dcid);
rsp.dcid = cpu_to_le16(chan->scid);
- if (l2cap_check_security(chan)) {
+ if (l2cap_chan_check_security(chan)) {
if (bt_sk(sk)->defer_setup) {
struct sock *parent = bt_sk(sk)->parent;
rsp.result = cpu_to_le16(L2CAP_CR_PEND);
@@ -794,7 +851,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
bh_unlock_sock(sk);
}
- read_unlock(&conn->chan_lock);
+ rcu_read_unlock();
}
/* Find socket with cid and source bdaddr.
@@ -845,7 +902,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
parent = pchan->sk;
- bh_lock_sock(parent);
+ lock_sock(parent);
/* Check for backlog size */
if (sk_acceptq_is_full(parent)) {
@@ -859,8 +916,6 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
sk = chan->sk;
- write_lock_bh(&conn->chan_lock);
-
hci_conn_hold(conn->hcon);
bacpy(&bt_sk(sk)->src, conn->src);
@@ -868,17 +923,15 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
bt_accept_enqueue(parent, sk);
- __l2cap_chan_add(conn, chan);
+ l2cap_chan_add(conn, chan);
__set_chan_timer(chan, sk->sk_sndtimeo);
l2cap_state_change(chan, BT_CONNECTED);
parent->sk_data_ready(parent, 0);
- write_unlock_bh(&conn->chan_lock);
-
clean:
- bh_unlock_sock(parent);
+ release_sock(parent);
}
static void l2cap_chan_ready(struct sock *sk)
@@ -910,9 +963,9 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
if (conn->hcon->out && conn->hcon->type == LE_LINK)
smp_conn_security(conn, conn->hcon->pending_sec_level);
- read_lock(&conn->chan_lock);
+ rcu_read_lock();
- list_for_each_entry(chan, &conn->chan_l, list) {
+ list_for_each_entry_rcu(chan, &conn->chan_l, list) {
struct sock *sk = chan->sk;
bh_lock_sock(sk);
@@ -932,7 +985,7 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
bh_unlock_sock(sk);
}
- read_unlock(&conn->chan_lock);
+ rcu_read_unlock();
}
/* Notify sockets that we cannot guaranty reliability anymore */
@@ -942,21 +995,22 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
BT_DBG("conn %p", conn);
- read_lock(&conn->chan_lock);
+ rcu_read_lock();
- list_for_each_entry(chan, &conn->chan_l, list) {
+ list_for_each_entry_rcu(chan, &conn->chan_l, list) {
struct sock *sk = chan->sk;
- if (chan->force_reliable)
+ if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
sk->sk_err = err;
}
- read_unlock(&conn->chan_lock);
+ rcu_read_unlock();
}
-static void l2cap_info_timeout(unsigned long arg)
+static void l2cap_info_timeout(struct work_struct *work)
{
- struct l2cap_conn *conn = (void *) arg;
+ struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
+ info_work.work);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
@@ -980,14 +1034,16 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
/* Kill channels */
list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
sk = chan->sk;
- bh_lock_sock(sk);
+ lock_sock(sk);
l2cap_chan_del(chan, err);
- bh_unlock_sock(sk);
+ release_sock(sk);
chan->ops->close(chan->data);
}
+ hci_chan_del(conn->hchan);
+
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
- del_timer_sync(&conn->info_timer);
+ cancel_delayed_work_sync(&conn->info_work);
if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
del_timer(&conn->security_timer);
@@ -1008,18 +1064,26 @@ static void security_timeout(unsigned long arg)
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
struct l2cap_conn *conn = hcon->l2cap_data;
+ struct hci_chan *hchan;
if (conn || status)
return conn;
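+ /* ACL data for this connection is now sent through an hci_chan,
+ * so create it before allocating the l2cap_conn itself */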
+ hchan = hci_chan_create(hcon);
+ if (!hchan)
+ return NULL;
+
conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
- if (!conn)
+ if (!conn) {
+ hci_chan_del(hchan);
return NULL;
+ }
hcon->l2cap_data = conn;
conn->hcon = hcon;
+ conn->hchan = hchan;
- BT_DBG("hcon %p conn %p", hcon, conn);
+ BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
conn->mtu = hcon->hdev->le_mtu;
@@ -1032,7 +1096,6 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
conn->feat_mask = 0;
spin_lock_init(&conn->lock);
- rwlock_init(&conn->chan_lock);
INIT_LIST_HEAD(&conn->chan_l);
@@ -1040,21 +1103,13 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
setup_timer(&conn->security_timer, security_timeout,
(unsigned long) conn);
else
- setup_timer(&conn->info_timer, l2cap_info_timeout,
- (unsigned long) conn);
+ INIT_DELAYED_WORK(&conn->info_work, l2cap_info_timeout);
- conn->disc_reason = 0x13;
+ conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
return conn;
}
-static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
-{
- write_lock_bh(&conn->chan_lock);
- __l2cap_chan_add(conn, chan);
- write_unlock_bh(&conn->chan_lock);
-}
-
/* ---- Socket interface ---- */
/* Find socket with psm and source bdaddr.
@@ -1090,11 +1145,10 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
return c1;
}
-int l2cap_chan_connect(struct l2cap_chan *chan)
+inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
{
struct sock *sk = chan->sk;
bdaddr_t *src = &bt_sk(sk)->src;
- bdaddr_t *dst = &bt_sk(sk)->dst;
struct l2cap_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
@@ -1108,7 +1162,62 @@ int l2cap_chan_connect(struct l2cap_chan *chan)
if (!hdev)
return -EHOSTUNREACH;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
+
+ lock_sock(sk);
+
+ /* PSM must be odd and lsb of upper byte must be 0 */
+ if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
+ chan->chan_type != L2CAP_CHAN_RAW) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ switch (chan->mode) {
+ case L2CAP_MODE_BASIC:
+ break;
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ if (!disable_ertm)
+ break;
+ /* fall through */
+ default:
+ err = -ENOTSUPP;
+ goto done;
+ }
+
+ switch (sk->sk_state) {
+ case BT_CONNECT:
+ case BT_CONNECT2:
+ case BT_CONFIG:
+ /* Already connecting */
+ err = 0;
+ goto done;
+
+ case BT_CONNECTED:
+ /* Already connected */
+ err = -EISCONN;
+ goto done;
+
+ case BT_OPEN:
+ case BT_BOUND:
+ /* Can connect */
+ break;
+
+ default:
+ err = -EBADFD;
+ goto done;
+ }
+
+ /* Set destination address and psm */
+ bacpy(&bt_sk(sk)->dst, src);
+ chan->psm = psm;
+ chan->dcid = cid;
auth_type = l2cap_get_auth_type(chan);
@@ -1142,7 +1251,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan)
if (hcon->state == BT_CONNECTED) {
if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
__clear_chan_timer(chan);
- if (l2cap_check_security(chan))
+ if (l2cap_chan_check_security(chan))
l2cap_state_change(chan, BT_CONNECTED);
} else
l2cap_do_start(chan);
@@ -1151,7 +1260,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan)
err = 0;
done:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
@@ -1188,17 +1297,18 @@ int __l2cap_wait_ack(struct sock *sk)
return err;
}
-static void l2cap_monitor_timeout(unsigned long arg)
+static void l2cap_monitor_timeout(struct work_struct *work)
{
- struct l2cap_chan *chan = (void *) arg;
+ struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
+ monitor_timer.work);
struct sock *sk = chan->sk;
BT_DBG("chan %p", chan);
- bh_lock_sock(sk);
+ lock_sock(sk);
if (chan->retry_count >= chan->remote_max_tx) {
l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
- bh_unlock_sock(sk);
+ release_sock(sk);
return;
}
@@ -1206,24 +1316,25 @@ static void l2cap_monitor_timeout(unsigned long arg)
__set_monitor_timer(chan);
l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
- bh_unlock_sock(sk);
+ release_sock(sk);
}
-static void l2cap_retrans_timeout(unsigned long arg)
+static void l2cap_retrans_timeout(struct work_struct *work)
{
- struct l2cap_chan *chan = (void *) arg;
+ struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
+ retrans_timer.work);
struct sock *sk = chan->sk;
BT_DBG("chan %p", chan);
- bh_lock_sock(sk);
+ lock_sock(sk);
chan->retry_count = 1;
__set_monitor_timer(chan);
set_bit(CONN_WAIT_F, &chan->conn_state);
l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
- bh_unlock_sock(sk);
+ release_sock(sk);
}
static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
@@ -1245,60 +1356,46 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
__clear_retrans_timer(chan);
}
-static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
-{
- struct hci_conn *hcon = chan->conn->hcon;
- u16 flags;
-
- BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
-
- if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
- flags = ACL_START_NO_FLUSH;
- else
- flags = ACL_START;
-
- bt_cb(skb)->force_active = chan->force_active;
- hci_send_acl(hcon, skb, flags);
-}
-
static void l2cap_streaming_send(struct l2cap_chan *chan)
{
struct sk_buff *skb;
- u16 control, fcs;
+ u32 control;
+ u16 fcs;
while ((skb = skb_dequeue(&chan->tx_q))) {
- control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
- control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
- put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
+ control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
+ control |= __set_txseq(chan, chan->next_tx_seq);
+ __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
if (chan->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
- put_unaligned_le16(fcs, skb->data + skb->len - 2);
+ fcs = crc16(0, (u8 *)skb->data,
+ skb->len - L2CAP_FCS_SIZE);
+ put_unaligned_le16(fcs,
+ skb->data + skb->len - L2CAP_FCS_SIZE);
}
l2cap_do_send(chan, skb);
- chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
+ chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
}
}
-static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
+static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
{
struct sk_buff *skb, *tx_skb;
- u16 control, fcs;
+ u16 fcs;
+ u32 control;
skb = skb_peek(&chan->tx_q);
if (!skb)
return;
- do {
- if (bt_cb(skb)->tx_seq == tx_seq)
- break;
-
+ while (bt_cb(skb)->tx_seq != tx_seq) {
if (skb_queue_is_last(&chan->tx_q, skb))
return;
- } while ((skb = skb_queue_next(&chan->tx_q, skb)));
+ skb = skb_queue_next(&chan->tx_q, skb);
+ }
if (chan->remote_max_tx &&
bt_cb(skb)->retries == chan->remote_max_tx) {
@@ -1308,20 +1405,23 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
tx_skb = skb_clone(skb, GFP_ATOMIC);
bt_cb(skb)->retries++;
- control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
- control &= L2CAP_CTRL_SAR;
+
+ control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
+ control &= __get_sar_mask(chan);
if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
- control |= L2CAP_CTRL_FINAL;
+ control |= __set_ctrl_final(chan);
- control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
- | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
+ control |= __set_reqseq(chan, chan->buffer_seq);
+ control |= __set_txseq(chan, tx_seq);
- put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+ __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
if (chan->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
- put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
+ fcs = crc16(0, (u8 *)tx_skb->data,
+ tx_skb->len - L2CAP_FCS_SIZE);
+ put_unaligned_le16(fcs,
+ tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
}
l2cap_do_send(chan, tx_skb);
@@ -1330,7 +1430,8 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
struct sk_buff *skb, *tx_skb;
- u16 control, fcs;
+ u16 fcs;
+ u32 control;
int nsent = 0;
if (chan->state != BT_CONNECTED)
@@ -1348,20 +1449,22 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
bt_cb(skb)->retries++;
- control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
- control &= L2CAP_CTRL_SAR;
+ control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
+ control &= __get_sar_mask(chan);
if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
- control |= L2CAP_CTRL_FINAL;
+ control |= __set_ctrl_final(chan);
- control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
- | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
- put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+ control |= __set_reqseq(chan, chan->buffer_seq);
+ control |= __set_txseq(chan, chan->next_tx_seq);
+ __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
if (chan->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
- put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
+ fcs = crc16(0, (u8 *)skb->data,
+ tx_skb->len - L2CAP_FCS_SIZE);
+ put_unaligned_le16(fcs, skb->data +
+ tx_skb->len - L2CAP_FCS_SIZE);
}
l2cap_do_send(chan, tx_skb);
@@ -1369,7 +1472,8 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
__set_retrans_timer(chan);
bt_cb(skb)->tx_seq = chan->next_tx_seq;
- chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
+
+ chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
if (bt_cb(skb)->retries == 1)
chan->unacked_frames++;
@@ -1401,12 +1505,12 @@ static int l2cap_retransmit_frames(struct l2cap_chan *chan)
static void l2cap_send_ack(struct l2cap_chan *chan)
{
- u16 control = 0;
+ u32 control = 0;
- control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ control |= __set_reqseq(chan, chan->buffer_seq);
if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- control |= L2CAP_SUPER_RCV_NOT_READY;
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
set_bit(CONN_RNR_SENT, &chan->conn_state);
l2cap_send_sframe(chan, control);
return;
@@ -1415,20 +1519,20 @@ static void l2cap_send_ack(struct l2cap_chan *chan)
if (l2cap_ertm_send(chan) > 0)
return;
- control |= L2CAP_SUPER_RCV_READY;
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
l2cap_send_sframe(chan, control);
}
static void l2cap_send_srejtail(struct l2cap_chan *chan)
{
struct srej_list *tail;
- u16 control;
+ u32 control;
- control = L2CAP_SUPER_SELECT_REJECT;
- control |= L2CAP_CTRL_FINAL;
+ control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
+ control |= __set_ctrl_final(chan);
tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
- control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ control |= __set_reqseq(chan, tail->tx_seq);
l2cap_send_sframe(chan, control);
}
@@ -1456,6 +1560,8 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
return -EFAULT;
+ (*frag)->priority = skb->priority;
+
sent += count;
len -= count;
@@ -1465,15 +1571,17 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
return sent;
}
-static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
+ struct msghdr *msg, size_t len,
+ u32 priority)
{
struct sock *sk = chan->sk;
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
- int err, count, hlen = L2CAP_HDR_SIZE + 2;
+ int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
struct l2cap_hdr *lh;
- BT_DBG("sk %p len %d", sk, (int)len);
+ BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
count = min_t(unsigned int, (conn->mtu - hlen), len);
skb = bt_skb_send_alloc(sk, count + hlen,
@@ -1481,6 +1589,8 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct
if (!skb)
return ERR_PTR(err);
+ skb->priority = priority;
+
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
@@ -1495,7 +1605,9 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct
return skb;
}
-static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
+ struct msghdr *msg, size_t len,
+ u32 priority)
{
struct sock *sk = chan->sk;
struct l2cap_conn *conn = chan->conn;
@@ -1511,6 +1623,8 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct ms
if (!skb)
return ERR_PTR(err);
+ skb->priority = priority;
+
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
@@ -1526,12 +1640,12 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct ms
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
struct msghdr *msg, size_t len,
- u16 control, u16 sdulen)
+ u32 control, u16 sdulen)
{
struct sock *sk = chan->sk;
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
- int err, count, hlen = L2CAP_HDR_SIZE + 2;
+ int err, count, hlen;
struct l2cap_hdr *lh;
BT_DBG("sk %p len %d", sk, (int)len);
@@ -1539,11 +1653,16 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
if (!conn)
return ERR_PTR(-ENOTCONN);
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ hlen = L2CAP_EXT_HDR_SIZE;
+ else
+ hlen = L2CAP_ENH_HDR_SIZE;
+
if (sdulen)
- hlen += 2;
+ hlen += L2CAP_SDULEN_SIZE;
if (chan->fcs == L2CAP_FCS_CRC16)
- hlen += 2;
+ hlen += L2CAP_FCS_SIZE;
count = min_t(unsigned int, (conn->mtu - hlen), len);
skb = bt_skb_send_alloc(sk, count + hlen,
@@ -1555,9 +1674,11 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- put_unaligned_le16(control, skb_put(skb, 2));
+
+ __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
+
if (sdulen)
- put_unaligned_le16(sdulen, skb_put(skb, 2));
+ put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
if (unlikely(err < 0)) {
@@ -1566,7 +1687,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
}
if (chan->fcs == L2CAP_FCS_CRC16)
- put_unaligned_le16(0, skb_put(skb, 2));
+ put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
bt_cb(skb)->retries = 0;
return skb;
@@ -1576,11 +1697,11 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si
{
struct sk_buff *skb;
struct sk_buff_head sar_queue;
- u16 control;
+ u32 control;
size_t size = 0;
skb_queue_head_init(&sar_queue);
- control = L2CAP_SDU_START;
+ control = __set_ctrl_sar(chan, L2CAP_SAR_START);
skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1593,10 +1714,10 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si
size_t buflen;
if (len > chan->remote_mps) {
- control = L2CAP_SDU_CONTINUE;
+ control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
buflen = chan->remote_mps;
} else {
- control = L2CAP_SDU_END;
+ control = __set_ctrl_sar(chan, L2CAP_SAR_END);
buflen = len;
}
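
l2cap_sar_segment_sdu() above walks the SDU in remote_mps-sized chunks, tagging the first PDU SAR_START (which also carries the 16-bit SDU length), the middle PDUs SAR_CONTINUE and the last one SAR_END; SDUs that fit in a single PDU are sent SAR_UNSEGMENTED from l2cap_chan_send(). A standalone sketch of that chunking decision follows, with the SAR tags as plain enum stand-ins.

/* Sketch of ERTM/streaming SAR chunking: which SAR tag each fragment of
 * an SDU gets.  Names mirror the patch; the logic is illustrative. */
#include <stddef.h>
#include <stdio.h>

enum sar { SAR_UNSEGMENTED, SAR_START, SAR_CONTINUE, SAR_END };

static void segment(size_t len, size_t remote_mps)
{
    size_t off = 0;

    if (len <= remote_mps) {
        printf("one PDU, SAR_UNSEGMENTED (%zu bytes)\n", len);
        return;
    }

    printf("PDU at %zu: SAR_START, %zu bytes (plus 2-byte SDU length)\n",
           off, remote_mps);
    off += remote_mps;
    len -= remote_mps;

    while (len > remote_mps) {
        printf("PDU at %zu: SAR_CONTINUE, %zu bytes\n", off, remote_mps);
        off += remote_mps;
        len -= remote_mps;
    }

    printf("PDU at %zu: SAR_END, %zu bytes\n", off, len);
}

int main(void)
{
    segment(2000, 672);     /* 672 + 672 + 656 */
    return 0;
}
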
@@ -1617,15 +1738,16 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si
return size;
}
-int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
+ u32 priority)
{
struct sk_buff *skb;
- u16 control;
+ u32 control;
int err;
/* Connectionless channel */
if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
- skb = l2cap_create_connless_pdu(chan, msg, len);
+ skb = l2cap_create_connless_pdu(chan, msg, len, priority);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1640,7 +1762,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
return -EMSGSIZE;
/* Create a basic PDU */
- skb = l2cap_create_basic_pdu(chan, msg, len);
+ skb = l2cap_create_basic_pdu(chan, msg, len, priority);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1652,7 +1774,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
case L2CAP_MODE_STREAMING:
/* Entire SDU fits into one PDU */
if (len <= chan->remote_mps) {
- control = L2CAP_SDU_UNSEGMENTED;
+ control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
skb = l2cap_create_iframe_pdu(chan, msg, len, control,
0);
if (IS_ERR(skb))
@@ -1704,8 +1826,9 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
BT_DBG("conn %p", conn);
- read_lock(&conn->chan_lock);
- list_for_each_entry(chan, &conn->chan_l, list) {
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(chan, &conn->chan_l, list) {
struct sock *sk = chan->sk;
if (chan->chan_type != L2CAP_CHAN_RAW)
continue;
@@ -1720,7 +1843,8 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
if (chan->ops->recv(chan->data, nskb))
kfree_skb(nskb);
}
- read_unlock(&conn->chan_lock);
+
+ rcu_read_unlock();
}
/* ---- L2CAP signalling commands ---- */
@@ -1850,37 +1974,62 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
*ptr += L2CAP_CONF_OPT_SIZE + len;
}
-static void l2cap_ack_timeout(unsigned long arg)
+static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
- struct l2cap_chan *chan = (void *) arg;
+ struct l2cap_conf_efs efs;
+
+ switch (chan->mode) {
+ case L2CAP_MODE_ERTM:
+ efs.id = chan->local_id;
+ efs.stype = chan->local_stype;
+ efs.msdu = cpu_to_le16(chan->local_msdu);
+ efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
+ efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
+ efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
+ break;
- bh_lock_sock(chan->sk);
+ case L2CAP_MODE_STREAMING:
+ efs.id = 1;
+ efs.stype = L2CAP_SERV_BESTEFFORT;
+ efs.msdu = cpu_to_le16(chan->local_msdu);
+ efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
+ efs.acc_lat = 0;
+ efs.flush_to = 0;
+ break;
+
+ default:
+ return;
+ }
+
+ l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
+ (unsigned long) &efs);
+}
+
+static void l2cap_ack_timeout(struct work_struct *work)
+{
+ struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
+ ack_timer.work);
+
+ lock_sock(chan->sk);
l2cap_send_ack(chan);
- bh_unlock_sock(chan->sk);
+ release_sock(chan->sk);
}
static inline void l2cap_ertm_init(struct l2cap_chan *chan)
{
- struct sock *sk = chan->sk;
-
chan->expected_ack_seq = 0;
chan->unacked_frames = 0;
chan->buffer_seq = 0;
chan->num_acked = 0;
chan->frames_sent = 0;
- setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
- (unsigned long) chan);
- setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
- (unsigned long) chan);
- setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
+ INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
+ INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
+ INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
skb_queue_head_init(&chan->srej_q);
INIT_LIST_HEAD(&chan->srej_l);
-
-
- sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@@ -1896,11 +2045,36 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
}
}
+static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
+{
+ return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
+}
+
+static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
+{
+ return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
+}
+
+static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
+{
+ if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
+ __l2cap_ews_supported(chan)) {
+ /* use extended control field */
+ set_bit(FLAG_EXT_CTRL, &chan->flags);
+ chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
+ } else {
+ chan->tx_win = min_t(u16, chan->tx_win,
+ L2CAP_DEFAULT_TX_WINDOW);
+ chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
+ }
+}
+
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
struct l2cap_conf_req *req = data;
struct l2cap_conf_rfc rfc = { .mode = chan->mode };
void *ptr = req->data;
+ u16 size;
BT_DBG("chan %p", chan);
@@ -1913,6 +2087,9 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
break;
+ if (__l2cap_efs_supported(chan))
+ set_bit(FLAG_EFS_ENABLE, &chan->flags);
+
/* fall through */
default:
chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
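
l2cap_txwin_setup() above keeps a transmit window larger than the default 63 frames only when extended window size can be used on the link, and clamps it otherwise. The sketch below mirrors that decision; the 63 and 0x3fff limits are assumptions about the values usually behind L2CAP_DEFAULT_TX_WINDOW and L2CAP_DEFAULT_EXT_WINDOW, not values shown in this patch.

/* Sketch of the transmit window clamp done by l2cap_txwin_setup().
 * Window limits are assumed spec values, not read from this patch. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEFAULT_TX_WINDOW   63
#define DEFAULT_EXT_WINDOW  0x3fff

struct win {
    uint16_t tx_win;
    uint16_t tx_win_max;
    bool ext_ctrl;          /* stand-in for FLAG_EXT_CTRL */
};

static void txwin_setup(struct win *w, bool ews_supported)
{
    if (w->tx_win > DEFAULT_TX_WINDOW && ews_supported) {
        /* keep the large window, switch to the extended control field */
        w->ext_ctrl = true;
        w->tx_win_max = DEFAULT_EXT_WINDOW;
    } else {
        if (w->tx_win > DEFAULT_TX_WINDOW)
            w->tx_win = DEFAULT_TX_WINDOW;
        w->tx_win_max = DEFAULT_TX_WINDOW;
    }
}

int main(void)
{
    struct win a = { .tx_win = 200 }, b = { .tx_win = 200 };

    txwin_setup(&a, true);
    txwin_setup(&b, false);
    printf("EWS available: win=%d max=%d ext=%d\n",
           a.tx_win, a.tx_win_max, a.ext_ctrl);
    printf("no EWS:        win=%d max=%d ext=%d\n",
           b.tx_win, b.tx_win_max, b.ext_ctrl);
    return 0;
}
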
@@ -1942,17 +2119,27 @@ done:
case L2CAP_MODE_ERTM:
rfc.mode = L2CAP_MODE_ERTM;
- rfc.txwin_size = chan->tx_win;
rfc.max_transmit = chan->max_tx;
rfc.retrans_timeout = 0;
rfc.monitor_timeout = 0;
- rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
- if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
- rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
+
+ size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
+ L2CAP_EXT_HDR_SIZE -
+ L2CAP_SDULEN_SIZE -
+ L2CAP_FCS_SIZE);
+ rfc.max_pdu_size = cpu_to_le16(size);
+
+ l2cap_txwin_setup(chan);
+
+ rfc.txwin_size = min_t(u16, chan->tx_win,
+ L2CAP_DEFAULT_TX_WINDOW);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
(unsigned long) &rfc);
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+ l2cap_add_opt_efs(&ptr, chan);
+
if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
break;
@@ -1961,6 +2148,10 @@ done:
chan->fcs = L2CAP_FCS_NONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
}
+
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+ chan->tx_win);
break;
case L2CAP_MODE_STREAMING:
@@ -1969,13 +2160,19 @@ done:
rfc.max_transmit = 0;
rfc.retrans_timeout = 0;
rfc.monitor_timeout = 0;
- rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
- if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
- rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
+
+ size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
+ L2CAP_EXT_HDR_SIZE -
+ L2CAP_SDULEN_SIZE -
+ L2CAP_FCS_SIZE);
+ rfc.max_pdu_size = cpu_to_le16(size);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
(unsigned long) &rfc);
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+ l2cap_add_opt_efs(&ptr, chan);
+
if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
break;
@@ -2002,8 +2199,11 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
int type, hint, olen;
unsigned long val;
struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+ struct l2cap_conf_efs efs;
+ u8 remote_efs = 0;
u16 mtu = L2CAP_DEFAULT_MTU;
u16 result = L2CAP_CONF_SUCCESS;
+ u16 size;
BT_DBG("chan %p", chan);
@@ -2033,7 +2233,22 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
case L2CAP_CONF_FCS:
if (val == L2CAP_FCS_NONE)
set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
+ break;
+
+ case L2CAP_CONF_EFS:
+ remote_efs = 1;
+ if (olen == sizeof(efs))
+ memcpy(&efs, (void *) val, olen);
+ break;
+
+ case L2CAP_CONF_EWS:
+ if (!enable_hs)
+ return -ECONNREFUSED;
+ set_bit(FLAG_EXT_CTRL, &chan->flags);
+ set_bit(CONF_EWS_RECV, &chan->conf_state);
+ chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
+ chan->remote_tx_win = val;
break;
default:
@@ -2058,6 +2273,13 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
break;
}
+ if (remote_efs) {
+ if (__l2cap_efs_supported(chan))
+ set_bit(FLAG_EFS_ENABLE, &chan->flags);
+ else
+ return -ECONNREFUSED;
+ }
+
if (chan->mode != rfc.mode)
return -ECONNREFUSED;
@@ -2076,7 +2298,6 @@ done:
sizeof(rfc), (unsigned long) &rfc);
}
-
if (result == L2CAP_CONF_SUCCESS) {
/* Configure output options and let the other side know
* which ones we don't like. */
@@ -2089,6 +2310,26 @@ done:
}
l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
+ if (remote_efs) {
+ if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != chan->local_stype) {
+
+ result = L2CAP_CONF_UNACCEPT;
+
+ if (chan->num_conf_req >= 1)
+ return -ECONNREFUSED;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ sizeof(efs),
+ (unsigned long) &efs);
+ } else {
+ /* Send PENDING Conf Rsp */
+ result = L2CAP_CONF_PENDING;
+ set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
+ }
+ }
+
switch (rfc.mode) {
case L2CAP_MODE_BASIC:
chan->fcs = L2CAP_FCS_NONE;
@@ -2096,13 +2337,20 @@ done:
break;
case L2CAP_MODE_ERTM:
- chan->remote_tx_win = rfc.txwin_size;
- chan->remote_max_tx = rfc.max_transmit;
+ if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
+ chan->remote_tx_win = rfc.txwin_size;
+ else
+ rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
- if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
- rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
+ chan->remote_max_tx = rfc.max_transmit;
- chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
+ size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
+ chan->conn->mtu -
+ L2CAP_EXT_HDR_SIZE -
+ L2CAP_SDULEN_SIZE -
+ L2CAP_FCS_SIZE);
+ rfc.max_pdu_size = cpu_to_le16(size);
+ chan->remote_mps = size;
rfc.retrans_timeout =
le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
@@ -2114,13 +2362,29 @@ done:
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+ chan->remote_id = efs.id;
+ chan->remote_stype = efs.stype;
+ chan->remote_msdu = le16_to_cpu(efs.msdu);
+ chan->remote_flush_to =
+ le32_to_cpu(efs.flush_to);
+ chan->remote_acc_lat =
+ le32_to_cpu(efs.acc_lat);
+ chan->remote_sdu_itime =
+ le32_to_cpu(efs.sdu_itime);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ sizeof(efs), (unsigned long) &efs);
+ }
break;
case L2CAP_MODE_STREAMING:
- if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
- rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
-
- chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
+ size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
+ chan->conn->mtu -
+ L2CAP_EXT_HDR_SIZE -
+ L2CAP_SDULEN_SIZE -
+ L2CAP_FCS_SIZE);
+ rfc.max_pdu_size = cpu_to_le16(size);
+ chan->remote_mps = size;
set_bit(CONF_MODE_DONE, &chan->conf_state);
@@ -2153,6 +2417,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
int type, olen;
unsigned long val;
struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+ struct l2cap_conf_efs efs;
BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
@@ -2188,6 +2453,26 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
break;
+
+ case L2CAP_CONF_EWS:
+ chan->tx_win = min_t(u16, val,
+ L2CAP_DEFAULT_EXT_WINDOW);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+ chan->tx_win);
+ break;
+
+ case L2CAP_CONF_EFS:
+ if (olen == sizeof(efs))
+ memcpy(&efs, (void *)val, olen);
+
+ if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != chan->local_stype)
+ return -ECONNREFUSED;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ sizeof(efs), (unsigned long) &efs);
+ break;
}
}
@@ -2196,13 +2481,23 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
chan->mode = rfc.mode;
- if (*result == L2CAP_CONF_SUCCESS) {
+ if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
switch (rfc.mode) {
case L2CAP_MODE_ERTM:
chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
chan->mps = le16_to_cpu(rfc.max_pdu_size);
+
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+ chan->local_msdu = le16_to_cpu(efs.msdu);
+ chan->local_sdu_itime =
+ le32_to_cpu(efs.sdu_itime);
+ chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
+ chan->local_flush_to =
+ le32_to_cpu(efs.flush_to);
+ }
break;
+
case L2CAP_MODE_STREAMING:
chan->mps = le16_to_cpu(rfc.max_pdu_size);
}
@@ -2302,7 +2597,7 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hd
if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
cmd->ident == conn->info_ident) {
- del_timer(&conn->info_timer);
+ cancel_delayed_work_sync(&conn->info_work);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
@@ -2335,12 +2630,12 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
parent = pchan->sk;
- bh_lock_sock(parent);
+ lock_sock(parent);
/* Check if the ACL is secure enough (if not SDP) */
if (psm != cpu_to_le16(0x0001) &&
!hci_conn_check_link_mode(conn->hcon)) {
- conn->disc_reason = 0x05;
+ conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
result = L2CAP_CR_SEC_BLOCK;
goto response;
}
@@ -2359,11 +2654,8 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
sk = chan->sk;
- write_lock_bh(&conn->chan_lock);
-
/* Check if we already have channel with that dcid */
if (__l2cap_get_chan_by_dcid(conn, scid)) {
- write_unlock_bh(&conn->chan_lock);
sock_set_flag(sk, SOCK_ZAPPED);
chan->ops->close(chan->data);
goto response;
@@ -2378,7 +2670,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
bt_accept_enqueue(parent, sk);
- __l2cap_chan_add(conn, chan);
+ l2cap_chan_add(conn, chan);
dcid = chan->scid;
@@ -2387,7 +2679,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
chan->ident = cmd->ident;
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
- if (l2cap_check_security(chan)) {
+ if (l2cap_chan_check_security(chan)) {
if (bt_sk(sk)->defer_setup) {
l2cap_state_change(chan, BT_CONNECT2);
result = L2CAP_CR_PEND;
@@ -2409,10 +2701,8 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
status = L2CAP_CS_NO_INFO;
}
- write_unlock_bh(&conn->chan_lock);
-
response:
- bh_unlock_sock(parent);
+ release_sock(parent);
sendresp:
rsp.scid = cpu_to_le16(scid);
@@ -2428,7 +2718,7 @@ sendresp:
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);
- mod_timer(&conn->info_timer, jiffies +
+ schedule_delayed_work(&conn->info_work,
msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
l2cap_send_cmd(conn, conn->info_ident,
@@ -2494,19 +2784,11 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
break;
default:
- /* don't delete l2cap channel if sk is owned by user */
- if (sock_owned_by_user(sk)) {
- l2cap_state_change(chan, BT_DISCONN);
- __clear_chan_timer(chan);
- __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
- break;
- }
-
l2cap_chan_del(chan, ECONNREFUSED);
break;
}
- bh_unlock_sock(sk);
+ release_sock(sk);
return 0;
}
@@ -2612,8 +2894,23 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
chan->num_conf_req++;
}
+	/* Got Conf Rsp PENDING from remote side and assume we sent
+	   Conf Rsp PENDING in the code above */
+ if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
+ test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
+
+ /* check compatibility */
+
+ clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
+ set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
+
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
+ l2cap_build_conf_rsp(chan, rsp,
+ L2CAP_CONF_SUCCESS, 0x0000), rsp);
+ }
+
unlock:
- bh_unlock_sock(sk);
+ release_sock(sk);
return 0;
}
@@ -2641,8 +2938,33 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
switch (result) {
case L2CAP_CONF_SUCCESS:
l2cap_conf_rfc_get(chan, rsp->data, len);
+ clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
break;
+ case L2CAP_CONF_PENDING:
+ set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
+
+ if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
+ char buf[64];
+
+ len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+ buf, &result);
+ if (len < 0) {
+ l2cap_send_disconn_req(conn, chan, ECONNRESET);
+ goto done;
+ }
+
+ /* check compatibility */
+
+ clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
+ set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
+
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
+ l2cap_build_conf_rsp(chan, buf,
+ L2CAP_CONF_SUCCESS, 0x0000), buf);
+ }
+ goto done;
+
case L2CAP_CONF_UNACCEPT:
if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
char req[64];
@@ -2695,7 +3017,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
}
done:
- bh_unlock_sock(sk);
+ release_sock(sk);
return 0;
}
@@ -2724,17 +3046,8 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
sk->sk_shutdown = SHUTDOWN_MASK;
- /* don't delete l2cap channel if sk is owned by user */
- if (sock_owned_by_user(sk)) {
- l2cap_state_change(chan, BT_DISCONN);
- __clear_chan_timer(chan);
- __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
- bh_unlock_sock(sk);
- return 0;
- }
-
l2cap_chan_del(chan, ECONNRESET);
- bh_unlock_sock(sk);
+ release_sock(sk);
chan->ops->close(chan->data);
return 0;
@@ -2758,17 +3071,8 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
sk = chan->sk;
- /* don't delete l2cap channel if sk is owned by user */
- if (sock_owned_by_user(sk)) {
- l2cap_state_change(chan,BT_DISCONN);
- __clear_chan_timer(chan);
- __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
- bh_unlock_sock(sk);
- return 0;
- }
-
l2cap_chan_del(chan, 0);
- bh_unlock_sock(sk);
+ release_sock(sk);
chan->ops->close(chan->data);
return 0;
@@ -2792,15 +3096,25 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
if (!disable_ertm)
feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
| L2CAP_FEAT_FCS;
+ if (enable_hs)
+ feat_mask |= L2CAP_FEAT_EXT_FLOW
+ | L2CAP_FEAT_EXT_WINDOW;
+
put_unaligned_le32(feat_mask, rsp->data);
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(buf), buf);
} else if (type == L2CAP_IT_FIXED_CHAN) {
u8 buf[12];
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
+
+ if (enable_hs)
+ l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
+ else
+ l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
+
rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
- memcpy(buf + 4, l2cap_fixed_chan, 8);
+ memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(buf), buf);
} else {
@@ -2829,7 +3143,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
return 0;
- del_timer(&conn->info_timer);
+ cancel_delayed_work_sync(&conn->info_work);
if (result != L2CAP_IR_SUCCESS) {
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
@@ -2867,6 +3181,165 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
return 0;
}
+static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ void *data)
+{
+ struct l2cap_create_chan_req *req = data;
+ struct l2cap_create_chan_rsp rsp;
+ u16 psm, scid;
+
+ if (cmd_len != sizeof(*req))
+ return -EPROTO;
+
+ if (!enable_hs)
+ return -EINVAL;
+
+ psm = le16_to_cpu(req->psm);
+ scid = le16_to_cpu(req->scid);
+
+ BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
+
+ /* Placeholder: Always reject */
+ rsp.dcid = 0;
+ rsp.scid = cpu_to_le16(scid);
+ rsp.result = L2CAP_CR_NO_MEM;
+ rsp.status = L2CAP_CS_NO_INFO;
+
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
+ sizeof(rsp), &rsp);
+
+ return 0;
+}
+
+static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, void *data)
+{
+ BT_DBG("conn %p", conn);
+
+ return l2cap_connect_rsp(conn, cmd, data);
+}
+
+static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
+ u16 icid, u16 result)
+{
+ struct l2cap_move_chan_rsp rsp;
+
+ BT_DBG("icid %d, result %d", icid, result);
+
+ rsp.icid = cpu_to_le16(icid);
+ rsp.result = cpu_to_le16(result);
+
+ l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
+}
+
+static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
+ struct l2cap_chan *chan, u16 icid, u16 result)
+{
+ struct l2cap_move_chan_cfm cfm;
+ u8 ident;
+
+ BT_DBG("icid %d, result %d", icid, result);
+
+ ident = l2cap_get_ident(conn);
+ if (chan)
+ chan->ident = ident;
+
+ cfm.icid = cpu_to_le16(icid);
+ cfm.result = cpu_to_le16(result);
+
+ l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
+}
+
+static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
+ u16 icid)
+{
+ struct l2cap_move_chan_cfm_rsp rsp;
+
+ BT_DBG("icid %d", icid);
+
+ rsp.icid = cpu_to_le16(icid);
+ l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
+}
+
+static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+{
+ struct l2cap_move_chan_req *req = data;
+ u16 icid = 0;
+ u16 result = L2CAP_MR_NOT_ALLOWED;
+
+ if (cmd_len != sizeof(*req))
+ return -EPROTO;
+
+ icid = le16_to_cpu(req->icid);
+
+ BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
+
+ if (!enable_hs)
+ return -EINVAL;
+
+ /* Placeholder: Always refuse */
+ l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
+
+ return 0;
+}
+
+static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+{
+ struct l2cap_move_chan_rsp *rsp = data;
+ u16 icid, result;
+
+ if (cmd_len != sizeof(*rsp))
+ return -EPROTO;
+
+ icid = le16_to_cpu(rsp->icid);
+ result = le16_to_cpu(rsp->result);
+
+ BT_DBG("icid %d, result %d", icid, result);
+
+ /* Placeholder: Always unconfirmed */
+ l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
+
+ return 0;
+}
+
+static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+{
+ struct l2cap_move_chan_cfm *cfm = data;
+ u16 icid, result;
+
+ if (cmd_len != sizeof(*cfm))
+ return -EPROTO;
+
+ icid = le16_to_cpu(cfm->icid);
+ result = le16_to_cpu(cfm->result);
+
+ BT_DBG("icid %d, result %d", icid, result);
+
+ l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
+
+ return 0;
+}
+
+static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+{
+ struct l2cap_move_chan_cfm_rsp *rsp = data;
+ u16 icid;
+
+ if (cmd_len != sizeof(*rsp))
+ return -EPROTO;
+
+ icid = le16_to_cpu(rsp->icid);
+
+ BT_DBG("icid %d", icid);
+
+ return 0;
+}
+
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
u16 to_multiplier)
{
@@ -2979,6 +3452,30 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
err = l2cap_information_rsp(conn, cmd, data);
break;
+ case L2CAP_CREATE_CHAN_REQ:
+ err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_CREATE_CHAN_RSP:
+ err = l2cap_create_channel_rsp(conn, cmd, data);
+ break;
+
+ case L2CAP_MOVE_CHAN_REQ:
+ err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_MOVE_CHAN_RSP:
+ err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_MOVE_CHAN_CFM:
+ err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_MOVE_CHAN_CFM_RSP:
+ err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
+ break;
+
default:
BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
err = -EINVAL;
@@ -3057,10 +3554,15 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
u16 our_fcs, rcv_fcs;
- int hdr_size = L2CAP_HDR_SIZE + 2;
+ int hdr_size;
+
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ hdr_size = L2CAP_EXT_HDR_SIZE;
+ else
+ hdr_size = L2CAP_ENH_HDR_SIZE;
if (chan->fcs == L2CAP_FCS_CRC16) {
- skb_trim(skb, skb->len - 2);
+ skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
rcv_fcs = get_unaligned_le16(skb->data + skb->len);
our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
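
l2cap_check_fcs() above trims the trailing 2-byte FCS from the frame and recomputes CRC-16 over the L2CAP header plus payload, comparing the two values. A standalone sketch of the same check follows; the reflected 0xA001 polynomial is an assumption about what the kernel's crc16() helper computes, so treat the arithmetic as illustrative.

/* Sketch: verify a trailing little-endian CRC-16 FCS over header plus
 * payload, in the spirit of l2cap_check_fcs().  The 0xA001 polynomial
 * is an assumption, not verified against lib/crc16.c. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t crc16_update(uint16_t crc, const uint8_t *buf, size_t len)
{
    while (len--) {
        crc ^= *buf++;
        for (int i = 0; i < 8; i++)
            crc = (crc & 1) ? (crc >> 1) ^ 0xa001 : crc >> 1;
    }
    return crc;
}

static int fcs_ok(const uint8_t *frame, size_t len)
{
    if (len < 2)
        return 0;

    uint16_t rcv = frame[len - 2] | (uint16_t)frame[len - 1] << 8;
    uint16_t our = crc16_update(0, frame, len - 2);

    return rcv == our;
}

int main(void)
{
    uint8_t frame[16] = "header+payload";   /* 14 data bytes + 2 FCS bytes */
    uint16_t fcs = crc16_update(0, frame, 14);

    frame[14] = fcs & 0xff;
    frame[15] = fcs >> 8;

    printf("fcs_ok: %d\n", fcs_ok(frame, sizeof(frame)));
    return 0;
}
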
@@ -3072,14 +3574,14 @@ static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
- u16 control = 0;
+ u32 control = 0;
chan->frames_sent = 0;
- control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ control |= __set_reqseq(chan, chan->buffer_seq);
if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- control |= L2CAP_SUPER_RCV_NOT_READY;
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
l2cap_send_sframe(chan, control);
set_bit(CONN_RNR_SENT, &chan->conn_state);
}
@@ -3091,12 +3593,12 @@ static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
chan->frames_sent == 0) {
- control |= L2CAP_SUPER_RCV_READY;
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
l2cap_send_sframe(chan, control);
}
}
-static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
+static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
{
struct sk_buff *next_skb;
int tx_seq_offset, next_tx_seq_offset;
@@ -3105,23 +3607,15 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb,
bt_cb(skb)->sar = sar;
next_skb = skb_peek(&chan->srej_q);
- if (!next_skb) {
- __skb_queue_tail(&chan->srej_q, skb);
- return 0;
- }
- tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
- if (tx_seq_offset < 0)
- tx_seq_offset += 64;
+ tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
- do {
+ while (next_skb) {
if (bt_cb(next_skb)->tx_seq == tx_seq)
return -EINVAL;
- next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
- chan->buffer_seq) % 64;
- if (next_tx_seq_offset < 0)
- next_tx_seq_offset += 64;
+ next_tx_seq_offset = __seq_offset(chan,
+ bt_cb(next_skb)->tx_seq, chan->buffer_seq);
if (next_tx_seq_offset > tx_seq_offset) {
__skb_queue_before(&chan->srej_q, next_skb, skb);
@@ -3129,9 +3623,10 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb,
}
if (skb_queue_is_last(&chan->srej_q, next_skb))
- break;
-
- } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
+ next_skb = NULL;
+ else
+ next_skb = skb_queue_next(&chan->srej_q, next_skb);
+ }
__skb_queue_tail(&chan->srej_q, skb);
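
The srej-queue insertion above replaces the open-coded "(a - b) % 64" arithmetic with __seq_offset(chan, a, b), which has to work for both the 6-bit enhanced and the 14-bit extended sequence space. Below is a standalone sketch of such a modular-distance helper; the 64 and 16384 moduli are assumptions tied to the two control-field formats, not values taken from this patch.

/* Sketch of a per-channel sequence distance helper in the spirit of
 * __seq_offset(): how far seq1 is ahead of seq2, modulo the channel's
 * sequence space.  Moduli are assumed (64 enhanced, 16384 extended). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct chan {
    bool ext_ctrl;          /* stand-in for FLAG_EXT_CTRL */
};

static uint16_t seq_modulus(const struct chan *c)
{
    return c->ext_ctrl ? 0x4000 : 0x40;
}

static uint16_t seq_offset(const struct chan *c, uint16_t seq1, uint16_t seq2)
{
    uint16_t mod = seq_modulus(c);

    /* (seq1 - seq2) mod m, computed without going negative */
    return (uint16_t)(seq1 + mod - seq2) % mod;
}

int main(void)
{
    struct chan enh = { .ext_ctrl = false };

    /* tx_seq 3 is 7 frames ahead of buffer_seq 60 in a 64-frame space */
    printf("offset = %d\n", seq_offset(&enh, 3, 60));
    return 0;
}
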
@@ -3157,24 +3652,24 @@ static void append_skb_frag(struct sk_buff *skb,
skb->truesize += new_frag->truesize;
}
-static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
+static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
{
int err = -EINVAL;
- switch (control & L2CAP_CTRL_SAR) {
- case L2CAP_SDU_UNSEGMENTED:
+ switch (__get_ctrl_sar(chan, control)) {
+ case L2CAP_SAR_UNSEGMENTED:
if (chan->sdu)
break;
err = chan->ops->recv(chan->data, skb);
break;
- case L2CAP_SDU_START:
+ case L2CAP_SAR_START:
if (chan->sdu)
break;
chan->sdu_len = get_unaligned_le16(skb->data);
- skb_pull(skb, 2);
+ skb_pull(skb, L2CAP_SDULEN_SIZE);
if (chan->sdu_len > chan->imtu) {
err = -EMSGSIZE;
@@ -3191,7 +3686,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u1
err = 0;
break;
- case L2CAP_SDU_CONTINUE:
+ case L2CAP_SAR_CONTINUE:
if (!chan->sdu)
break;
@@ -3205,7 +3700,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u1
err = 0;
break;
- case L2CAP_SDU_END:
+ case L2CAP_SAR_END:
if (!chan->sdu)
break;
@@ -3240,14 +3735,14 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u1
static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
{
- u16 control;
+ u32 control;
BT_DBG("chan %p, Enter local busy", chan);
set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
- control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
- control |= L2CAP_SUPER_RCV_NOT_READY;
+ control = __set_reqseq(chan, chan->buffer_seq);
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
l2cap_send_sframe(chan, control);
set_bit(CONN_RNR_SENT, &chan->conn_state);
@@ -3257,13 +3752,14 @@ static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
{
- u16 control;
+ u32 control;
if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
goto done;
- control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
- control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
+ control = __set_reqseq(chan, chan->buffer_seq);
+ control |= __set_ctrl_poll(chan);
+ control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
l2cap_send_sframe(chan, control);
chan->retry_count = 1;
@@ -3289,10 +3785,10 @@ void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
}
}
-static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
+static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
{
struct sk_buff *skb;
- u16 control;
+ u32 control;
while ((skb = skb_peek(&chan->srej_q)) &&
!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
@@ -3302,7 +3798,7 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
break;
skb = skb_dequeue(&chan->srej_q);
- control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
+ control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
err = l2cap_reassemble_sdu(chan, skb, control);
if (err < 0) {
@@ -3310,16 +3806,15 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
break;
}
- chan->buffer_seq_srej =
- (chan->buffer_seq_srej + 1) % 64;
- tx_seq = (tx_seq + 1) % 64;
+ chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
+ tx_seq = __next_seq(chan, tx_seq);
}
}
-static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
+static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
{
struct srej_list *l, *tmp;
- u16 control;
+ u32 control;
list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
if (l->tx_seq == tx_seq) {
@@ -3327,45 +3822,53 @@ static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
kfree(l);
return;
}
- control = L2CAP_SUPER_SELECT_REJECT;
- control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
+ control |= __set_reqseq(chan, l->tx_seq);
l2cap_send_sframe(chan, control);
list_del(&l->list);
list_add_tail(&l->list, &chan->srej_l);
}
}
-static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
+static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
{
struct srej_list *new;
- u16 control;
+ u32 control;
while (tx_seq != chan->expected_tx_seq) {
- control = L2CAP_SUPER_SELECT_REJECT;
- control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
+ control |= __set_reqseq(chan, chan->expected_tx_seq);
l2cap_send_sframe(chan, control);
new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
+ if (!new)
+ return -ENOMEM;
+
new->tx_seq = chan->expected_tx_seq;
- chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
+
+ chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
+
list_add_tail(&new->list, &chan->srej_l);
}
- chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
+
+ chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
+
+ return 0;
}
-static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
+static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
{
- u8 tx_seq = __get_txseq(rx_control);
- u8 req_seq = __get_reqseq(rx_control);
- u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
+ u16 tx_seq = __get_txseq(chan, rx_control);
+ u16 req_seq = __get_reqseq(chan, rx_control);
+ u8 sar = __get_ctrl_sar(chan, rx_control);
int tx_seq_offset, expected_tx_seq_offset;
int num_to_ack = (chan->tx_win/6) + 1;
int err = 0;
- BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
+ BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
tx_seq, rx_control);
- if (L2CAP_CTRL_FINAL & rx_control &&
+ if (__is_ctrl_final(chan, rx_control) &&
test_bit(CONN_WAIT_F, &chan->conn_state)) {
__clear_monitor_timer(chan);
if (chan->unacked_frames > 0)
@@ -3376,9 +3879,7 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
chan->expected_ack_seq = req_seq;
l2cap_drop_acked_frames(chan);
- tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
- if (tx_seq_offset < 0)
- tx_seq_offset += 64;
+ tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
/* invalid tx_seq */
if (tx_seq_offset >= chan->tx_win) {
@@ -3423,13 +3924,16 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
return 0;
}
}
- l2cap_send_srejframe(chan, tx_seq);
+
+ err = l2cap_send_srejframe(chan, tx_seq);
+ if (err < 0) {
+ l2cap_send_disconn_req(chan->conn, chan, -err);
+ return err;
+ }
}
} else {
- expected_tx_seq_offset =
- (chan->expected_tx_seq - chan->buffer_seq) % 64;
- if (expected_tx_seq_offset < 0)
- expected_tx_seq_offset += 64;
+ expected_tx_seq_offset = __seq_offset(chan,
+ chan->expected_tx_seq, chan->buffer_seq);
/* duplicated tx_seq */
if (tx_seq_offset < expected_tx_seq_offset)
@@ -3447,14 +3951,18 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
set_bit(CONN_SEND_PBIT, &chan->conn_state);
- l2cap_send_srejframe(chan, tx_seq);
+ err = l2cap_send_srejframe(chan, tx_seq);
+ if (err < 0) {
+ l2cap_send_disconn_req(chan->conn, chan, -err);
+ return err;
+ }
__clear_ack_timer(chan);
}
return 0;
expected:
- chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
+ chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
bt_cb(skb)->tx_seq = tx_seq;
@@ -3464,22 +3972,24 @@ expected:
}
err = l2cap_reassemble_sdu(chan, skb, rx_control);
- chan->buffer_seq = (chan->buffer_seq + 1) % 64;
+ chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
+
if (err < 0) {
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
return err;
}
- if (rx_control & L2CAP_CTRL_FINAL) {
+ if (__is_ctrl_final(chan, rx_control)) {
if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
l2cap_retransmit_frames(chan);
}
- __set_ack_timer(chan);
chan->num_acked = (chan->num_acked + 1) % num_to_ack;
if (chan->num_acked == num_to_ack - 1)
l2cap_send_ack(chan);
+ else
+ __set_ack_timer(chan);
return 0;
@@ -3488,15 +3998,15 @@ drop:
return 0;
}
-static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
+static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
{
- BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
- rx_control);
+ BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
+ __get_reqseq(chan, rx_control), rx_control);
- chan->expected_ack_seq = __get_reqseq(rx_control);
+ chan->expected_ack_seq = __get_reqseq(chan, rx_control);
l2cap_drop_acked_frames(chan);
- if (rx_control & L2CAP_CTRL_POLL) {
+ if (__is_ctrl_poll(chan, rx_control)) {
set_bit(CONN_SEND_FBIT, &chan->conn_state);
if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
@@ -3509,7 +4019,7 @@ static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_co
l2cap_send_i_or_rr_or_rnr(chan);
}
- } else if (rx_control & L2CAP_CTRL_FINAL) {
+ } else if (__is_ctrl_final(chan, rx_control)) {
clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
@@ -3528,18 +4038,18 @@ static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_co
}
}
-static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
+static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
{
- u8 tx_seq = __get_reqseq(rx_control);
+ u16 tx_seq = __get_reqseq(chan, rx_control);
- BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
+ BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
chan->expected_ack_seq = tx_seq;
l2cap_drop_acked_frames(chan);
- if (rx_control & L2CAP_CTRL_FINAL) {
+ if (__is_ctrl_final(chan, rx_control)) {
if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
l2cap_retransmit_frames(chan);
} else {
@@ -3549,15 +4059,15 @@ static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_c
set_bit(CONN_REJ_ACT, &chan->conn_state);
}
}
-static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
+static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
{
- u8 tx_seq = __get_reqseq(rx_control);
+ u16 tx_seq = __get_reqseq(chan, rx_control);
- BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
+ BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- if (rx_control & L2CAP_CTRL_POLL) {
+ if (__is_ctrl_poll(chan, rx_control)) {
chan->expected_ack_seq = tx_seq;
l2cap_drop_acked_frames(chan);
@@ -3570,7 +4080,7 @@ static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_
chan->srej_save_reqseq = tx_seq;
set_bit(CONN_SREJ_ACT, &chan->conn_state);
}
- } else if (rx_control & L2CAP_CTRL_FINAL) {
+ } else if (__is_ctrl_final(chan, rx_control)) {
if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
chan->srej_save_reqseq == tx_seq)
clear_bit(CONN_SREJ_ACT, &chan->conn_state);
@@ -3585,37 +4095,39 @@ static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_
}
}
-static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
+static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
{
- u8 tx_seq = __get_reqseq(rx_control);
+ u16 tx_seq = __get_reqseq(chan, rx_control);
- BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
+ BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
chan->expected_ack_seq = tx_seq;
l2cap_drop_acked_frames(chan);
- if (rx_control & L2CAP_CTRL_POLL)
+ if (__is_ctrl_poll(chan, rx_control))
set_bit(CONN_SEND_FBIT, &chan->conn_state);
if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
__clear_retrans_timer(chan);
- if (rx_control & L2CAP_CTRL_POLL)
+ if (__is_ctrl_poll(chan, rx_control))
l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
return;
}
- if (rx_control & L2CAP_CTRL_POLL)
+ if (__is_ctrl_poll(chan, rx_control)) {
l2cap_send_srejtail(chan);
- else
- l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
+ } else {
+ rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
+ l2cap_send_sframe(chan, rx_control);
+ }
}
-static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
+static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
{
- BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
+ BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
- if (L2CAP_CTRL_FINAL & rx_control &&
+ if (__is_ctrl_final(chan, rx_control) &&
test_bit(CONN_WAIT_F, &chan->conn_state)) {
__clear_monitor_timer(chan);
if (chan->unacked_frames > 0)
@@ -3623,20 +4135,20 @@ static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_cont
clear_bit(CONN_WAIT_F, &chan->conn_state);
}
- switch (rx_control & L2CAP_CTRL_SUPERVISE) {
- case L2CAP_SUPER_RCV_READY:
+ switch (__get_ctrl_super(chan, rx_control)) {
+ case L2CAP_SUPER_RR:
l2cap_data_channel_rrframe(chan, rx_control);
break;
- case L2CAP_SUPER_REJECT:
+ case L2CAP_SUPER_REJ:
l2cap_data_channel_rejframe(chan, rx_control);
break;
- case L2CAP_SUPER_SELECT_REJECT:
+ case L2CAP_SUPER_SREJ:
l2cap_data_channel_srejframe(chan, rx_control);
break;
- case L2CAP_SUPER_RCV_NOT_READY:
+ case L2CAP_SUPER_RNR:
l2cap_data_channel_rnrframe(chan, rx_control);
break;
}
@@ -3648,12 +4160,12 @@ static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_cont
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
- u16 control;
- u8 req_seq;
+ u32 control;
+ u16 req_seq;
int len, next_tx_seq_offset, req_seq_offset;
- control = get_unaligned_le16(skb->data);
- skb_pull(skb, 2);
+ control = __get_control(chan, skb->data);
+ skb_pull(skb, __ctrl_size(chan));
len = skb->len;
/*
@@ -3664,26 +4176,23 @@ static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
if (l2cap_check_fcs(chan, skb))
goto drop;
- if (__is_sar_start(control) && __is_iframe(control))
- len -= 2;
+ if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
+ len -= L2CAP_SDULEN_SIZE;
if (chan->fcs == L2CAP_FCS_CRC16)
- len -= 2;
+ len -= L2CAP_FCS_SIZE;
if (len > chan->mps) {
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
goto drop;
}
- req_seq = __get_reqseq(control);
- req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
- if (req_seq_offset < 0)
- req_seq_offset += 64;
+ req_seq = __get_reqseq(chan, control);
+
+ req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
- next_tx_seq_offset =
- (chan->next_tx_seq - chan->expected_ack_seq) % 64;
- if (next_tx_seq_offset < 0)
- next_tx_seq_offset += 64;
+ next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
+ chan->expected_ack_seq);
/* check for invalid req-seq */
if (req_seq_offset > next_tx_seq_offset) {
@@ -3691,7 +4200,7 @@ static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
goto drop;
}
- if (__is_iframe(control)) {
+ if (!__is_sframe(chan, control)) {
if (len < 0) {
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
goto drop;
@@ -3719,8 +4228,8 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
{
struct l2cap_chan *chan;
struct sock *sk = NULL;
- u16 control;
- u8 tx_seq;
+ u32 control;
+ u16 tx_seq;
int len;
chan = l2cap_get_chan_by_scid(conn, cid);
@@ -3751,33 +4260,28 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
break;
case L2CAP_MODE_ERTM:
- if (!sock_owned_by_user(sk)) {
- l2cap_ertm_data_rcv(sk, skb);
- } else {
- if (sk_add_backlog(sk, skb))
- goto drop;
- }
+ l2cap_ertm_data_rcv(sk, skb);
goto done;
case L2CAP_MODE_STREAMING:
- control = get_unaligned_le16(skb->data);
- skb_pull(skb, 2);
+ control = __get_control(chan, skb->data);
+ skb_pull(skb, __ctrl_size(chan));
len = skb->len;
if (l2cap_check_fcs(chan, skb))
goto drop;
- if (__is_sar_start(control))
- len -= 2;
+ if (__is_sar_start(chan, control))
+ len -= L2CAP_SDULEN_SIZE;
if (chan->fcs == L2CAP_FCS_CRC16)
- len -= 2;
+ len -= L2CAP_FCS_SIZE;
- if (len > chan->mps || len < 0 || __is_sframe(control))
+ if (len > chan->mps || len < 0 || __is_sframe(chan, control))
goto drop;
- tx_seq = __get_txseq(control);
+ tx_seq = __get_txseq(chan, control);
if (chan->expected_tx_seq != tx_seq) {
/* Frame(s) missing - must discard partial SDU */
@@ -3789,7 +4293,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
/* TODO: Notify userland of missing data */
}
- chan->expected_tx_seq = (tx_seq + 1) % 64;
+ chan->expected_tx_seq = __next_seq(chan, tx_seq);
if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
@@ -3806,7 +4310,7 @@ drop:
done:
if (sk)
- bh_unlock_sock(sk);
+ release_sock(sk);
return 0;
}
@@ -3822,7 +4326,7 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
sk = chan->sk;
- bh_lock_sock(sk);
+ lock_sock(sk);
BT_DBG("sk %p, len %d", sk, skb->len);
@@ -3840,7 +4344,7 @@ drop:
done:
if (sk)
- bh_unlock_sock(sk);
+ release_sock(sk);
return 0;
}
@@ -3855,7 +4359,7 @@ static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct
sk = chan->sk;
- bh_lock_sock(sk);
+ lock_sock(sk);
BT_DBG("sk %p, len %d", sk, skb->len);
@@ -3873,7 +4377,7 @@ drop:
done:
if (sk)
- bh_unlock_sock(sk);
+ release_sock(sk);
return 0;
}
@@ -3943,12 +4447,12 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
lm1 |= HCI_LM_ACCEPT;
- if (c->role_switch)
+ if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
lm1 |= HCI_LM_MASTER;
exact++;
} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
lm2 |= HCI_LM_ACCEPT;
- if (c->role_switch)
+ if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
lm2 |= HCI_LM_MASTER;
}
}
@@ -3983,7 +4487,7 @@ static int l2cap_disconn_ind(struct hci_conn *hcon)
BT_DBG("hcon %p", hcon);
if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
- return 0x13;
+ return HCI_ERROR_REMOTE_USER_TERM;
return conn->disc_reason;
}
@@ -4032,9 +4536,9 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
del_timer(&conn->security_timer);
}
- read_lock(&conn->chan_lock);
+ rcu_read_lock();
- list_for_each_entry(chan, &conn->chan_l, list) {
+ list_for_each_entry_rcu(chan, &conn->chan_l, list) {
struct sock *sk = chan->sk;
bh_lock_sock(sk);
@@ -4112,7 +4616,7 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
bh_unlock_sock(sk);
}
- read_unlock(&conn->chan_lock);
+ rcu_read_unlock();
return 0;
}
@@ -4178,11 +4682,11 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl
BT_ERR("Frame exceeding recv MTU (len %d, "
"MTU %d)", len,
chan->imtu);
- bh_unlock_sock(sk);
+ release_sock(sk);
l2cap_conn_unreliable(conn, ECOMM);
goto drop;
}
- bh_unlock_sock(sk);
+ release_sock(sk);
}
/* Allocate skb for the complete frame (with header) */
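
Several hunks above (l2cap_raw_recv() and l2cap_security_cfm()) replace the conn->chan_lock reader lock with RCU: readers bracket the traversal with rcu_read_lock()/rcu_read_unlock() and use list_for_each_entry_rcu(), while writers are expected to publish and remove entries with the _rcu list helpers. A minimal read-side sketch of that pattern follows, assuming kernel context and <linux/rculist.h>; the structure and field names are stand-ins, not the real l2cap_conn/l2cap_chan definitions.

/* Sketch (kernel context assumed): lockless read-side traversal of an
 * RCU-protected channel list, as l2cap_raw_recv() now does. */
#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct my_conn {
    struct list_head chan_l;        /* stand-in for l2cap_conn->chan_l */
};

struct my_chan {
    struct list_head list;
    int chan_type;
};

static void for_each_raw_chan(struct my_conn *conn,
                              void (*fn)(struct my_chan *chan))
{
    struct my_chan *chan;

    rcu_read_lock();
    list_for_each_entry_rcu(chan, &conn->chan_l, list) {
        if (chan->chan_type != 0)       /* e.g. skip non-RAW channels */
            continue;
        fn(chan);       /* must not sleep inside the RCU read section */
    }
    rcu_read_unlock();
}
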
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index e8292369cdc..5e0976670b9 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -3,6 +3,7 @@
Copyright (C) 2000-2001 Qualcomm Incorporated
Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Copyright (C) 2010 Google Inc.
+ Copyright (C) 2011 ProFUSION Embedded Systems
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -121,70 +122,15 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
if (la.l2_cid && la.l2_psm)
return -EINVAL;
- lock_sock(sk);
-
- if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED
- && !(la.l2_psm || la.l2_cid)) {
- err = -EINVAL;
- goto done;
- }
-
- switch (chan->mode) {
- case L2CAP_MODE_BASIC:
- break;
- case L2CAP_MODE_ERTM:
- case L2CAP_MODE_STREAMING:
- if (!disable_ertm)
- break;
- /* fall through */
- default:
- err = -ENOTSUPP;
- goto done;
- }
-
- switch (sk->sk_state) {
- case BT_CONNECT:
- case BT_CONNECT2:
- case BT_CONFIG:
- /* Already connecting */
- goto wait;
-
- case BT_CONNECTED:
- /* Already connected */
- err = -EISCONN;
- goto done;
-
- case BT_OPEN:
- case BT_BOUND:
- /* Can connect */
- break;
-
- default:
- err = -EBADFD;
- goto done;
- }
-
- /* PSM must be odd and lsb of upper byte must be 0 */
- if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 && !la.l2_cid &&
- chan->chan_type != L2CAP_CHAN_RAW) {
- err = -EINVAL;
- goto done;
- }
-
- /* Set destination address and psm */
- bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
- chan->psm = la.l2_psm;
- chan->dcid = la.l2_cid;
-
- err = l2cap_chan_connect(l2cap_pi(sk)->chan);
+ err = l2cap_chan_connect(chan, la.l2_psm, la.l2_cid, &la.l2_bdaddr);
if (err)
goto done;
-wait:
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
- release_sock(sk);
+ if (sock_owned_by_user(sk))
+ release_sock(sk);
return err;
}
@@ -333,7 +279,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
opts.mode = chan->mode;
opts.fcs = chan->fcs;
opts.max_tx = chan->max_tx;
- opts.txwin_size = (__u16)chan->tx_win;
+ opts.txwin_size = chan->tx_win;
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *) &opts, len))
@@ -358,10 +304,10 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
break;
}
- if (chan->role_switch)
+ if (test_bit(FLAG_ROLE_SWITCH, &chan->flags))
opt |= L2CAP_LM_MASTER;
- if (chan->force_reliable)
+ if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
opt |= L2CAP_LM_RELIABLE;
if (put_user(opt, (u32 __user *) optval))
@@ -448,7 +394,8 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
break;
case BT_FLUSHABLE:
- if (put_user(chan->flushable, (u32 __user *) optval))
+ if (put_user(test_bit(FLAG_FLUSHABLE, &chan->flags),
+ (u32 __user *) optval))
err = -EFAULT;
break;
@@ -460,7 +407,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
break;
}
- pwr.force_active = chan->force_active;
+ pwr.force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
len = min_t(unsigned int, len, sizeof(pwr));
if (copy_to_user(optval, (char *) &pwr, len))
@@ -468,6 +415,16 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
break;
+ case BT_CHANNEL_POLICY:
+ if (!enable_hs) {
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ if (put_user(chan->chan_policy, (u32 __user *) optval))
+ err = -EFAULT;
+ break;
+
default:
err = -ENOPROTOOPT;
break;
@@ -502,7 +459,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
opts.mode = chan->mode;
opts.fcs = chan->fcs;
opts.max_tx = chan->max_tx;
- opts.txwin_size = (__u16)chan->tx_win;
+ opts.txwin_size = chan->tx_win;
len = min_t(unsigned int, sizeof(opts), optlen);
if (copy_from_user((char *) &opts, optval, len)) {
@@ -510,7 +467,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
break;
}
- if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
+ if (opts.txwin_size > L2CAP_DEFAULT_EXT_WINDOW) {
err = -EINVAL;
break;
}
@@ -534,7 +491,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
chan->omtu = opts.omtu;
chan->fcs = opts.fcs;
chan->max_tx = opts.max_tx;
- chan->tx_win = (__u8)opts.txwin_size;
+ chan->tx_win = opts.txwin_size;
break;
case L2CAP_LM:
@@ -550,8 +507,15 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
if (opt & L2CAP_LM_SECURE)
chan->sec_level = BT_SECURITY_HIGH;
- chan->role_switch = (opt & L2CAP_LM_MASTER);
- chan->force_reliable = (opt & L2CAP_LM_RELIABLE);
+ if (opt & L2CAP_LM_MASTER)
+ set_bit(FLAG_ROLE_SWITCH, &chan->flags);
+ else
+ clear_bit(FLAG_ROLE_SWITCH, &chan->flags);
+
+ if (opt & L2CAP_LM_RELIABLE)
+ set_bit(FLAG_FORCE_RELIABLE, &chan->flags);
+ else
+ clear_bit(FLAG_FORCE_RELIABLE, &chan->flags);
break;
default:
@@ -607,8 +571,13 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
chan->sec_level = sec.level;
+ if (!chan->conn)
+ break;
+
conn = chan->conn;
- if (conn && chan->scid == L2CAP_CID_LE_DATA) {
+
+	/* change security for LE channels */
+ if (chan->scid == L2CAP_CID_LE_DATA) {
if (!conn->hcon->out) {
err = -EINVAL;
break;
@@ -616,9 +585,14 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
if (smp_conn_security(conn, sec.level))
break;
-
- err = 0;
sk->sk_state = BT_CONFIG;
+
+	/* or for ACL link, during the defer_setup period */
+ } else if (sk->sk_state == BT_CONNECT2 &&
+ bt_sk(sk)->defer_setup) {
+ err = l2cap_chan_check_security(chan);
+ } else {
+ err = -EINVAL;
}
break;
@@ -657,7 +631,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
}
}
- chan->flushable = opt;
+ if (opt)
+ set_bit(FLAG_FLUSHABLE, &chan->flags);
+ else
+ clear_bit(FLAG_FLUSHABLE, &chan->flags);
break;
case BT_POWER:
@@ -674,7 +651,36 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
err = -EFAULT;
break;
}
- chan->force_active = pwr.force_active;
+
+ if (pwr.force_active)
+ set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+ else
+ clear_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+ break;
+
+ case BT_CHANNEL_POLICY:
+ if (!enable_hs) {
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ if (get_user(opt, (u32 __user *) optval)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (opt > BT_CHANNEL_POLICY_AMP_PREFERRED) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (chan->mode != L2CAP_MODE_ERTM &&
+ chan->mode != L2CAP_MODE_STREAMING) {
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ chan->chan_policy = (u8) opt;
break;
default:
@@ -708,7 +714,7 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
return -ENOTCONN;
}
- err = l2cap_chan_send(chan, msg, len);
+ err = l2cap_chan_send(chan, msg, len, sk->sk_priority);
release_sock(sk);
return err;
@@ -930,11 +936,9 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
chan->fcs = pchan->fcs;
chan->max_tx = pchan->max_tx;
chan->tx_win = pchan->tx_win;
+ chan->tx_win_max = pchan->tx_win_max;
chan->sec_level = pchan->sec_level;
- chan->role_switch = pchan->role_switch;
- chan->force_reliable = pchan->force_reliable;
- chan->flushable = pchan->flushable;
- chan->force_active = pchan->force_active;
+ chan->flags = pchan->flags;
security_sk_clone(parent, sk);
} else {
@@ -963,12 +967,10 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
chan->max_tx = L2CAP_DEFAULT_MAX_TX;
chan->fcs = L2CAP_FCS_CRC16;
chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
+ chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
chan->sec_level = BT_SECURITY_LOW;
- chan->role_switch = 0;
- chan->force_reliable = 0;
- chan->flushable = BT_FLUSHABLE_OFF;
- chan->force_active = BT_POWER_FORCE_ACTIVE_ON;
-
+ chan->flags = 0;
+ set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
/* Default config options */
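
The new BT_CHANNEL_POLICY socket option above is only accepted when high-speed (AMP) support is enabled and the channel is in ERTM or streaming mode. Below is a hedged userspace sketch of how an application might request AMP preference; the option and policy values mirror what this patch checks, but whether your installed headers export them is an assumption, hence the guarded fallback defines.

/* Userspace sketch: ask that an ERTM L2CAP channel prefer an AMP
 * controller.  Fallback constant values are assumptions in case the
 * local headers do not define them. */
#include <stdio.h>
#include <sys/socket.h>

#ifndef SOL_BLUETOOTH
#define SOL_BLUETOOTH                   274
#endif

#ifndef BT_CHANNEL_POLICY
#define BT_CHANNEL_POLICY               10
#define BT_CHANNEL_POLICY_BREDR_ONLY        0
#define BT_CHANNEL_POLICY_BREDR_PREFERRED   1
#define BT_CHANNEL_POLICY_AMP_PREFERRED     2
#endif

int request_amp_preferred(int l2cap_fd)
{
    unsigned int policy = BT_CHANNEL_POLICY_AMP_PREFERRED;

    if (setsockopt(l2cap_fd, SOL_BLUETOOTH, BT_CHANNEL_POLICY,
                   &policy, sizeof(policy)) < 0) {
        perror("setsockopt(BT_CHANNEL_POLICY)");
        return -1;
    }

    return 0;
}
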
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 5caff4d4759..8413f55cc13 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -22,6 +22,7 @@
/* Bluetooth HCI Management interface */
+#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
@@ -32,22 +33,98 @@
#define MGMT_VERSION 0
#define MGMT_REVISION 1
+#define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
+
+#define SERVICE_CACHE_TIMEOUT (5 * 1000)
+
struct pending_cmd {
struct list_head list;
- __u16 opcode;
+ u16 opcode;
int index;
void *param;
struct sock *sk;
void *user_data;
};
-static LIST_HEAD(cmd_list);
+/* HCI to MGMT error code conversion table */
+static u8 mgmt_status_table[] = {
+ MGMT_STATUS_SUCCESS,
+ MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
+ MGMT_STATUS_NOT_CONNECTED, /* No Connection */
+ MGMT_STATUS_FAILED, /* Hardware Failure */
+ MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
+ MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
+ MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
+ MGMT_STATUS_NO_RESOURCES, /* Memory Full */
+ MGMT_STATUS_TIMEOUT, /* Connection Timeout */
+ MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
+ MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
+ MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
+ MGMT_STATUS_BUSY, /* Command Disallowed */
+ MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
+ MGMT_STATUS_REJECTED, /* Rejected Security */
+ MGMT_STATUS_REJECTED, /* Rejected Personal */
+ MGMT_STATUS_TIMEOUT, /* Host Timeout */
+ MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
+ MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
+ MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
+ MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
+ MGMT_STATUS_DISCONNECTED, /* OE Power Off */
+ MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
+ MGMT_STATUS_BUSY, /* Repeated Attempts */
+ MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
+ MGMT_STATUS_FAILED, /* Unknown LMP PDU */
+ MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
+ MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
+ MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
+ MGMT_STATUS_REJECTED, /* Air Mode Rejected */
+ MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
+ MGMT_STATUS_FAILED, /* Unspecified Error */
+ MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
+ MGMT_STATUS_FAILED, /* Role Change Not Allowed */
+ MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
+ MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
+ MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
+ MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
+ MGMT_STATUS_FAILED, /* Unit Link Key Used */
+ MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
+ MGMT_STATUS_TIMEOUT, /* Instant Passed */
+ MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
+ MGMT_STATUS_FAILED, /* Transaction Collision */
+ MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
+ MGMT_STATUS_REJECTED, /* QoS Rejected */
+ MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
+ MGMT_STATUS_REJECTED, /* Insufficient Security */
+ MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
+ MGMT_STATUS_BUSY, /* Role Switch Pending */
+ MGMT_STATUS_FAILED, /* Slot Violation */
+ MGMT_STATUS_FAILED, /* Role Switch Failed */
+ MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
+ MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
+ MGMT_STATUS_BUSY, /* Host Busy Pairing */
+ MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
+ MGMT_STATUS_BUSY, /* Controller Busy */
+ MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
+ MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
+ MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
+ MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
+ MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
+};
+
+static u8 mgmt_status(u8 hci_status)
+{
+ if (hci_status < ARRAY_SIZE(mgmt_status_table))
+ return mgmt_status_table[hci_status];
+
+ return MGMT_STATUS_FAILED;
+}
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
struct sk_buff *skb;
struct mgmt_hdr *hdr;
struct mgmt_ev_cmd_status *ev;
+ int err;
BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
@@ -65,10 +142,11 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
ev->status = status;
put_unaligned_le16(cmd, &ev->opcode);
- if (sock_queue_rcv_skb(sk, skb) < 0)
+ err = sock_queue_rcv_skb(sk, skb);
+ if (err < 0)
kfree_skb(skb);
- return 0;
+ return err;
}
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
@@ -77,6 +155,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
struct sk_buff *skb;
struct mgmt_hdr *hdr;
struct mgmt_ev_cmd_complete *ev;
+ int err;
BT_DBG("sock %p", sk);
@@ -96,10 +175,11 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
if (rp)
memcpy(ev->data, rp, rp_len);
- if (sock_queue_rcv_skb(sk, skb) < 0)
+ err = sock_queue_rcv_skb(sk, skb);
+ if (err < 0)
kfree_skb(skb);
- return 0;
+ return err;
}
static int read_version(struct sock *sk)
@@ -119,6 +199,7 @@ static int read_index_list(struct sock *sk)
{
struct mgmt_rp_read_index_list *rp;
struct list_head *p;
+ struct hci_dev *d;
size_t rp_len;
u16 count;
int i, err;
@@ -142,10 +223,9 @@ static int read_index_list(struct sock *sk)
put_unaligned_le16(count, &rp->num_controllers);
i = 0;
- list_for_each(p, &hci_dev_list) {
- struct hci_dev *d = list_entry(p, struct hci_dev, list);
-
- hci_del_off_timer(d);
+ list_for_each_entry(d, &hci_dev_list, list) {
+ if (test_and_clear_bit(HCI_AUTO_OFF, &d->flags))
+ cancel_delayed_work(&d->power_off);
if (test_bit(HCI_SETUP, &d->flags))
continue;
@@ -164,6 +244,262 @@ static int read_index_list(struct sock *sk)
return err;
}
+static u32 get_supported_settings(struct hci_dev *hdev)
+{
+ u32 settings = 0;
+
+ settings |= MGMT_SETTING_POWERED;
+ settings |= MGMT_SETTING_CONNECTABLE;
+ settings |= MGMT_SETTING_FAST_CONNECTABLE;
+ settings |= MGMT_SETTING_DISCOVERABLE;
+ settings |= MGMT_SETTING_PAIRABLE;
+
+ if (hdev->features[6] & LMP_SIMPLE_PAIR)
+ settings |= MGMT_SETTING_SSP;
+
+ if (!(hdev->features[4] & LMP_NO_BREDR)) {
+ settings |= MGMT_SETTING_BREDR;
+ settings |= MGMT_SETTING_LINK_SECURITY;
+ }
+
+ if (hdev->features[4] & LMP_LE)
+ settings |= MGMT_SETTING_LE;
+
+ return settings;
+}
+
+static u32 get_current_settings(struct hci_dev *hdev)
+{
+ u32 settings = 0;
+
+ if (test_bit(HCI_UP, &hdev->flags))
+ settings |= MGMT_SETTING_POWERED;
+ else
+ return settings;
+
+ if (test_bit(HCI_PSCAN, &hdev->flags))
+ settings |= MGMT_SETTING_CONNECTABLE;
+
+ if (test_bit(HCI_ISCAN, &hdev->flags))
+ settings |= MGMT_SETTING_DISCOVERABLE;
+
+ if (test_bit(HCI_PAIRABLE, &hdev->flags))
+ settings |= MGMT_SETTING_PAIRABLE;
+
+ if (!(hdev->features[4] & LMP_NO_BREDR))
+ settings |= MGMT_SETTING_BREDR;
+
+ if (hdev->extfeatures[0] & LMP_HOST_LE)
+ settings |= MGMT_SETTING_LE;
+
+ if (test_bit(HCI_AUTH, &hdev->flags))
+ settings |= MGMT_SETTING_LINK_SECURITY;
+
+ if (hdev->ssp_mode > 0)
+ settings |= MGMT_SETTING_SSP;
+
+ return settings;
+}
+
+#define EIR_FLAGS 0x01 /* flags */
+#define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */
+#define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */
+#define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */
+#define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */
+#define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */
+#define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */
+#define EIR_NAME_SHORT 0x08 /* shortened local name */
+#define EIR_NAME_COMPLETE 0x09 /* complete local name */
+#define EIR_TX_POWER 0x0A /* transmit power level */
+#define EIR_DEVICE_ID 0x10 /* device ID */
+
+#define PNP_INFO_SVCLASS_ID 0x1200
+
+static u8 bluetooth_base_uuid[] = {
+ 0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static u16 get_uuid16(u8 *uuid128)
+{
+ u32 val;
+ int i;
+
+ for (i = 0; i < 12; i++) {
+ if (bluetooth_base_uuid[i] != uuid128[i])
+ return 0;
+ }
+
+ memcpy(&val, &uuid128[12], 4);
+
+ val = le32_to_cpu(val);
+ if (val > 0xffff)
+ return 0;
+
+ return (u16) val;
+}
+
+static void create_eir(struct hci_dev *hdev, u8 *data)
+{
+ u8 *ptr = data;
+ u16 eir_len = 0;
+ u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)];
+ int i, truncated = 0;
+ struct bt_uuid *uuid;
+ size_t name_len;
+
+ name_len = strlen(hdev->dev_name);
+
+ if (name_len > 0) {
+ /* EIR Data type */
+ if (name_len > 48) {
+ name_len = 48;
+ ptr[1] = EIR_NAME_SHORT;
+ } else
+ ptr[1] = EIR_NAME_COMPLETE;
+
+ /* EIR Data length */
+ ptr[0] = name_len + 1;
+
+ memcpy(ptr + 2, hdev->dev_name, name_len);
+
+ eir_len += (name_len + 2);
+ ptr += (name_len + 2);
+ }
+
+ memset(uuid16_list, 0, sizeof(uuid16_list));
+
+ /* Group all UUID16 types */
+ list_for_each_entry(uuid, &hdev->uuids, list) {
+ u16 uuid16;
+
+ uuid16 = get_uuid16(uuid->uuid);
+ if (uuid16 == 0)
+ return;
+
+ if (uuid16 < 0x1100)
+ continue;
+
+ if (uuid16 == PNP_INFO_SVCLASS_ID)
+ continue;
+
+ /* Stop if not enough space to put next UUID */
+ if (eir_len + 2 + sizeof(u16) > HCI_MAX_EIR_LENGTH) {
+ truncated = 1;
+ break;
+ }
+
+ /* Check for duplicates */
+ for (i = 0; uuid16_list[i] != 0; i++)
+ if (uuid16_list[i] == uuid16)
+ break;
+
+ if (uuid16_list[i] == 0) {
+ uuid16_list[i] = uuid16;
+ eir_len += sizeof(u16);
+ }
+ }
+
+ if (uuid16_list[0] != 0) {
+ u8 *length = ptr;
+
+ /* EIR Data type */
+ ptr[1] = truncated ? EIR_UUID16_SOME : EIR_UUID16_ALL;
+
+ ptr += 2;
+ eir_len += 2;
+
+ for (i = 0; uuid16_list[i] != 0; i++) {
+ *ptr++ = (uuid16_list[i] & 0x00ff);
+ *ptr++ = (uuid16_list[i] & 0xff00) >> 8;
+ }
+
+ /* EIR Data length */
+ *length = (i * sizeof(u16)) + 1;
+ }
+}
+
+static int update_eir(struct hci_dev *hdev)
+{
+ struct hci_cp_write_eir cp;
+
+ if (!(hdev->features[6] & LMP_EXT_INQ))
+ return 0;
+
+ if (hdev->ssp_mode == 0)
+ return 0;
+
+ if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+
+ create_eir(hdev, cp.data);
+
+ if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
+ return 0;
+
+ memcpy(hdev->eir, cp.data, sizeof(cp.data));
+
+ return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
+}
+
+static u8 get_service_classes(struct hci_dev *hdev)
+{
+ struct bt_uuid *uuid;
+ u8 val = 0;
+
+ list_for_each_entry(uuid, &hdev->uuids, list)
+ val |= uuid->svc_hint;
+
+ return val;
+}
+
+static int update_class(struct hci_dev *hdev)
+{
+ u8 cod[3];
+
+ BT_DBG("%s", hdev->name);
+
+ if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
+ return 0;
+
+ cod[0] = hdev->minor_class;
+ cod[1] = hdev->major_class;
+ cod[2] = get_service_classes(hdev);
+
+ if (memcmp(cod, hdev->dev_class, 3) == 0)
+ return 0;
+
+ return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
+}
+
+static void service_cache_off(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ service_cache.work);
+
+ if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
+ return;
+
+ hci_dev_lock(hdev);
+
+ update_eir(hdev);
+ update_class(hdev);
+
+ hci_dev_unlock(hdev);
+}
+
+static void mgmt_init_hdev(struct hci_dev *hdev)
+{
+ if (!test_and_set_bit(HCI_MGMT, &hdev->flags))
+ INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
+
+ if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->flags))
+ schedule_delayed_work(&hdev->service_cache,
+ msecs_to_jiffies(SERVICE_CACHE_TIMEOUT));
+}
+
static int read_controller_info(struct sock *sk, u16 index)
{
struct mgmt_rp_read_info rp;
@@ -173,40 +509,33 @@ static int read_controller_info(struct sock *sk, u16 index)
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_READ_INFO, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_READ_INFO,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_del_off_timer(hdev);
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+ cancel_delayed_work_sync(&hdev->power_off);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
- set_bit(HCI_MGMT, &hdev->flags);
+ if (test_and_clear_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags))
+ mgmt_init_hdev(hdev);
memset(&rp, 0, sizeof(rp));
- rp.type = hdev->dev_type;
+ bacpy(&rp.bdaddr, &hdev->bdaddr);
- rp.powered = test_bit(HCI_UP, &hdev->flags);
- rp.connectable = test_bit(HCI_PSCAN, &hdev->flags);
- rp.discoverable = test_bit(HCI_ISCAN, &hdev->flags);
- rp.pairable = test_bit(HCI_PSCAN, &hdev->flags);
+ rp.version = hdev->hci_ver;
- if (test_bit(HCI_AUTH, &hdev->flags))
- rp.sec_mode = 3;
- else if (hdev->ssp_mode > 0)
- rp.sec_mode = 4;
- else
- rp.sec_mode = 2;
+ put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
+
+ rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
+ rp.current_settings = cpu_to_le32(get_current_settings(hdev));
- bacpy(&rp.bdaddr, &hdev->bdaddr);
- memcpy(rp.features, hdev->features, 8);
memcpy(rp.dev_class, hdev->dev_class, 3);
- put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
- rp.hci_ver = hdev->hci_ver;
- put_unaligned_le16(hdev->hci_rev, &rp.hci_rev);
memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp));
@@ -220,7 +549,8 @@ static void mgmt_pending_free(struct pending_cmd *cmd)
}
static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
- u16 index, void *data, u16 len)
+ struct hci_dev *hdev,
+ void *data, u16 len)
{
struct pending_cmd *cmd;
@@ -229,7 +559,7 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
return NULL;
cmd->opcode = opcode;
- cmd->index = index;
+ cmd->index = hdev->id;
cmd->param = kmalloc(len, GFP_ATOMIC);
if (!cmd->param) {
@@ -243,48 +573,36 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
cmd->sk = sk;
sock_hold(sk);
- list_add(&cmd->list, &cmd_list);
+ list_add(&cmd->list, &hdev->mgmt_pending);
return cmd;
}
-static void mgmt_pending_foreach(u16 opcode, int index,
+static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
void (*cb)(struct pending_cmd *cmd, void *data),
void *data)
{
struct list_head *p, *n;
- list_for_each_safe(p, n, &cmd_list) {
+ list_for_each_safe(p, n, &hdev->mgmt_pending) {
struct pending_cmd *cmd;
cmd = list_entry(p, struct pending_cmd, list);
- if (cmd->opcode != opcode)
- continue;
-
- if (index >= 0 && cmd->index != index)
+ if (opcode > 0 && cmd->opcode != opcode)
continue;
cb(cmd, data);
}
}
-static struct pending_cmd *mgmt_pending_find(u16 opcode, int index)
+static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
{
- struct list_head *p;
-
- list_for_each(p, &cmd_list) {
- struct pending_cmd *cmd;
-
- cmd = list_entry(p, struct pending_cmd, list);
-
- if (cmd->opcode != opcode)
- continue;
-
- if (index >= 0 && cmd->index != index)
- continue;
+ struct pending_cmd *cmd;
- return cmd;
+ list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
+ if (cmd->opcode == opcode)
+ return cmd;
}
return NULL;
@@ -296,6 +614,13 @@ static void mgmt_pending_remove(struct pending_cmd *cmd)
mgmt_pending_free(cmd);
}
+static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
+{
+ __le32 settings = cpu_to_le32(get_current_settings(hdev));
+
+ return cmd_complete(sk, hdev->id, opcode, &settings, sizeof(settings));
+}
+
static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
{
struct mgmt_mode *cp;
@@ -308,40 +633,43 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
BT_DBG("request for hci%u", index);
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_POWERED, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_SET_POWERED,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_SET_POWERED,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
up = test_bit(HCI_UP, &hdev->flags);
if ((cp->val && up) || (!cp->val && !up)) {
- err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EALREADY);
+ err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
goto failed;
}
- if (mgmt_pending_find(MGMT_OP_SET_POWERED, index)) {
- err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EBUSY);
+ if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_POWERED,
+ MGMT_STATUS_BUSY);
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
if (cp->val)
- queue_work(hdev->workqueue, &hdev->power_on);
+ schedule_work(&hdev->power_on);
else
- queue_work(hdev->workqueue, &hdev->power_off);
+ schedule_work(&hdev->power_off.work);
err = 0;
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
@@ -349,7 +677,7 @@ failed:
static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
u16 len)
{
- struct mgmt_mode *cp;
+ struct mgmt_cp_set_discoverable *cp;
struct hci_dev *hdev;
struct pending_cmd *cmd;
u8 scan;
@@ -360,32 +688,36 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
BT_DBG("request for hci%u", index);
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
+ err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_NOT_POWERED);
goto failed;
}
- if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
- mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
- err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EBUSY);
+ if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
+ mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_BUSY);
goto failed;
}
if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) &&
test_bit(HCI_PSCAN, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EALREADY);
+ err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -395,13 +727,18 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
if (cp->val)
scan |= SCAN_INQUIRY;
+ else
+ cancel_delayed_work(&hdev->discov_off);
err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
if (err < 0)
mgmt_pending_remove(cmd);
+ if (cp->val)
+ hdev->discov_timeout = get_unaligned_le16(&cp->timeout);
+
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -421,31 +758,35 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
BT_DBG("request for hci%u", index);
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
+ err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
+ MGMT_STATUS_NOT_POWERED);
goto failed;
}
- if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
- mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
- err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EBUSY);
+ if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
+ mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+ err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
+ MGMT_STATUS_BUSY);
goto failed;
}
if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EALREADY);
+ err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -461,14 +802,14 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
-static int mgmt_event(u16 event, u16 index, void *data, u16 data_len,
- struct sock *skip_sk)
+static int mgmt_event(u16 event, struct hci_dev *hdev, void *data,
+ u16 data_len, struct sock *skip_sk)
{
struct sk_buff *skb;
struct mgmt_hdr *hdr;
@@ -481,7 +822,10 @@ static int mgmt_event(u16 event, u16 index, void *data, u16 data_len,
hdr = (void *) skb_put(skb, sizeof(*hdr));
hdr->opcode = cpu_to_le16(event);
- hdr->index = cpu_to_le16(index);
+ if (hdev)
+ hdr->index = cpu_to_le16(hdev->id);
+ else
+ hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
hdr->len = cpu_to_le16(data_len);
if (data)
@@ -493,20 +837,12 @@ static int mgmt_event(u16 event, u16 index, void *data, u16 data_len,
return 0;
}
-static int send_mode_rsp(struct sock *sk, u16 opcode, u16 index, u8 val)
-{
- struct mgmt_mode rp;
-
- rp.val = val;
-
- return cmd_complete(sk, index, opcode, &rp, sizeof(rp));
-}
-
static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
u16 len)
{
- struct mgmt_mode *cp, ev;
+ struct mgmt_mode *cp;
struct hci_dev *hdev;
+ __le32 ev;
int err;
cp = (void *) data;
@@ -514,211 +850,36 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
BT_DBG("request for hci%u", index);
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (cp->val)
set_bit(HCI_PAIRABLE, &hdev->flags);
else
clear_bit(HCI_PAIRABLE, &hdev->flags);
- err = send_mode_rsp(sk, MGMT_OP_SET_PAIRABLE, index, cp->val);
+ err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
if (err < 0)
goto failed;
- ev.val = cp->val;
+ ev = cpu_to_le32(get_current_settings(hdev));
- err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk);
+ err = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), sk);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
-#define EIR_FLAGS 0x01 /* flags */
-#define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */
-#define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */
-#define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */
-#define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */
-#define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */
-#define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */
-#define EIR_NAME_SHORT 0x08 /* shortened local name */
-#define EIR_NAME_COMPLETE 0x09 /* complete local name */
-#define EIR_TX_POWER 0x0A /* transmit power level */
-#define EIR_DEVICE_ID 0x10 /* device ID */
-
-#define PNP_INFO_SVCLASS_ID 0x1200
-
-static u8 bluetooth_base_uuid[] = {
- 0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80,
- 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-static u16 get_uuid16(u8 *uuid128)
-{
- u32 val;
- int i;
-
- for (i = 0; i < 12; i++) {
- if (bluetooth_base_uuid[i] != uuid128[i])
- return 0;
- }
-
- memcpy(&val, &uuid128[12], 4);
-
- val = le32_to_cpu(val);
- if (val > 0xffff)
- return 0;
-
- return (u16) val;
-}
-
-static void create_eir(struct hci_dev *hdev, u8 *data)
-{
- u8 *ptr = data;
- u16 eir_len = 0;
- u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)];
- int i, truncated = 0;
- struct list_head *p;
- size_t name_len;
-
- name_len = strlen(hdev->dev_name);
-
- if (name_len > 0) {
- /* EIR Data type */
- if (name_len > 48) {
- name_len = 48;
- ptr[1] = EIR_NAME_SHORT;
- } else
- ptr[1] = EIR_NAME_COMPLETE;
-
- /* EIR Data length */
- ptr[0] = name_len + 1;
-
- memcpy(ptr + 2, hdev->dev_name, name_len);
-
- eir_len += (name_len + 2);
- ptr += (name_len + 2);
- }
-
- memset(uuid16_list, 0, sizeof(uuid16_list));
-
- /* Group all UUID16 types */
- list_for_each(p, &hdev->uuids) {
- struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
- u16 uuid16;
-
- uuid16 = get_uuid16(uuid->uuid);
- if (uuid16 == 0)
- return;
-
- if (uuid16 < 0x1100)
- continue;
-
- if (uuid16 == PNP_INFO_SVCLASS_ID)
- continue;
-
- /* Stop if not enough space to put next UUID */
- if (eir_len + 2 + sizeof(u16) > HCI_MAX_EIR_LENGTH) {
- truncated = 1;
- break;
- }
-
- /* Check for duplicates */
- for (i = 0; uuid16_list[i] != 0; i++)
- if (uuid16_list[i] == uuid16)
- break;
-
- if (uuid16_list[i] == 0) {
- uuid16_list[i] = uuid16;
- eir_len += sizeof(u16);
- }
- }
-
- if (uuid16_list[0] != 0) {
- u8 *length = ptr;
-
- /* EIR Data type */
- ptr[1] = truncated ? EIR_UUID16_SOME : EIR_UUID16_ALL;
-
- ptr += 2;
- eir_len += 2;
-
- for (i = 0; uuid16_list[i] != 0; i++) {
- *ptr++ = (uuid16_list[i] & 0x00ff);
- *ptr++ = (uuid16_list[i] & 0xff00) >> 8;
- }
-
- /* EIR Data length */
- *length = (i * sizeof(u16)) + 1;
- }
-}
-
-static int update_eir(struct hci_dev *hdev)
-{
- struct hci_cp_write_eir cp;
-
- if (!(hdev->features[6] & LMP_EXT_INQ))
- return 0;
-
- if (hdev->ssp_mode == 0)
- return 0;
-
- if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
- return 0;
-
- memset(&cp, 0, sizeof(cp));
-
- create_eir(hdev, cp.data);
-
- if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
- return 0;
-
- memcpy(hdev->eir, cp.data, sizeof(cp.data));
-
- return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
-}
-
-static u8 get_service_classes(struct hci_dev *hdev)
-{
- struct list_head *p;
- u8 val = 0;
-
- list_for_each(p, &hdev->uuids) {
- struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
-
- val |= uuid->svc_hint;
- }
-
- return val;
-}
-
-static int update_class(struct hci_dev *hdev)
-{
- u8 cod[3];
-
- BT_DBG("%s", hdev->name);
-
- if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
- return 0;
-
- cod[0] = hdev->minor_class;
- cod[1] = hdev->major_class;
- cod[2] = get_service_classes(hdev);
-
- if (memcmp(cod, hdev->dev_class, 3) == 0)
- return 0;
-
- return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
-}
-
static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
{
struct mgmt_cp_add_uuid *cp;
@@ -731,13 +892,15 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
BT_DBG("request for hci%u", index);
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_ADD_UUID, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_ADD_UUID,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_ADD_UUID,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
if (!uuid) {
@@ -761,7 +924,7 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -780,13 +943,15 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
BT_DBG("request for hci%u", index);
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_REMOVE_UUID,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_REMOVE_UUID,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
err = hci_uuids_clear(hdev);
@@ -806,7 +971,8 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
}
if (found == 0) {
- err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENOENT);
+ err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID,
+ MGMT_STATUS_INVALID_PARAMS);
goto unlock;
}
@@ -821,7 +987,7 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0);
unlock:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -839,97 +1005,71 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
BT_DBG("request for hci%u", index);
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
hdev->major_class = cp->major;
hdev->minor_class = cp->minor;
+ if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags)) {
+ hci_dev_unlock(hdev);
+ cancel_delayed_work_sync(&hdev->service_cache);
+ hci_dev_lock(hdev);
+ update_eir(hdev);
+ }
+
err = update_class(hdev);
if (err == 0)
err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
-
- return err;
-}
-
-static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
- u16 len)
-{
- struct hci_dev *hdev;
- struct mgmt_cp_set_service_cache *cp;
- int err;
-
- cp = (void *) data;
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, EINVAL);
-
- hdev = hci_dev_get(index);
- if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV);
-
- hci_dev_lock_bh(hdev);
-
- BT_DBG("hci%u enable %d", index, cp->enable);
-
- if (cp->enable) {
- set_bit(HCI_SERVICE_CACHE, &hdev->flags);
- err = 0;
- } else {
- clear_bit(HCI_SERVICE_CACHE, &hdev->flags);
- err = update_class(hdev);
- if (err == 0)
- err = update_eir(hdev);
- }
-
- if (err == 0)
- err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
- 0);
-
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
-static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
+static int load_link_keys(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
{
struct hci_dev *hdev;
- struct mgmt_cp_load_keys *cp;
+ struct mgmt_cp_load_link_keys *cp;
u16 key_count, expected_len;
int i;
cp = (void *) data;
if (len < sizeof(*cp))
- return -EINVAL;
+ return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
key_count = get_unaligned_le16(&cp->key_count);
- expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
+ expected_len = sizeof(*cp) + key_count *
+ sizeof(struct mgmt_link_key_info);
if (expected_len != len) {
- BT_ERR("load_keys: expected %u bytes, got %u bytes",
+ BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
len, expected_len);
- return -EINVAL;
+ return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
}
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_LOAD_KEYS, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
key_count);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
hci_link_keys_clear(hdev);
@@ -941,58 +1081,84 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
for (i = 0; i < key_count; i++) {
- struct mgmt_key_info *key = &cp->keys[i];
+ struct mgmt_link_key_info *key = &cp->keys[i];
hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type,
key->pin_len);
}
- hci_dev_unlock_bh(hdev);
+ cmd_complete(sk, index, MGMT_OP_LOAD_LINK_KEYS, NULL, 0);
+
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return 0;
}
-static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
+static int remove_keys(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
{
struct hci_dev *hdev;
- struct mgmt_cp_remove_key *cp;
+ struct mgmt_cp_remove_keys *cp;
+ struct mgmt_rp_remove_keys rp;
+ struct hci_cp_disconnect dc;
+ struct pending_cmd *cmd;
struct hci_conn *conn;
int err;
cp = (void *) data;
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
+
+ memset(&rp, 0, sizeof(rp));
+ bacpy(&rp.bdaddr, &cp->bdaddr);
+ rp.status = MGMT_STATUS_FAILED;
err = hci_remove_link_key(hdev, &cp->bdaddr);
if (err < 0) {
- err = cmd_status(sk, index, MGMT_OP_REMOVE_KEY, -err);
+ rp.status = MGMT_STATUS_NOT_PAIRED;
goto unlock;
}
- err = 0;
-
- if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect)
+ if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect) {
+ err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp,
+ sizeof(rp));
goto unlock;
+ }
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
- if (conn) {
- struct hci_cp_disconnect dc;
+ if (!conn) {
+ err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp,
+ sizeof(rp));
+ goto unlock;
+ }
- put_unaligned_le16(conn->handle, &dc.handle);
- dc.reason = 0x13; /* Remote User Terminated Connection */
- err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+ cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_KEYS, hdev, cp, sizeof(*cp));
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
}
+ put_unaligned_le16(conn->handle, &dc.handle);
+ dc.reason = 0x13; /* Remote User Terminated Connection */
+ err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
unlock:
- hci_dev_unlock_bh(hdev);
+ if (err < 0)
+ err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp,
+ sizeof(rp));
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1012,21 +1178,25 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
cp = (void *) data;
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_DISCONNECT, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN);
+ err = cmd_status(sk, index, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_NOT_POWERED);
goto failed;
}
- if (mgmt_pending_find(MGMT_OP_DISCONNECT, index)) {
- err = cmd_status(sk, index, MGMT_OP_DISCONNECT, EBUSY);
+ if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
+ err = cmd_status(sk, index, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_BUSY);
goto failed;
}
@@ -1035,11 +1205,12 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
if (!conn) {
- err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENOTCONN);
+ err = cmd_status(sk, index, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_NOT_CONNECTED);
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -1053,16 +1224,36 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
+static u8 link_to_mgmt(u8 link_type, u8 addr_type)
+{
+ switch (link_type) {
+ case LE_LINK:
+ switch (addr_type) {
+ case ADDR_LE_DEV_PUBLIC:
+ return MGMT_ADDR_LE_PUBLIC;
+ case ADDR_LE_DEV_RANDOM:
+ return MGMT_ADDR_LE_RANDOM;
+ default:
+ return MGMT_ADDR_INVALID;
+ }
+ case ACL_LINK:
+ return MGMT_ADDR_BREDR;
+ default:
+ return MGMT_ADDR_INVALID;
+ }
+}
+
static int get_connections(struct sock *sk, u16 index)
{
struct mgmt_rp_get_connections *rp;
struct hci_dev *hdev;
+ struct hci_conn *c;
struct list_head *p;
size_t rp_len;
u16 count;
@@ -1072,16 +1263,17 @@ static int get_connections(struct sock *sk, u16 index)
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
count = 0;
list_for_each(p, &hdev->conn_hash.list) {
count++;
}
- rp_len = sizeof(*rp) + (count * sizeof(bdaddr_t));
+ rp_len = sizeof(*rp) + (count * sizeof(struct mgmt_addr_info));
rp = kmalloc(rp_len, GFP_ATOMIC);
if (!rp) {
err = -ENOMEM;
@@ -1091,17 +1283,22 @@ static int get_connections(struct sock *sk, u16 index)
put_unaligned_le16(count, &rp->conn_count);
i = 0;
- list_for_each(p, &hdev->conn_hash.list) {
- struct hci_conn *c = list_entry(p, struct hci_conn, list);
-
- bacpy(&rp->conn[i++], &c->dst);
+ list_for_each_entry(c, &hdev->conn_hash.list, list) {
+ bacpy(&rp->addr[i].bdaddr, &c->dst);
+ rp->addr[i].type = link_to_mgmt(c->type, c->dst_type);
+ if (rp->addr[i].type == MGMT_ADDR_INVALID)
+ continue;
+ i++;
}
+ /* Recalculate length in case of filtered SCO connections, etc */
+ rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
+
err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len);
unlock:
kfree(rp);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
@@ -1112,7 +1309,7 @@ static int send_pin_code_neg_reply(struct sock *sk, u16 index,
struct pending_cmd *cmd;
int err;
- cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index, cp,
+ cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
sizeof(*cp));
if (!cmd)
return -ENOMEM;
@@ -1141,22 +1338,26 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
cp = (void *) data;
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
+ err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
+ MGMT_STATUS_NOT_POWERED);
goto failed;
}
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
if (!conn) {
- err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENOTCONN);
+ err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
+ MGMT_STATUS_NOT_CONNECTED);
goto failed;
}
@@ -1168,12 +1369,12 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
err = send_pin_code_neg_reply(sk, index, hdev, &ncp);
if (err >= 0)
err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
- EINVAL);
+ MGMT_STATUS_INVALID_PARAMS);
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -1188,7 +1389,7 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1207,25 +1408,25 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
if (len != sizeof(*cp))
return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
- EINVAL);
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
- ENODEV);
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
- ENETDOWN);
+ MGMT_STATUS_NOT_POWERED);
goto failed;
}
err = send_pin_code_neg_reply(sk, index, hdev, cp);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1242,20 +1443,22 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
cp = (void *) data;
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
hdev->io_capability = cp->io_capability;
BT_DBG("%s IO capability set to 0x%02x", hdev->name,
hdev->io_capability);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0);
@@ -1264,19 +1467,12 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- struct list_head *p;
-
- list_for_each(p, &cmd_list) {
- struct pending_cmd *cmd;
-
- cmd = list_entry(p, struct pending_cmd, list);
+ struct pending_cmd *cmd;
+ list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
continue;
- if (cmd->index != hdev->id)
- continue;
-
if (cmd->user_data != conn)
continue;
@@ -1291,7 +1487,8 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
struct mgmt_rp_pair_device rp;
struct hci_conn *conn = cmd->user_data;
- bacpy(&rp.bdaddr, &conn->dst);
+ bacpy(&rp.addr.bdaddr, &conn->dst);
+ rp.addr.type = link_to_mgmt(conn->type, conn->dst_type);
rp.status = status;
cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, &rp, sizeof(rp));
@@ -1313,20 +1510,18 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status)
BT_DBG("status %u", status);
cmd = find_pairing(conn);
- if (!cmd) {
+ if (!cmd)
BT_DBG("Unable to find a pending command");
- return;
- }
-
- pairing_complete(cmd, status);
+ else
+ pairing_complete(cmd, status);
}
static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
{
struct hci_dev *hdev;
struct mgmt_cp_pair_device *cp;
+ struct mgmt_rp_pair_device rp;
struct pending_cmd *cmd;
- struct adv_entry *entry;
u8 sec_level, auth_type;
struct hci_conn *conn;
int err;
@@ -1336,13 +1531,15 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
cp = (void *) data;
if (len != sizeof(*cp))
- return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
sec_level = BT_SECURITY_MEDIUM;
if (cp->io_cap == 0x03)
@@ -1350,26 +1547,33 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
else
auth_type = HCI_AT_DEDICATED_BONDING_MITM;
- entry = hci_find_adv_entry(hdev, &cp->bdaddr);
- if (entry)
- conn = hci_connect(hdev, LE_LINK, &cp->bdaddr, sec_level,
+ if (cp->addr.type == MGMT_ADDR_BREDR)
+ conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, sec_level,
auth_type);
else
- conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level,
+ conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, sec_level,
auth_type);
+ memset(&rp, 0, sizeof(rp));
+ bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+ rp.addr.type = cp->addr.type;
+
if (IS_ERR(conn)) {
- err = PTR_ERR(conn);
+ rp.status = -PTR_ERR(conn);
+ err = cmd_complete(sk, index, MGMT_OP_PAIR_DEVICE,
+ &rp, sizeof(rp));
goto unlock;
}
if (conn->connect_cfm_cb) {
hci_conn_put(conn);
- err = cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EBUSY);
+ rp.status = EBUSY;
+ err = cmd_complete(sk, index, MGMT_OP_PAIR_DEVICE,
+ &rp, sizeof(rp));
goto unlock;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
hci_conn_put(conn);
@@ -1377,7 +1581,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
}
/* For LE, just connecting isn't a proof that the pairing finished */
- if (!entry)
+ if (cp->addr.type == MGMT_ADDR_BREDR)
conn->connect_cfm_cb = pairing_complete_cb;
conn->security_cfm_cb = pairing_complete_cb;
@@ -1392,62 +1596,144 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
err = 0;
unlock:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
-static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
- u16 len, int success)
+static int user_pairing_resp(struct sock *sk, u16 index, bdaddr_t *bdaddr,
+ u16 mgmt_op, u16 hci_op, __le32 passkey)
{
- struct mgmt_cp_user_confirm_reply *cp = (void *) data;
- u16 mgmt_op, hci_op;
struct pending_cmd *cmd;
struct hci_dev *hdev;
+ struct hci_conn *conn;
int err;
- BT_DBG("");
-
- if (success) {
- mgmt_op = MGMT_OP_USER_CONFIRM_REPLY;
- hci_op = HCI_OP_USER_CONFIRM_REPLY;
- } else {
- mgmt_op = MGMT_OP_USER_CONFIRM_NEG_REPLY;
- hci_op = HCI_OP_USER_CONFIRM_NEG_REPLY;
- }
-
- if (len != sizeof(*cp))
- return cmd_status(sk, index, mgmt_op, EINVAL);
-
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, mgmt_op, ENODEV);
+ return cmd_status(sk, index, mgmt_op,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
- err = cmd_status(sk, index, mgmt_op, ENETDOWN);
- goto failed;
+ err = cmd_status(sk, index, mgmt_op, MGMT_STATUS_NOT_POWERED);
+ goto done;
+ }
+
+ /*
+ * Check for an existing ACL link; if one is present, pair
+ * via HCI commands.
+ *
+ * If no ACL link is present, check for an LE link and if
+ * present, pair via the SMP engine.
+ *
+ * If neither ACL nor LE links are present, fail with error.
+ */
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
+ if (!conn) {
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
+ if (!conn) {
+ err = cmd_status(sk, index, mgmt_op,
+ MGMT_STATUS_NOT_CONNECTED);
+ goto done;
+ }
+
+ /* Continue with pairing via SMP */
+
+ err = cmd_status(sk, index, mgmt_op, MGMT_STATUS_SUCCESS);
+ goto done;
}
- cmd = mgmt_pending_add(sk, mgmt_op, index, data, len);
+ cmd = mgmt_pending_add(sk, mgmt_op, hdev, bdaddr, sizeof(*bdaddr));
if (!cmd) {
err = -ENOMEM;
- goto failed;
+ goto done;
}
- err = hci_send_cmd(hdev, hci_op, sizeof(cp->bdaddr), &cp->bdaddr);
+ /* Continue with pairing via HCI */
+ if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
+ struct hci_cp_user_passkey_reply cp;
+
+ bacpy(&cp.bdaddr, bdaddr);
+ cp.passkey = passkey;
+ err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
+ } else
+ err = hci_send_cmd(hdev, hci_op, sizeof(*bdaddr), bdaddr);
+
if (err < 0)
mgmt_pending_remove(cmd);
-failed:
- hci_dev_unlock_bh(hdev);
+done:
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
+static int user_confirm_reply(struct sock *sk, u16 index, void *data, u16 len)
+{
+ struct mgmt_cp_user_confirm_reply *cp = (void *) data;
+
+ BT_DBG("");
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_USER_CONFIRM_REPLY,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ return user_pairing_resp(sk, index, &cp->bdaddr,
+ MGMT_OP_USER_CONFIRM_REPLY,
+ HCI_OP_USER_CONFIRM_REPLY, 0);
+}
+
+static int user_confirm_neg_reply(struct sock *sk, u16 index, void *data,
+ u16 len)
+{
+ struct mgmt_cp_user_confirm_neg_reply *cp = data;
+
+ BT_DBG("");
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_USER_CONFIRM_NEG_REPLY,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ return user_pairing_resp(sk, index, &cp->bdaddr,
+ MGMT_OP_USER_CONFIRM_NEG_REPLY,
+ HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
+}
+
+static int user_passkey_reply(struct sock *sk, u16 index, void *data, u16 len)
+{
+ struct mgmt_cp_user_passkey_reply *cp = (void *) data;
+
+ BT_DBG("");
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_USER_PASSKEY_REPLY,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ return user_pairing_resp(sk, index, &cp->bdaddr,
+ MGMT_OP_USER_PASSKEY_REPLY,
+ HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
+}
+
+static int user_passkey_neg_reply(struct sock *sk, u16 index, void *data,
+ u16 len)
+{
+ struct mgmt_cp_user_passkey_neg_reply *cp = (void *) data;
+
+ BT_DBG("");
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_USER_PASSKEY_NEG_REPLY,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ return user_pairing_resp(sk, index, &cp->bdaddr,
+ MGMT_OP_USER_PASSKEY_NEG_REPLY,
+ HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
+}
+
static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
u16 len)
{
@@ -1460,15 +1746,17 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
BT_DBG("");
if (len != sizeof(*mgmt_cp))
- return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, EINVAL);
+ return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME,
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -1481,7 +1769,7 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1498,28 +1786,29 @@ static int read_local_oob_data(struct sock *sk, u16 index)
hdev = hci_dev_get(index);
if (!hdev)
return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
- ENODEV);
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (!test_bit(HCI_UP, &hdev->flags)) {
err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
- ENETDOWN);
+ MGMT_STATUS_NOT_POWERED);
goto unlock;
}
if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
- EOPNOTSUPP);
+ MGMT_STATUS_NOT_SUPPORTED);
goto unlock;
}
- if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index)) {
- err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, EBUSY);
+ if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
+ err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
+ MGMT_STATUS_BUSY);
goto unlock;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, index, NULL, 0);
+ cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
if (!cmd) {
err = -ENOMEM;
goto unlock;
@@ -1530,7 +1819,7 @@ static int read_local_oob_data(struct sock *sk, u16 index)
mgmt_pending_remove(cmd);
unlock:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1547,24 +1836,25 @@ static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data,
if (len != sizeof(*cp))
return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
- EINVAL);
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
- ENODEV);
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash,
cp->randomizer);
if (err < 0)
- err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, -err);
+ err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
+ MGMT_STATUS_FAILED);
else
err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL,
0);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1581,62 +1871,68 @@ static int remove_remote_oob_data(struct sock *sk, u16 index,
if (len != sizeof(*cp))
return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
- EINVAL);
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
- ENODEV);
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
err = hci_remove_remote_oob_data(hdev, &cp->bdaddr);
if (err < 0)
err = cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
- -err);
+ MGMT_STATUS_INVALID_PARAMS);
else
err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
NULL, 0);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
-static int start_discovery(struct sock *sk, u16 index)
+static int start_discovery(struct sock *sk, u16 index,
+ unsigned char *data, u16 len)
{
- u8 lap[3] = { 0x33, 0x8b, 0x9e };
- struct hci_cp_inquiry cp;
+ struct mgmt_cp_start_discovery *cp = (void *) data;
struct pending_cmd *cmd;
struct hci_dev *hdev;
int err;
BT_DBG("hci%u", index);
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_INVALID_PARAMS);
+
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
- cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, index, NULL, 0);
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_NOT_POWERED);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
- memset(&cp, 0, sizeof(cp));
- memcpy(&cp.lap, lap, 3);
- cp.length = 0x08;
- cp.num_rsp = 0x00;
-
- err = hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
+ err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
if (err < 0)
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1652,22 +1948,23 @@ static int stop_discovery(struct sock *sk, u16 index)
hdev = hci_dev_get(index);
if (!hdev)
- return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, ENODEV);
+ return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY,
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
- cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, index, NULL, 0);
+ cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
- err = hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+ err = hci_cancel_inquiry(hdev);
if (err < 0)
mgmt_pending_remove(cmd);
failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1677,7 +1974,6 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
u16 len)
{
struct hci_dev *hdev;
- struct pending_cmd *cmd;
struct mgmt_cp_block_device *cp = (void *) data;
int err;
@@ -1685,33 +1981,24 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
if (len != sizeof(*cp))
return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
- EINVAL);
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
- ENODEV);
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
-
- cmd = mgmt_pending_add(sk, MGMT_OP_BLOCK_DEVICE, index, NULL, 0);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
+ hci_dev_lock(hdev);
err = hci_blacklist_add(hdev, &cp->bdaddr);
-
if (err < 0)
- err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, -err);
+ err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
+ MGMT_STATUS_FAILED);
else
err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE,
NULL, 0);
- mgmt_pending_remove(cmd);
-
-failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1721,7 +2008,6 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
u16 len)
{
struct hci_dev *hdev;
- struct pending_cmd *cmd;
struct mgmt_cp_unblock_device *cp = (void *) data;
int err;
@@ -1729,33 +2015,25 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
if (len != sizeof(*cp))
return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
- EINVAL);
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
- ENODEV);
+ MGMT_STATUS_INVALID_PARAMS);
- hci_dev_lock_bh(hdev);
-
- cmd = mgmt_pending_add(sk, MGMT_OP_UNBLOCK_DEVICE, index, NULL, 0);
- if (!cmd) {
- err = -ENOMEM;
- goto failed;
- }
+ hci_dev_lock(hdev);
err = hci_blacklist_del(hdev, &cp->bdaddr);
if (err < 0)
- err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, -err);
+ err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS);
else
err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE,
NULL, 0);
- mgmt_pending_remove(cmd);
-
-failed:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1765,7 +2043,7 @@ static int set_fast_connectable(struct sock *sk, u16 index,
unsigned char *data, u16 len)
{
struct hci_dev *hdev;
- struct mgmt_cp_set_fast_connectable *cp = (void *) data;
+ struct mgmt_mode *cp = (void *) data;
struct hci_cp_write_page_scan_activity acp;
u8 type;
int err;
@@ -1774,16 +2052,16 @@ static int set_fast_connectable(struct sock *sk, u16 index,
if (len != sizeof(*cp))
return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
- EINVAL);
+ MGMT_STATUS_INVALID_PARAMS);
hdev = hci_dev_get(index);
if (!hdev)
return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
- ENODEV);
+ MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
- if (cp->enable) {
+ if (cp->val) {
type = PAGE_SCAN_TYPE_INTERLACED;
acp.interval = 0x0024; /* 22.5 msec page scan interval */
} else {
@@ -1797,14 +2075,14 @@ static int set_fast_connectable(struct sock *sk, u16 index,
sizeof(acp), &acp);
if (err < 0) {
err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
- -err);
+ MGMT_STATUS_FAILED);
goto done;
}
err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
if (err < 0) {
err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
- -err);
+ MGMT_STATUS_FAILED);
goto done;
}
@@ -1867,6 +2145,10 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
case MGMT_OP_SET_CONNECTABLE:
err = set_connectable(sk, index, buf + sizeof(*hdr), len);
break;
+ case MGMT_OP_SET_FAST_CONNECTABLE:
+ err = set_fast_connectable(sk, index, buf + sizeof(*hdr),
+ len);
+ break;
case MGMT_OP_SET_PAIRABLE:
err = set_pairable(sk, index, buf + sizeof(*hdr), len);
break;
@@ -1879,14 +2161,11 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
case MGMT_OP_SET_DEV_CLASS:
err = set_dev_class(sk, index, buf + sizeof(*hdr), len);
break;
- case MGMT_OP_SET_SERVICE_CACHE:
- err = set_service_cache(sk, index, buf + sizeof(*hdr), len);
- break;
- case MGMT_OP_LOAD_KEYS:
- err = load_keys(sk, index, buf + sizeof(*hdr), len);
+ case MGMT_OP_LOAD_LINK_KEYS:
+ err = load_link_keys(sk, index, buf + sizeof(*hdr), len);
break;
- case MGMT_OP_REMOVE_KEY:
- err = remove_key(sk, index, buf + sizeof(*hdr), len);
+ case MGMT_OP_REMOVE_KEYS:
+ err = remove_keys(sk, index, buf + sizeof(*hdr), len);
break;
case MGMT_OP_DISCONNECT:
err = disconnect(sk, index, buf + sizeof(*hdr), len);
@@ -1907,10 +2186,18 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
err = pair_device(sk, index, buf + sizeof(*hdr), len);
break;
case MGMT_OP_USER_CONFIRM_REPLY:
- err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 1);
+ err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len);
break;
case MGMT_OP_USER_CONFIRM_NEG_REPLY:
- err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 0);
+ err = user_confirm_neg_reply(sk, index, buf + sizeof(*hdr),
+ len);
+ break;
+ case MGMT_OP_USER_PASSKEY_REPLY:
+ err = user_passkey_reply(sk, index, buf + sizeof(*hdr), len);
+ break;
+ case MGMT_OP_USER_PASSKEY_NEG_REPLY:
+ err = user_passkey_neg_reply(sk, index, buf + sizeof(*hdr),
+ len);
break;
case MGMT_OP_SET_LOCAL_NAME:
err = set_local_name(sk, index, buf + sizeof(*hdr), len);
@@ -1926,7 +2213,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
len);
break;
case MGMT_OP_START_DISCOVERY:
- err = start_discovery(sk, index);
+ err = start_discovery(sk, index, buf + sizeof(*hdr), len);
break;
case MGMT_OP_STOP_DISCOVERY:
err = stop_discovery(sk, index);
@@ -1937,13 +2224,10 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
case MGMT_OP_UNBLOCK_DEVICE:
err = unblock_device(sk, index, buf + sizeof(*hdr), len);
break;
- case MGMT_OP_SET_FAST_CONNECTABLE:
- err = set_fast_connectable(sk, index, buf + sizeof(*hdr),
- len);
- break;
default:
BT_DBG("Unknown op %u", opcode);
- err = cmd_status(sk, index, opcode, 0x01);
+ err = cmd_status(sk, index, opcode,
+ MGMT_STATUS_UNKNOWN_COMMAND);
break;
}
@@ -1957,30 +2241,39 @@ done:
return err;
}
-int mgmt_index_added(u16 index)
+static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
- return mgmt_event(MGMT_EV_INDEX_ADDED, index, NULL, 0, NULL);
+ u8 *status = data;
+
+ cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
+ mgmt_pending_remove(cmd);
}
-int mgmt_index_removed(u16 index)
+int mgmt_index_added(struct hci_dev *hdev)
{
- return mgmt_event(MGMT_EV_INDEX_REMOVED, index, NULL, 0, NULL);
+ return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
+}
+
+int mgmt_index_removed(struct hci_dev *hdev)
+{
+ u8 status = ENODEV;
+
+ mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
+
+ return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
}
struct cmd_lookup {
u8 val;
struct sock *sk;
+ struct hci_dev *hdev;
};
-static void mode_rsp(struct pending_cmd *cmd, void *data)
+static void settings_rsp(struct pending_cmd *cmd, void *data)
{
- struct mgmt_mode *cp = cmd->param;
struct cmd_lookup *match = data;
- if (cp->val != match->val)
- return;
-
- send_mode_rsp(cmd->sk, cmd->opcode, cmd->index, cp->val);
+ send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
list_del(&cmd->list);
@@ -1992,17 +2285,23 @@ static void mode_rsp(struct pending_cmd *cmd, void *data)
mgmt_pending_free(cmd);
}
-int mgmt_powered(u16 index, u8 powered)
+int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
- struct mgmt_mode ev;
- struct cmd_lookup match = { powered, NULL };
+ struct cmd_lookup match = { powered, NULL, hdev };
+ __le32 ev;
int ret;
- mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match);
+ mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
- ev.val = powered;
+ if (!powered) {
+ u8 status = ENETDOWN;
+ mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
+ }
+
+ ev = cpu_to_le32(get_current_settings(hdev));
- ret = mgmt_event(MGMT_EV_POWERED, index, &ev, sizeof(ev), match.sk);
+ ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev),
+ match.sk);
if (match.sk)
sock_put(match.sk);
@@ -2010,36 +2309,36 @@ int mgmt_powered(u16 index, u8 powered)
return ret;
}
-int mgmt_discoverable(u16 index, u8 discoverable)
+int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
- struct mgmt_mode ev;
- struct cmd_lookup match = { discoverable, NULL };
+ struct cmd_lookup match = { discoverable, NULL, hdev };
+ __le32 ev;
int ret;
- mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index, mode_rsp, &match);
+ mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp, &match);
- ev.val = discoverable;
+ ev = cpu_to_le32(get_current_settings(hdev));
- ret = mgmt_event(MGMT_EV_DISCOVERABLE, index, &ev, sizeof(ev),
+ ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev),
match.sk);
-
if (match.sk)
sock_put(match.sk);
return ret;
}
-int mgmt_connectable(u16 index, u8 connectable)
+int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
{
- struct mgmt_mode ev;
- struct cmd_lookup match = { connectable, NULL };
+ __le32 ev;
+ struct cmd_lookup match = { connectable, NULL, hdev };
int ret;
- mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, index, mode_rsp, &match);
+ mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, settings_rsp,
+ &match);
- ev.val = connectable;
+ ev = cpu_to_le32(get_current_settings(hdev));
- ret = mgmt_event(MGMT_EV_CONNECTABLE, index, &ev, sizeof(ev), match.sk);
+ ret = mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), match.sk);
if (match.sk)
sock_put(match.sk);
@@ -2047,9 +2346,25 @@ int mgmt_connectable(u16 index, u8 connectable)
return ret;
}
-int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
+int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
+{
+ u8 mgmt_err = mgmt_status(status);
+
+ if (scan & SCAN_PAGE)
+ mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
+ cmd_status_rsp, &mgmt_err);
+
+ if (scan & SCAN_INQUIRY)
+ mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
+ cmd_status_rsp, &mgmt_err);
+
+ return 0;
+}
+
+int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+ u8 persistent)
{
- struct mgmt_ev_new_key ev;
+ struct mgmt_ev_new_link_key ev;
memset(&ev, 0, sizeof(ev));
@@ -2059,17 +2374,18 @@ int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
memcpy(ev.key.val, key->val, 16);
ev.key.pin_len = key->pin_len;
- return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 link_type)
+int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type)
{
- struct mgmt_ev_connected ev;
+ struct mgmt_addr_info ev;
bacpy(&ev.bdaddr, bdaddr);
- ev.link_type = link_type;
+ ev.type = link_to_mgmt(link_type, addr_type);
- return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_CONNECTED, hdev, &ev, sizeof(ev), NULL);
}
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
@@ -2079,6 +2395,7 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data)
struct mgmt_rp_disconnect rp;
bacpy(&rp.bdaddr, &cp->bdaddr);
+ rp.status = 0;
cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, &rp, sizeof(rp));
@@ -2088,75 +2405,110 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data)
mgmt_pending_remove(cmd);
}
-int mgmt_disconnected(u16 index, bdaddr_t *bdaddr)
+static void remove_keys_rsp(struct pending_cmd *cmd, void *data)
+{
+ u8 *status = data;
+ struct mgmt_cp_remove_keys *cp = cmd->param;
+ struct mgmt_rp_remove_keys rp;
+
+ memset(&rp, 0, sizeof(rp));
+ bacpy(&rp.bdaddr, &cp->bdaddr);
+ if (status != NULL)
+ rp.status = *status;
+
+ cmd_complete(cmd->sk, cmd->index, MGMT_OP_REMOVE_KEYS, &rp,
+ sizeof(rp));
+
+ mgmt_pending_remove(cmd);
+}
+
+int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type)
{
- struct mgmt_ev_disconnected ev;
+ struct mgmt_addr_info ev;
struct sock *sk = NULL;
int err;
- mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk);
+ mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
bacpy(&ev.bdaddr, bdaddr);
+ ev.type = link_to_mgmt(link_type, addr_type);
- err = mgmt_event(MGMT_EV_DISCONNECTED, index, &ev, sizeof(ev), sk);
+ err = mgmt_event(MGMT_EV_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
if (sk)
sock_put(sk);
+ mgmt_pending_foreach(MGMT_OP_REMOVE_KEYS, hdev, remove_keys_rsp, NULL);
+
return err;
}
-int mgmt_disconnect_failed(u16 index)
+int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status)
{
struct pending_cmd *cmd;
+ u8 mgmt_err = mgmt_status(status);
int err;
- cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, index);
+ cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
if (!cmd)
return -ENOENT;
- err = cmd_status(cmd->sk, index, MGMT_OP_DISCONNECT, EIO);
+ if (bdaddr) {
+ struct mgmt_rp_disconnect rp;
+
+ bacpy(&rp.bdaddr, bdaddr);
+ rp.status = status;
+
+ err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
+ &rp, sizeof(rp));
+ } else
+ err = cmd_status(cmd->sk, hdev->id, MGMT_OP_DISCONNECT,
+ mgmt_err);
mgmt_pending_remove(cmd);
return err;
}
-int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 status)
{
struct mgmt_ev_connect_failed ev;
- bacpy(&ev.bdaddr, bdaddr);
- ev.status = status;
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_mgmt(link_type, addr_type);
+ ev.status = mgmt_status(status);
- return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr, u8 secure)
+int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
struct mgmt_ev_pin_code_request ev;
bacpy(&ev.bdaddr, bdaddr);
ev.secure = secure;
- return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev),
+ return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
NULL);
}
-int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status)
{
struct pending_cmd *cmd;
struct mgmt_rp_pin_code_reply rp;
int err;
- cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, index);
+ cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
if (!cmd)
return -ENOENT;
bacpy(&rp.bdaddr, bdaddr);
- rp.status = status;
+ rp.status = mgmt_status(status);
- err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_REPLY, &rp,
+ err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, &rp,
sizeof(rp));
mgmt_pending_remove(cmd);
@@ -2164,20 +2516,21 @@ int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
return err;
}
-int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status)
{
struct pending_cmd *cmd;
struct mgmt_rp_pin_code_reply rp;
int err;
- cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, index);
+ cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
if (!cmd)
return -ENOENT;
bacpy(&rp.bdaddr, bdaddr);
- rp.status = status;
+ rp.status = mgmt_status(status);
- err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, &rp,
+ err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY, &rp,
sizeof(rp));
mgmt_pending_remove(cmd);
@@ -2185,97 +2538,119 @@ int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
return err;
}
-int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value,
- u8 confirm_hint)
+int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ __le32 value, u8 confirm_hint)
{
struct mgmt_ev_user_confirm_request ev;
- BT_DBG("hci%u", index);
+ BT_DBG("%s", hdev->name);
bacpy(&ev.bdaddr, bdaddr);
ev.confirm_hint = confirm_hint;
put_unaligned_le32(value, &ev.value);
- return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev),
+ return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
+ NULL);
+}
+
+int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+ struct mgmt_ev_user_passkey_request ev;
+
+ BT_DBG("%s", hdev->name);
+
+ bacpy(&ev.bdaddr, bdaddr);
+
+ return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
NULL);
}
-static int confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status,
- u8 opcode)
+static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status, u8 opcode)
{
struct pending_cmd *cmd;
struct mgmt_rp_user_confirm_reply rp;
int err;
- cmd = mgmt_pending_find(opcode, index);
+ cmd = mgmt_pending_find(opcode, hdev);
if (!cmd)
return -ENOENT;
bacpy(&rp.bdaddr, bdaddr);
- rp.status = status;
- err = cmd_complete(cmd->sk, index, opcode, &rp, sizeof(rp));
+ rp.status = mgmt_status(status);
+ err = cmd_complete(cmd->sk, hdev->id, opcode, &rp, sizeof(rp));
mgmt_pending_remove(cmd);
return err;
}
-int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status)
{
- return confirm_reply_complete(index, bdaddr, status,
+ return user_pairing_resp_complete(hdev, bdaddr, status,
MGMT_OP_USER_CONFIRM_REPLY);
}
-int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 status)
{
- return confirm_reply_complete(index, bdaddr, status,
+ return user_pairing_resp_complete(hdev, bdaddr, status,
MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
-int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status)
+{
+ return user_pairing_resp_complete(hdev, bdaddr, status,
+ MGMT_OP_USER_PASSKEY_REPLY);
+}
+
+int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 status)
+{
+ return user_pairing_resp_complete(hdev, bdaddr, status,
+ MGMT_OP_USER_PASSKEY_NEG_REPLY);
+}
+
+int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status)
{
struct mgmt_ev_auth_failed ev;
bacpy(&ev.bdaddr, bdaddr);
- ev.status = status;
+ ev.status = mgmt_status(status);
- return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status)
+int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
struct pending_cmd *cmd;
- struct hci_dev *hdev;
struct mgmt_cp_set_local_name ev;
int err;
memset(&ev, 0, sizeof(ev));
memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
- cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, index);
+ cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
if (!cmd)
goto send_event;
if (status) {
- err = cmd_status(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, EIO);
+ err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
+ mgmt_status(status));
goto failed;
}
- hdev = hci_dev_get(index);
- if (hdev) {
- hci_dev_lock_bh(hdev);
- update_eir(hdev);
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
- }
+ update_eir(hdev);
- err = cmd_complete(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, &ev,
+ err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, &ev,
sizeof(ev));
if (err < 0)
goto failed;
send_event:
- err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, index, &ev, sizeof(ev),
+ err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
cmd ? cmd->sk : NULL);
failed:
@@ -2284,29 +2659,31 @@ failed:
return err;
}
-int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer,
- u8 status)
+int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
+ u8 *randomizer, u8 status)
{
struct pending_cmd *cmd;
int err;
- BT_DBG("hci%u status %u", index, status);
+ BT_DBG("%s status %u", hdev->name, status);
- cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index);
+ cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
if (!cmd)
return -ENOENT;
if (status) {
- err = cmd_status(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
- EIO);
+ err = cmd_status(cmd->sk, hdev->id,
+ MGMT_OP_READ_LOCAL_OOB_DATA,
+ mgmt_status(status));
} else {
struct mgmt_rp_read_local_oob_data rp;
memcpy(rp.hash, hash, sizeof(rp.hash));
memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
- err = cmd_complete(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
- &rp, sizeof(rp));
+ err = cmd_complete(cmd->sk, hdev->id,
+ MGMT_OP_READ_LOCAL_OOB_DATA,
+ &rp, sizeof(rp));
}
mgmt_pending_remove(cmd);
@@ -2314,14 +2691,15 @@ int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer,
return err;
}
-int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
- u8 *eir)
+int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 *dev_class, s8 rssi, u8 *eir)
{
struct mgmt_ev_device_found ev;
memset(&ev, 0, sizeof(ev));
- bacpy(&ev.bdaddr, bdaddr);
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_mgmt(link_type, addr_type);
ev.rssi = rssi;
if (eir)
@@ -2330,10 +2708,10 @@ int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
if (dev_class)
memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
- return mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, &ev, sizeof(ev), NULL);
}
-int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name)
+int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name)
{
struct mgmt_ev_remote_name ev;
@@ -2342,37 +2720,79 @@ int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name)
bacpy(&ev.bdaddr, bdaddr);
memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
- return mgmt_event(MGMT_EV_REMOTE_NAME, index, &ev, sizeof(ev), NULL);
+ return mgmt_event(MGMT_EV_REMOTE_NAME, hdev, &ev, sizeof(ev), NULL);
+}
+
+int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
+{
+ struct pending_cmd *cmd;
+ int err;
+
+ cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
+ if (!cmd)
+ return -ENOENT;
+
+ err = cmd_status(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status));
+ mgmt_pending_remove(cmd);
+
+ return err;
+}
+
+int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
+{
+ struct pending_cmd *cmd;
+ int err;
+
+ cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
+ if (!cmd)
+ return -ENOENT;
+
+ err = cmd_status(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status));
+ mgmt_pending_remove(cmd);
+
+ return err;
}
-int mgmt_discovering(u16 index, u8 discovering)
+int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
- return mgmt_event(MGMT_EV_DISCOVERING, index, &discovering,
+ struct pending_cmd *cmd;
+
+ if (discovering)
+ cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
+ else
+ cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
+
+ if (cmd != NULL) {
+ cmd_complete(cmd->sk, hdev->id, cmd->opcode, NULL, 0);
+ mgmt_pending_remove(cmd);
+ }
+
+ return mgmt_event(MGMT_EV_DISCOVERING, hdev, &discovering,
sizeof(discovering), NULL);
}
-int mgmt_device_blocked(u16 index, bdaddr_t *bdaddr)
+int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct pending_cmd *cmd;
struct mgmt_ev_device_blocked ev;
- cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, index);
+ cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
bacpy(&ev.bdaddr, bdaddr);
- return mgmt_event(MGMT_EV_DEVICE_BLOCKED, index, &ev, sizeof(ev),
- cmd ? cmd->sk : NULL);
+ return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
+ cmd ? cmd->sk : NULL);
}
-int mgmt_device_unblocked(u16 index, bdaddr_t *bdaddr)
+int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct pending_cmd *cmd;
struct mgmt_ev_device_unblocked ev;
- cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, index);
+ cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
bacpy(&ev.bdaddr, bdaddr);
- return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, index, &ev, sizeof(ev),
- cmd ? cmd->sk : NULL);
+ return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
+ cmd ? cmd->sk : NULL);
}
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 2d28dfe9838..be6288cf854 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -65,7 +65,8 @@ static DEFINE_MUTEX(rfcomm_mutex);
static LIST_HEAD(session_list);
-static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len);
+static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len,
+ u32 priority);
static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci);
static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci);
static int rfcomm_queue_disc(struct rfcomm_dlc *d);
@@ -377,13 +378,11 @@ static void rfcomm_dlc_unlink(struct rfcomm_dlc *d)
static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci)
{
struct rfcomm_dlc *d;
- struct list_head *p;
- list_for_each(p, &s->dlcs) {
- d = list_entry(p, struct rfcomm_dlc, list);
+ list_for_each_entry(d, &s->dlcs, list)
if (d->dlci == dlci)
return d;
- }
+
return NULL;
}
@@ -749,19 +748,34 @@ void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *d
}
/* ---- RFCOMM frame sending ---- */
-static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len)
+static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len,
+ u32 priority)
{
struct socket *sock = s->sock;
+ struct sock *sk = sock->sk;
struct kvec iv = { data, len };
struct msghdr msg;
- BT_DBG("session %p len %d", s, len);
+ BT_DBG("session %p len %d priority %u", s, len, priority);
+
+ if (sk->sk_priority != priority) {
+ lock_sock(sk);
+ sk->sk_priority = priority;
+ release_sock(sk);
+ }
memset(&msg, 0, sizeof(msg));
return kernel_sendmsg(sock, &msg, &iv, 1, len);
}
+static int rfcomm_send_cmd(struct rfcomm_session *s, struct rfcomm_cmd *cmd)
+{
+ BT_DBG("%p cmd %u", s, cmd->ctrl);
+
+ return rfcomm_send_frame(s, (void *) cmd, sizeof(*cmd), HCI_PRIO_MAX);
+}
+
static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci)
{
struct rfcomm_cmd cmd;
@@ -773,7 +787,7 @@ static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci)
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+ return rfcomm_send_cmd(s, &cmd);
}
static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci)
@@ -787,7 +801,7 @@ static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci)
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+ return rfcomm_send_cmd(s, &cmd);
}
static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci)
@@ -801,7 +815,7 @@ static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci)
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+ return rfcomm_send_cmd(s, &cmd);
}
static int rfcomm_queue_disc(struct rfcomm_dlc *d)
@@ -815,6 +829,8 @@ static int rfcomm_queue_disc(struct rfcomm_dlc *d)
if (!skb)
return -ENOMEM;
+ skb->priority = HCI_PRIO_MAX;
+
cmd = (void *) __skb_put(skb, sizeof(*cmd));
cmd->addr = d->addr;
cmd->ctrl = __ctrl(RFCOMM_DISC, 1);
@@ -837,7 +853,7 @@ static int rfcomm_send_dm(struct rfcomm_session *s, u8 dlci)
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+ return rfcomm_send_cmd(s, &cmd);
}
static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type)
@@ -862,7 +878,7 @@ static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type)
*ptr = __fcs(buf); ptr++;
- return rfcomm_send_frame(s, buf, ptr - buf);
+ return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
}
static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d)
@@ -904,7 +920,7 @@ static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d
*ptr = __fcs(buf); ptr++;
- return rfcomm_send_frame(s, buf, ptr - buf);
+ return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
}
int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci,
@@ -942,7 +958,7 @@ int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci,
*ptr = __fcs(buf); ptr++;
- return rfcomm_send_frame(s, buf, ptr - buf);
+ return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
}
static int rfcomm_send_rls(struct rfcomm_session *s, int cr, u8 dlci, u8 status)
@@ -969,7 +985,7 @@ static int rfcomm_send_rls(struct rfcomm_session *s, int cr, u8 dlci, u8 status)
*ptr = __fcs(buf); ptr++;
- return rfcomm_send_frame(s, buf, ptr - buf);
+ return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
}
static int rfcomm_send_msc(struct rfcomm_session *s, int cr, u8 dlci, u8 v24_sig)
@@ -996,7 +1012,7 @@ static int rfcomm_send_msc(struct rfcomm_session *s, int cr, u8 dlci, u8 v24_sig
*ptr = __fcs(buf); ptr++;
- return rfcomm_send_frame(s, buf, ptr - buf);
+ return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
}
static int rfcomm_send_fcoff(struct rfcomm_session *s, int cr)
@@ -1018,7 +1034,7 @@ static int rfcomm_send_fcoff(struct rfcomm_session *s, int cr)
*ptr = __fcs(buf); ptr++;
- return rfcomm_send_frame(s, buf, ptr - buf);
+ return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
}
static int rfcomm_send_fcon(struct rfcomm_session *s, int cr)
@@ -1040,7 +1056,7 @@ static int rfcomm_send_fcon(struct rfcomm_session *s, int cr)
*ptr = __fcs(buf); ptr++;
- return rfcomm_send_frame(s, buf, ptr - buf);
+ return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
}
static int rfcomm_send_test(struct rfcomm_session *s, int cr, u8 *pattern, int len)
@@ -1091,7 +1107,7 @@ static int rfcomm_send_credits(struct rfcomm_session *s, u8 addr, u8 credits)
*ptr = __fcs(buf); ptr++;
- return rfcomm_send_frame(s, buf, ptr - buf);
+ return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
}
static void rfcomm_make_uih(struct sk_buff *skb, u8 addr)
@@ -1770,7 +1786,8 @@ static inline int rfcomm_process_tx(struct rfcomm_dlc *d)
return skb_queue_len(&d->tx_queue);
while (d->tx_credits && (skb = skb_dequeue(&d->tx_queue))) {
- err = rfcomm_send_frame(d->session, skb->data, skb->len);
+ err = rfcomm_send_frame(d->session, skb->data, skb->len,
+ skb->priority);
if (err < 0) {
skb_queue_head(&d->tx_queue, skb);
break;
@@ -2121,15 +2138,13 @@ static struct hci_cb rfcomm_cb = {
static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x)
{
struct rfcomm_session *s;
- struct list_head *pp, *p;
rfcomm_lock();
- list_for_each(p, &session_list) {
- s = list_entry(p, struct rfcomm_session, list);
- list_for_each(pp, &s->dlcs) {
+ list_for_each_entry(s, &session_list, list) {
+ struct rfcomm_dlc *d;
+ list_for_each_entry(d, &s->dlcs, list) {
struct sock *sk = s->sock->sk;
- struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list);
seq_printf(f, "%s %s %ld %d %d %d %d\n",
batostr(&bt_sk(sk)->src),
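The rfcomm/core.c hunks above (and the rfcomm/tty.c ones further down) convert open-coded list_for_each() plus list_entry() loops into list_for_each_entry(), which folds the container lookup into the loop header. The stand-alone sketch below re-implements simplified versions of the two macros purely for illustration; it does not use the kernel's <linux/list.h>, and struct rfcomm_dlc_demo is a made-up stand-in.

/* Simplified re-implementations of the list macros, for comparison only. */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

#define list_for_each(p, head) \
	for ((p) = (head)->next; (p) != (head); (p) = (p)->next)
#define list_for_each_entry(pos, head, member)                              \
	for ((pos) = list_entry((head)->next, __typeof__(*(pos)), member);  \
	     &(pos)->member != (head);                                       \
	     (pos) = list_entry((pos)->member.next, __typeof__(*(pos)), member))

struct rfcomm_dlc_demo {		/* stand-in for struct rfcomm_dlc */
	unsigned char dlci;
	struct list_head list;
};

/* old style: iterate list_head pointers, then convert to the entry */
static struct rfcomm_dlc_demo *get_old(struct list_head *dlcs, unsigned char dlci)
{
	struct list_head *p;

	list_for_each(p, dlcs) {
		struct rfcomm_dlc_demo *d = list_entry(p, struct rfcomm_dlc_demo, list);
		if (d->dlci == dlci)
			return d;
	}
	return NULL;
}

/* new style: the macro hands back the containing structure directly */
static struct rfcomm_dlc_demo *get_new(struct list_head *dlcs, unsigned char dlci)
{
	struct rfcomm_dlc_demo *d;

	list_for_each_entry(d, dlcs, list)
		if (d->dlci == dlci)
			return d;
	return NULL;
}

int main(void)
{
	struct list_head dlcs = { &dlcs, &dlcs };
	struct rfcomm_dlc_demo a = { 2, { &dlcs, &dlcs } };

	dlcs.next = dlcs.prev = &a.list;	/* one-element circular list */
	printf("%p %p\n", (void *)get_old(&dlcs, 2), (void *)get_new(&dlcs, 2));
	return 0;
}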
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 5417f612732..aea2bdd1510 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -600,6 +600,8 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
break;
}
+ skb->priority = sk->sk_priority;
+
err = rfcomm_dlc_send(d, skb);
if (err < 0) {
kfree_skb(skb);
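The one-line rfcomm/sock.c change above, together with the rfcomm_send_frame() changes earlier, lets a socket's priority follow each frame down to the underlying transport. The short user-space sketch below shows where that priority originates, under the assumption that the BlueZ development headers are available; error handling is omitted and the function name is illustrative.

/* Sketch: SO_PRIORITY fills sk->sk_priority, which the patched sendmsg
 * path copies into skb->priority and rfcomm_send_frame() then applies
 * to the underlying L2CAP socket before transmitting. */
#include <stdint.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/rfcomm.h>

int open_prioritized_rfcomm(const bdaddr_t *dst, uint8_t channel, int prio)
{
	struct sockaddr_rc addr = { 0 };
	int sk = socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM);

	/* feeds sk->sk_priority for every skb sent on this socket */
	setsockopt(sk, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));

	addr.rc_family = AF_BLUETOOTH;
	addr.rc_channel = channel;
	bacpy(&addr.rc_bdaddr, dst);
	connect(sk, (struct sockaddr *)&addr, sizeof(addr));

	return sk;
}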
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index c258796313e..fa8f4de53b9 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -34,6 +34,7 @@
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
+#include <linux/workqueue.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -65,7 +66,7 @@ struct rfcomm_dev {
struct rfcomm_dlc *dlc;
struct tty_struct *tty;
wait_queue_head_t wait;
- struct tasklet_struct wakeup_task;
+ struct work_struct wakeup_task;
struct device *tty_dev;
@@ -81,7 +82,7 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb);
static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err);
static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig);
-static void rfcomm_tty_wakeup(unsigned long arg);
+static void rfcomm_tty_wakeup(struct work_struct *work);
/* ---- Device functions ---- */
static void rfcomm_dev_destruct(struct rfcomm_dev *dev)
@@ -133,13 +134,10 @@ static inline void rfcomm_dev_put(struct rfcomm_dev *dev)
static struct rfcomm_dev *__rfcomm_dev_get(int id)
{
struct rfcomm_dev *dev;
- struct list_head *p;
- list_for_each(p, &rfcomm_dev_list) {
- dev = list_entry(p, struct rfcomm_dev, list);
+ list_for_each_entry(dev, &rfcomm_dev_list, list)
if (dev->id == id)
return dev;
- }
return NULL;
}
@@ -197,7 +195,7 @@ static DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
{
- struct rfcomm_dev *dev;
+ struct rfcomm_dev *dev, *entry;
struct list_head *head = &rfcomm_dev_list, *p;
int err = 0;
@@ -212,8 +210,8 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
if (req->dev_id < 0) {
dev->id = 0;
- list_for_each(p, &rfcomm_dev_list) {
- if (list_entry(p, struct rfcomm_dev, list)->id != dev->id)
+ list_for_each_entry(entry, &rfcomm_dev_list, list) {
+ if (entry->id != dev->id)
break;
dev->id++;
@@ -222,9 +220,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
} else {
dev->id = req->dev_id;
- list_for_each(p, &rfcomm_dev_list) {
- struct rfcomm_dev *entry = list_entry(p, struct rfcomm_dev, list);
-
+ list_for_each_entry(entry, &rfcomm_dev_list, list) {
if (entry->id == dev->id) {
err = -EADDRINUSE;
goto out;
@@ -257,7 +253,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
atomic_set(&dev->opened, 0);
init_waitqueue_head(&dev->wait);
- tasklet_init(&dev->wakeup_task, rfcomm_tty_wakeup, (unsigned long) dev);
+ INIT_WORK(&dev->wakeup_task, rfcomm_tty_wakeup);
skb_queue_head_init(&dev->pending);
@@ -351,7 +347,7 @@ static void rfcomm_wfree(struct sk_buff *skb)
struct rfcomm_dev *dev = (void *) skb->sk;
atomic_sub(skb->truesize, &dev->wmem_alloc);
if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags))
- tasklet_schedule(&dev->wakeup_task);
+ queue_work(system_nrt_wq, &dev->wakeup_task);
rfcomm_dev_put(dev);
}
@@ -455,9 +451,9 @@ static int rfcomm_release_dev(void __user *arg)
static int rfcomm_get_dev_list(void __user *arg)
{
+ struct rfcomm_dev *dev;
struct rfcomm_dev_list_req *dl;
struct rfcomm_dev_info *di;
- struct list_head *p;
int n = 0, size, err;
u16 dev_num;
@@ -479,8 +475,7 @@ static int rfcomm_get_dev_list(void __user *arg)
read_lock_bh(&rfcomm_dev_lock);
- list_for_each(p, &rfcomm_dev_list) {
- struct rfcomm_dev *dev = list_entry(p, struct rfcomm_dev, list);
+ list_for_each_entry(dev, &rfcomm_dev_list, list) {
if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
continue;
(di + n)->id = dev->id;
@@ -635,9 +630,10 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
}
/* ---- TTY functions ---- */
-static void rfcomm_tty_wakeup(unsigned long arg)
+static void rfcomm_tty_wakeup(struct work_struct *work)
{
- struct rfcomm_dev *dev = (void *) arg;
+ struct rfcomm_dev *dev = container_of(work, struct rfcomm_dev,
+ wakeup_task);
struct tty_struct *tty = dev->tty;
if (!tty)
return;
@@ -762,7 +758,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
rfcomm_dlc_close(dev->dlc, 0);
clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
- tasklet_kill(&dev->wakeup_task);
+ cancel_work_sync(&dev->wakeup_task);
rfcomm_dlc_lock(dev->dlc);
tty->driver_data = NULL;
@@ -1155,9 +1151,11 @@ static const struct tty_operations rfcomm_ops = {
int __init rfcomm_init_ttys(void)
{
+ int error;
+
rfcomm_tty_driver = alloc_tty_driver(RFCOMM_TTY_PORTS);
if (!rfcomm_tty_driver)
- return -1;
+ return -ENOMEM;
rfcomm_tty_driver->owner = THIS_MODULE;
rfcomm_tty_driver->driver_name = "rfcomm";
@@ -1172,10 +1170,11 @@ int __init rfcomm_init_ttys(void)
rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON;
tty_set_operations(rfcomm_tty_driver, &rfcomm_ops);
- if (tty_register_driver(rfcomm_tty_driver)) {
+ error = tty_register_driver(rfcomm_tty_driver);
+ if (error) {
BT_ERR("Can't register RFCOMM TTY driver");
put_tty_driver(rfcomm_tty_driver);
- return -1;
+ return error;
}
BT_INFO("RFCOMM TTY layer initialized");
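The rfcomm/tty.c hunks above swap the wakeup tasklet for a work item so the handler may sleep; the patch queues it on system_nrt_wq to keep the non-reentrancy a tasklet provided. Below is a minimal, self-contained module sketch of the same conversion pattern (INIT_WORK, container_of() in the handler, cancel_work_sync() at teardown); the names are illustrative and schedule_work() stands in for the patch's specific workqueue choice.

/* Minimal module sketch of the tasklet -> workqueue conversion pattern. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>

struct demo_dev {
	int id;
	struct work_struct wakeup_task;
};

static struct demo_dev demo;

static void demo_wakeup(struct work_struct *work)
{
	/* recover the owning structure, as rfcomm_tty_wakeup() now does */
	struct demo_dev *dev = container_of(work, struct demo_dev, wakeup_task);

	pr_info("demo: wakeup for dev %d (may sleep here)\n", dev->id);
}

static int __init demo_init(void)
{
	demo.id = 0;
	INIT_WORK(&demo.wakeup_task, demo_wakeup);	/* was tasklet_init() */
	schedule_work(&demo.wakeup_task);		/* was tasklet_schedule() */
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_work_sync(&demo.wakeup_task);		/* was tasklet_kill() */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");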
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index a324b009e34..725e10d487f 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -189,7 +189,7 @@ static int sco_connect(struct sock *sk)
if (!hdev)
return -EHOSTUNREACH;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
if (lmp_esco_capable(hdev) && !disable_esco)
type = ESCO_LINK;
@@ -225,7 +225,7 @@ static int sco_connect(struct sock *sk)
}
done:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 759b6357264..0b96737d0ad 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -181,7 +181,8 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
if (!skb)
return;
- hci_send_acl(conn->hcon, skb, 0);
+ skb->priority = HCI_PRIO_MAX;
+ hci_send_acl(conn->hchan, skb, 0);
mod_timer(&conn->security_timer, jiffies +
msecs_to_jiffies(SMP_TIMEOUT));
@@ -231,6 +232,18 @@ static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
return 0;
}
+static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send)
+{
+ if (send)
+ smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
+ &reason);
+
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->pend);
+ mgmt_auth_failed(conn->hcon->hdev, conn->dst, reason);
+ del_timer(&conn->security_timer);
+ smp_chan_destroy(conn);
+}
+
static void confirm_work(struct work_struct *work)
{
struct smp_chan *smp = container_of(work, struct smp_chan, confirm);
@@ -269,8 +282,7 @@ static void confirm_work(struct work_struct *work)
return;
error:
- smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason);
- smp_chan_destroy(conn);
+ smp_failure(conn, reason, 1);
}
static void random_work(struct work_struct *work)
@@ -353,8 +365,7 @@ static void random_work(struct work_struct *work)
return;
error:
- smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason);
- smp_chan_destroy(conn);
+ smp_failure(conn, reason, 1);
}
static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
@@ -378,7 +389,15 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
void smp_chan_destroy(struct l2cap_conn *conn)
{
- kfree(conn->smp_chan);
+ struct smp_chan *smp = conn->smp_chan;
+
+ clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend);
+
+ if (smp->tfm)
+ crypto_free_blkcipher(smp->tfm);
+
+ kfree(smp);
+ conn->smp_chan = NULL;
hci_conn_put(conn->hcon);
}
@@ -646,6 +665,7 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
break;
case SMP_CMD_PAIRING_FAIL:
+ smp_failure(conn, skb->data[0], 0);
reason = 0;
err = -EPERM;
break;
@@ -691,8 +711,7 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
done:
if (reason)
- smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
- &reason);
+ smp_failure(conn, reason, 1);
kfree_skb(skb);
return err;
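The smp.c hunks consolidate the duplicated "send SMP_CMD_PAIRING_FAIL, then tear down the channel" sequences into smp_failure(), with a flag so the same helper also covers the case where the remote side reported the failure and no reply is needed, and smp_chan_destroy() now frees the cipher and clears the pointer. The stand-alone sketch below mirrors only the shape of that refactor; every name in it is illustrative.

/* Sketch of the consolidated failure-path pattern, user space only. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_conn {
	char *chan_state;	/* stand-in for conn->smp_chan */
};

static void demo_send_fail(struct demo_conn *c, unsigned char reason)
{
	printf("-> PAIRING_FAIL reason 0x%02x\n", reason);
}

static void demo_chan_destroy(struct demo_conn *c)
{
	free(c->chan_state);
	c->chan_state = NULL;	/* mirrors conn->smp_chan = NULL */
}

static void demo_failure(struct demo_conn *c, unsigned char reason, bool send)
{
	if (send)
		demo_send_fail(c, reason);
	/* ...clear pending flags, notify userspace, stop timers... */
	demo_chan_destroy(c);
}

int main(void)
{
	struct demo_conn c = { malloc(16) };

	demo_failure(&c, 0x05, true);	/* local error path: reply + cleanup */
	demo_failure(&c, 0x05, false);	/* peer sent PAIRING_FAIL: cleanup only */
	return 0;
}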