Diffstat (limited to 'net')
-rw-r--r-- | net/bluetooth/hci_conn.c      | 116
-rw-r--r-- | net/bluetooth/hci_core.c      | 679
-rw-r--r-- | net/bluetooth/hci_event.c     | 274
-rw-r--r-- | net/bluetooth/l2cap_core.c    |  37
-rw-r--r-- | net/bluetooth/mgmt.c          | 425
-rw-r--r-- | net/bluetooth/smp.c           | 180
-rw-r--r-- | net/bluetooth/smp.h           |  11
-rw-r--r-- | net/mac80211/ieee80211_i.h    |  10
-rw-r--r-- | net/mac80211/mlme.c           |  24
-rw-r--r-- | net/mac80211/rx.c             |   7
-rw-r--r-- | net/mac80211/sta_info.c       |  66
-rw-r--r-- | net/mac80211/sta_info.h       |   7
-rw-r--r-- | net/mac80211/tx.c             |  15
-rw-r--r-- | net/mac80211/util.c           |  48
-rw-r--r-- | net/mac80211/wme.c            |   5
-rw-r--r-- | net/nfc/core.c                |  10
-rw-r--r-- | net/nfc/digital.h             |   6
-rw-r--r-- | net/nfc/digital_core.c        |  67
-rw-r--r-- | net/nfc/digital_technology.c  | 247
-rw-r--r-- | net/nfc/hci/llc.c             |   4
-rw-r--r-- | net/nfc/llcp_core.c           |  16
-rw-r--r-- | net/nfc/nci/core.c            |   5
-rw-r--r-- | net/nfc/nci/spi.c             |   3
-rw-r--r-- | net/nfc/netlink.c             |   8
-rw-r--r-- | net/wireless/reg.c            |  12
25 files changed, 1946 insertions, 336 deletions
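
The hci_core.c hunks below classify LE addresses by the two most significant bits of the top address byte: is_identity_address() treats (addr->b[5] & 0xc0) == 0xc0 as a static random (identity) address, and hci_update_random_address() clears those two bits (urpa.b[5] &= 0x3f) to build a non-resolvable private address for active scanning. The following stand-alone sketch only illustrates that bit layout (per the Bluetooth Core specification, 0b01 in those bits marks a resolvable private address); the helper name and the sample address are invented for the example, and none of this is kernel code from the patch.

#include <stdio.h>
#include <stdint.h>

/*
 * Stand-alone illustration (not kernel code): classify an LE random
 * address by the two most significant bits of its top byte, mirroring
 * the checks added in is_identity_address() ((b[5] & 0xc0) == 0xc0 for
 * static random) and hci_update_random_address() (b[5] &= 0x3f for a
 * non-resolvable private address). bdaddr_t stores the address
 * little-endian, so b[5] is the most significant byte.
 */
static const char *classify_random_addr(const uint8_t addr[6])
{
	switch (addr[5] & 0xc0) {
	case 0xc0:
		return "static random (identity)";
	case 0x40:
		return "resolvable private (RPA)";
	case 0x00:
		return "non-resolvable private";
	default:
		return "reserved";
	}
}

int main(void)
{
	/* Example value only: top bits 11 -> static random address. */
	uint8_t bdaddr[6] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0xc1 };

	printf("%s\n", classify_random_addr(bdaddr));
	return 0;
}
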
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index bd66c52eff9..7c713c4675b 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -231,7 +231,7 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp); } -void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], +void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand, __u8 ltk[16]) { struct hci_dev *hdev = conn->hdev; @@ -242,9 +242,9 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], memset(&cp, 0, sizeof(cp)); cp.handle = cpu_to_le16(conn->handle); - memcpy(cp.ltk, ltk, sizeof(cp.ltk)); + cp.rand = rand; cp.ediv = ediv; - memcpy(cp.rand, rand, sizeof(cp.rand)); + memcpy(cp.ltk, ltk, sizeof(cp.ltk)); hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp); } @@ -363,6 +363,16 @@ static void hci_conn_auto_accept(struct work_struct *work) &conn->dst); } +static void le_conn_timeout(struct work_struct *work) +{ + struct hci_conn *conn = container_of(work, struct hci_conn, + le_conn_timeout.work); + + BT_DBG(""); + + hci_le_create_connection_cancel(conn); +} + struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) { struct hci_conn *conn; @@ -410,6 +420,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout); INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept); INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle); + INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout); atomic_set(&conn->refcnt, 0); @@ -442,6 +453,8 @@ int hci_conn_del(struct hci_conn *conn) /* Unacked frames */ hdev->acl_cnt += conn->sent; } else if (conn->type == LE_LINK) { + cancel_delayed_work_sync(&conn->le_conn_timeout); + if (hdev->le_pkts) hdev->le_cnt += conn->sent; else @@ -515,7 +528,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src) EXPORT_SYMBOL(hci_get_route); /* This function requires the caller holds hdev->lock */ -static void le_conn_failed(struct hci_conn *conn, u8 status) +void hci_le_conn_failed(struct hci_conn *conn, u8 status) { struct hci_dev *hdev = conn->hdev; @@ -527,6 +540,11 @@ static void le_conn_failed(struct hci_conn *conn, u8 status) hci_proto_connect_cfm(conn, status); hci_conn_del(conn); + + /* Since we may have temporarily stopped the background scanning in + * favor of connection establishment, we should restart it. + */ + hci_update_background_scan(hdev); } static void create_le_conn_complete(struct hci_dev *hdev, u8 status) @@ -545,50 +563,55 @@ static void create_le_conn_complete(struct hci_dev *hdev, u8 status) if (!conn) goto done; - le_conn_failed(conn, status); + hci_le_conn_failed(conn, status); done: hci_dev_unlock(hdev); } -static int hci_create_le_conn(struct hci_conn *conn) +static void hci_req_add_le_create_conn(struct hci_request *req, + struct hci_conn *conn) { - struct hci_dev *hdev = conn->hdev; struct hci_cp_le_create_conn cp; - struct hci_request req; - int err; - - hci_req_init(&req, hdev); + struct hci_dev *hdev = conn->hdev; + u8 own_addr_type; memset(&cp, 0, sizeof(cp)); + + /* Update random address, but set require_privacy to false so + * that we never connect with an unresolvable address. + */ + if (hci_update_random_address(req, false, &own_addr_type)) + return; + + /* Save the address type used for this connnection attempt so we able + * to retrieve this information if we need it. 
+ */ + conn->src_type = own_addr_type; + cp.scan_interval = cpu_to_le16(hdev->le_scan_interval); cp.scan_window = cpu_to_le16(hdev->le_scan_window); bacpy(&cp.peer_addr, &conn->dst); cp.peer_addr_type = conn->dst_type; - cp.own_address_type = conn->src_type; + cp.own_address_type = own_addr_type; cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); cp.supervision_timeout = __constant_cpu_to_le16(0x002a); cp.min_ce_len = __constant_cpu_to_le16(0x0000); cp.max_ce_len = __constant_cpu_to_le16(0x0000); - hci_req_add(&req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); + hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); - err = hci_req_run(&req, create_le_conn_complete); - if (err) { - hci_conn_del(conn); - return err; - } - - return 0; + conn->state = BT_CONNECT; } -static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, - u8 dst_type, u8 sec_level, u8 auth_type) +struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, + u8 dst_type, u8 sec_level, u8 auth_type) { struct hci_conn_params *params; struct hci_conn *conn; struct smp_irk *irk; + struct hci_request req; int err; if (test_bit(HCI_ADVERTISING, &hdev->flags)) @@ -617,12 +640,6 @@ static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, if (conn) return ERR_PTR(-EBUSY); - /* Convert from L2CAP channel address type to HCI address type */ - if (dst_type == BDADDR_LE_PUBLIC) - dst_type = ADDR_LE_DEV_PUBLIC; - else - dst_type = ADDR_LE_DEV_RANDOM; - /* When given an identity address with existing identity * resolving key, the connection needs to be established * to a resolvable random address. @@ -647,9 +664,7 @@ static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, return ERR_PTR(-ENOMEM); conn->dst_type = dst_type; - conn->src_type = hdev->own_addr_type; - conn->state = BT_CONNECT; conn->out = true; conn->link_mode |= HCI_LM_MASTER; conn->sec_level = BT_SECURITY_LOW; @@ -665,17 +680,34 @@ static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, conn->le_conn_max_interval = hdev->le_conn_max_interval; } - err = hci_create_le_conn(conn); - if (err) + hci_req_init(&req, hdev); + + /* If controller is scanning, we stop it since some controllers are + * not able to scan and connect at the same time. Also set the + * HCI_LE_SCAN_INTERRUPTED flag so that the command complete + * handler for scan disabling knows to set the correct discovery + * state. + */ + if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) { + hci_req_add_le_scan_disable(&req); + set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags); + } + + hci_req_add_le_create_conn(&req, conn); + + err = hci_req_run(&req, create_le_conn_complete); + if (err) { + hci_conn_del(conn); return ERR_PTR(err); + } done: hci_conn_hold(conn); return conn; } -static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, - u8 sec_level, u8 auth_type) +struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, + u8 sec_level, u8 auth_type) { struct hci_conn *acl; @@ -744,22 +776,6 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, return sco; } -/* Create SCO, ACL or LE connection. 
*/ -struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, - __u8 dst_type, __u8 sec_level, __u8 auth_type) -{ - BT_DBG("%s dst %pMR type 0x%x", hdev->name, dst, type); - - switch (type) { - case LE_LINK: - return hci_connect_le(hdev, dst, dst_type, sec_level, auth_type); - case ACL_LINK: - return hci_connect_acl(hdev, dst, sec_level, auth_type); - } - - return ERR_PTR(-EINVAL); -} - /* Check link security requirement */ int hci_conn_check_link_mode(struct hci_conn *conn) { diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 964aa8deb00..8bbfdea9cbe 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -492,6 +492,37 @@ static int idle_timeout_get(void *data, u64 *val) DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get, idle_timeout_set, "%llu\n"); +static int rpa_timeout_set(void *data, u64 val) +{ + struct hci_dev *hdev = data; + + /* Require the RPA timeout to be at least 30 seconds and at most + * 24 hours. + */ + if (val < 30 || val > (60 * 60 * 24)) + return -EINVAL; + + hci_dev_lock(hdev); + hdev->rpa_timeout = val; + hci_dev_unlock(hdev); + + return 0; +} + +static int rpa_timeout_get(void *data, u64 *val) +{ + struct hci_dev *hdev = data; + + hci_dev_lock(hdev); + *val = hdev->rpa_timeout; + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get, + rpa_timeout_set, "%llu\n"); + static int sniff_min_interval_set(void *data, u64 val) { struct hci_dev *hdev = data; @@ -548,6 +579,36 @@ static int sniff_max_interval_get(void *data, u64 *val) DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get, sniff_max_interval_set, "%llu\n"); +static int identity_show(struct seq_file *f, void *p) +{ + struct hci_dev *hdev = f->private; + bdaddr_t addr; + u8 addr_type; + + hci_dev_lock(hdev); + + hci_copy_identity_address(hdev, &addr, &addr_type); + + seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type, + 16, hdev->irk, &hdev->rpa); + + hci_dev_unlock(hdev); + + return 0; +} + +static int identity_open(struct inode *inode, struct file *file) +{ + return single_open(file, identity_show, inode->i_private); +} + +static const struct file_operations identity_fops = { + .open = identity_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + static int random_address_show(struct seq_file *f, void *p) { struct hci_dev *hdev = f->private; @@ -641,6 +702,31 @@ static const struct file_operations force_static_address_fops = { .llseek = default_llseek, }; +static int white_list_show(struct seq_file *f, void *ptr) +{ + struct hci_dev *hdev = f->private; + struct bdaddr_list *b; + + hci_dev_lock(hdev); + list_for_each_entry(b, &hdev->le_white_list, list) + seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type); + hci_dev_unlock(hdev); + + return 0; +} + +static int white_list_open(struct inode *inode, struct file *file) +{ + return single_open(file, white_list_show, inode->i_private); +} + +static const struct file_operations white_list_fops = { + .open = white_list_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + static int identity_resolving_keys_show(struct seq_file *f, void *ptr) { struct hci_dev *hdev = f->private; @@ -679,10 +765,10 @@ static int long_term_keys_show(struct seq_file *f, void *ptr) hci_dev_lock(hdev); list_for_each_safe(p, n, &hdev->long_term_keys) { struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list); - seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n", + 
seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n", <k->bdaddr, ltk->bdaddr_type, ltk->authenticated, ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv), - 8, ltk->rand, 16, ltk->val); + __le64_to_cpu(ltk->rand), 16, ltk->val); } hci_dev_unlock(hdev); @@ -828,6 +914,115 @@ static const struct file_operations lowpan_debugfs_fops = { .llseek = default_llseek, }; +static int le_auto_conn_show(struct seq_file *sf, void *ptr) +{ + struct hci_dev *hdev = sf->private; + struct hci_conn_params *p; + + hci_dev_lock(hdev); + + list_for_each_entry(p, &hdev->le_conn_params, list) { + seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type, + p->auto_connect); + } + + hci_dev_unlock(hdev); + + return 0; +} + +static int le_auto_conn_open(struct inode *inode, struct file *file) +{ + return single_open(file, le_auto_conn_show, inode->i_private); +} + +static ssize_t le_auto_conn_write(struct file *file, const char __user *data, + size_t count, loff_t *offset) +{ + struct seq_file *sf = file->private_data; + struct hci_dev *hdev = sf->private; + u8 auto_connect = 0; + bdaddr_t addr; + u8 addr_type; + char *buf; + int err = 0; + int n; + + /* Don't allow partial write */ + if (*offset != 0) + return -EINVAL; + + if (count < 3) + return -EINVAL; + + buf = kzalloc(count, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (copy_from_user(buf, data, count)) { + err = -EFAULT; + goto done; + } + + if (memcmp(buf, "add", 3) == 0) { + n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu", + &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2], + &addr.b[1], &addr.b[0], &addr_type, + &auto_connect); + + if (n < 7) { + err = -EINVAL; + goto done; + } + + hci_dev_lock(hdev); + err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect, + hdev->le_conn_min_interval, + hdev->le_conn_max_interval); + hci_dev_unlock(hdev); + + if (err) + goto done; + } else if (memcmp(buf, "del", 3) == 0) { + n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu", + &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2], + &addr.b[1], &addr.b[0], &addr_type); + + if (n < 7) { + err = -EINVAL; + goto done; + } + + hci_dev_lock(hdev); + hci_conn_params_del(hdev, &addr, addr_type); + hci_dev_unlock(hdev); + } else if (memcmp(buf, "clr", 3) == 0) { + hci_dev_lock(hdev); + hci_conn_params_clear(hdev); + hci_pend_le_conns_clear(hdev); + hci_update_background_scan(hdev); + hci_dev_unlock(hdev); + } else { + err = -EINVAL; + } + +done: + kfree(buf); + + if (err) + return err; + else + return count; +} + +static const struct file_operations le_auto_conn_fops = { + .open = le_auto_conn_open, + .read = seq_read, + .write = le_auto_conn_write, + .llseek = seq_lseek, + .release = single_release, +}; + /* ---- HCI requests ---- */ static void hci_req_sync_complete(struct hci_dev *hdev, u8 result) @@ -1176,14 +1371,17 @@ static void le_setup(struct hci_request *req) /* Read LE Local Supported Features */ hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL); + /* Read LE Supported States */ + hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL); + /* Read LE Advertising Channel TX Power */ hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL); /* Read LE White List Size */ hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL); - /* Read LE Supported States */ - hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL); + /* Clear LE White List */ + hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL); /* LE-only controllers have LE implicitly enabled */ if (!lmp_bredr_capable(hdev)) @@ -1475,23 +1673,8 @@ static void 
hci_init3_req(struct hci_request *req, unsigned long opt) if (hdev->commands[5] & 0x10) hci_setup_link_policy(req); - if (lmp_le_capable(hdev)) { - /* If the controller has a public BD_ADDR, then by default - * use that one. If this is a LE only controller without - * a public address, default to the random address. - * - * For debugging purposes it is possible to force - * controllers with a public address to use the - * random address instead. - */ - if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) || - !bacmp(&hdev->bdaddr, BDADDR_ANY)) - hdev->own_addr_type = ADDR_LE_DEV_RANDOM; - else - hdev->own_addr_type = ADDR_LE_DEV_PUBLIC; - + if (lmp_le_capable(hdev)) hci_set_le_support(req); - } /* Read features beyond page 1 if available */ for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) { @@ -1608,6 +1791,10 @@ static int __hci_init(struct hci_dev *hdev) } if (lmp_le_capable(hdev)) { + debugfs_create_file("identity", 0400, hdev->debugfs, + hdev, &identity_fops); + debugfs_create_file("rpa_timeout", 0644, hdev->debugfs, + hdev, &rpa_timeout_fops); debugfs_create_file("random_address", 0444, hdev->debugfs, hdev, &random_address_fops); debugfs_create_file("static_address", 0444, hdev->debugfs, @@ -1624,6 +1811,8 @@ static int __hci_init(struct hci_dev *hdev) debugfs_create_u8("white_list_size", 0444, hdev->debugfs, &hdev->le_white_list_size); + debugfs_create_file("white_list", 0444, hdev->debugfs, hdev, + &white_list_fops); debugfs_create_file("identity_resolving_keys", 0400, hdev->debugfs, hdev, &identity_resolving_keys_fops); @@ -1637,6 +1826,8 @@ static int __hci_init(struct hci_dev *hdev) hdev, &adv_channel_map_fops); debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev, &lowpan_debugfs_fops); + debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev, + &le_auto_conn_fops); } return 0; @@ -1729,6 +1920,8 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state) switch (state) { case DISCOVERY_STOPPED: + hci_update_background_scan(hdev); + if (hdev->discovery.state != DISCOVERY_STARTING) mgmt_discovering(hdev, 0); break; @@ -2102,6 +2295,7 @@ static int hci_dev_do_open(struct hci_dev *hdev) if (!ret) { hci_dev_hold(hdev); + set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); set_bit(HCI_UP, &hdev->flags); hci_notify(hdev, HCI_DEV_UP); if (!test_bit(HCI_SETUP, &hdev->dev_flags) && @@ -2200,9 +2394,13 @@ static int hci_dev_do_close(struct hci_dev *hdev) cancel_delayed_work_sync(&hdev->le_scan_disable); + if (test_bit(HCI_MGMT, &hdev->dev_flags)) + cancel_delayed_work_sync(&hdev->rpa_expired); + hci_dev_lock(hdev); hci_inquiry_cache_flush(hdev); hci_conn_hash_flush(hdev); + hci_pend_le_conns_clear(hdev); hci_dev_unlock(hdev); hci_notify(hdev, HCI_DEV_DOWN); @@ -2723,14 +2921,13 @@ static bool ltk_type_master(u8 type) return false; } -struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8], +struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand, bool master) { struct smp_ltk *k; list_for_each_entry(k, &hdev->long_term_keys, list) { - if (k->ediv != ediv || - memcmp(rand, k->rand, sizeof(k->rand))) + if (k->ediv != ediv || k->rand != rand) continue; if (ltk_type_master(k->type) != master) @@ -2848,7 +3045,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type, u8 authenticated, - u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8]) + u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand) { struct smp_ltk *key, *old_key; 
bool master = ltk_type_master(type); @@ -2868,9 +3065,9 @@ struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, memcpy(key->val, tk, sizeof(key->val)); key->authenticated = authenticated; key->ediv = ediv; + key->rand = rand; key->enc_size = enc_size; key->type = type; - memcpy(key->rand, rand, sizeof(key->rand)); return key; } @@ -3070,7 +3267,7 @@ struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, return NULL; } -void hci_blacklist_clear(struct hci_dev *hdev) +static void hci_blacklist_clear(struct hci_dev *hdev) { struct list_head *p, *n; @@ -3123,6 +3320,67 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) return mgmt_device_unblocked(hdev, bdaddr, type); } +struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev, + bdaddr_t *bdaddr, u8 type) +{ + struct bdaddr_list *b; + + list_for_each_entry(b, &hdev->le_white_list, list) { + if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) + return b; + } + + return NULL; +} + +void hci_white_list_clear(struct hci_dev *hdev) +{ + struct list_head *p, *n; + + list_for_each_safe(p, n, &hdev->le_white_list) { + struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list); + + list_del(p); + kfree(b); + } +} + +int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) +{ + struct bdaddr_list *entry; + + if (!bacmp(bdaddr, BDADDR_ANY)) + return -EBADF; + + entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + bacpy(&entry->bdaddr, bdaddr); + entry->bdaddr_type = type; + + list_add(&entry->list, &hdev->le_white_list); + + return 0; +} + +int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) +{ + struct bdaddr_list *entry; + + if (!bacmp(bdaddr, BDADDR_ANY)) + return -EBADF; + + entry = hci_white_list_lookup(hdev, bdaddr, type); + if (!entry) + return -ENOENT; + + list_del(&entry->list); + kfree(entry); + + return 0; +} + /* This function requires the caller holds hdev->lock */ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) @@ -3139,35 +3397,81 @@ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, return NULL; } +static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type) +{ + struct hci_conn *conn; + + conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr); + if (!conn) + return false; + + if (conn->dst_type != type) + return false; + + if (conn->state != BT_CONNECTED) + return false; + + return true; +} + +static bool is_identity_address(bdaddr_t *addr, u8 addr_type) +{ + if (addr_type == ADDR_LE_DEV_PUBLIC) + return true; + + /* Check for Random Static address type */ + if ((addr->b[5] & 0xc0) == 0xc0) + return true; + + return false; +} + /* This function requires the caller holds hdev->lock */ -void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type, - u16 conn_min_interval, u16 conn_max_interval) +int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type, + u8 auto_connect, u16 conn_min_interval, + u16 conn_max_interval) { struct hci_conn_params *params; + if (!is_identity_address(addr, addr_type)) + return -EINVAL; + params = hci_conn_params_lookup(hdev, addr, addr_type); - if (params) { - params->conn_min_interval = conn_min_interval; - params->conn_max_interval = conn_max_interval; - return; - } + if (params) + goto update; params = kzalloc(sizeof(*params), GFP_KERNEL); if (!params) { BT_ERR("Out of memory"); - return; + return -ENOMEM; } bacpy(¶ms->addr, addr); params->addr_type = addr_type; + 
+ list_add(¶ms->list, &hdev->le_conn_params); + +update: params->conn_min_interval = conn_min_interval; params->conn_max_interval = conn_max_interval; + params->auto_connect = auto_connect; - list_add(¶ms->list, &hdev->le_conn_params); + switch (auto_connect) { + case HCI_AUTO_CONN_DISABLED: + case HCI_AUTO_CONN_LINK_LOSS: + hci_pend_le_conn_del(hdev, addr, addr_type); + break; + case HCI_AUTO_CONN_ALWAYS: + if (!is_connected(hdev, addr, addr_type)) + hci_pend_le_conn_add(hdev, addr, addr_type); + break; + } - BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x " - "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval, - conn_max_interval); + BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x " + "conn_max_interval 0x%.4x", addr, addr_type, auto_connect, + conn_min_interval, conn_max_interval); + + return 0; } /* This function requires the caller holds hdev->lock */ @@ -3179,6 +3483,8 @@ void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) if (!params) return; + hci_pend_le_conn_del(hdev, addr, addr_type); + list_del(¶ms->list); kfree(params); @@ -3198,6 +3504,78 @@ void hci_conn_params_clear(struct hci_dev *hdev) BT_DBG("All LE connection parameters were removed"); } +/* This function requires the caller holds hdev->lock */ +struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev, + bdaddr_t *addr, u8 addr_type) +{ + struct bdaddr_list *entry; + + list_for_each_entry(entry, &hdev->pend_le_conns, list) { + if (bacmp(&entry->bdaddr, addr) == 0 && + entry->bdaddr_type == addr_type) + return entry; + } + + return NULL; +} + +/* This function requires the caller holds hdev->lock */ +void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) +{ + struct bdaddr_list *entry; + + entry = hci_pend_le_conn_lookup(hdev, addr, addr_type); + if (entry) + goto done; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + BT_ERR("Out of memory"); + return; + } + + bacpy(&entry->bdaddr, addr); + entry->bdaddr_type = addr_type; + + list_add(&entry->list, &hdev->pend_le_conns); + + BT_DBG("addr %pMR (type %u)", addr, addr_type); + +done: + hci_update_background_scan(hdev); +} + +/* This function requires the caller holds hdev->lock */ +void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) +{ + struct bdaddr_list *entry; + + entry = hci_pend_le_conn_lookup(hdev, addr, addr_type); + if (!entry) + goto done; + + list_del(&entry->list); + kfree(entry); + + BT_DBG("addr %pMR (type %u)", addr, addr_type); + +done: + hci_update_background_scan(hdev); +} + +/* This function requires the caller holds hdev->lock */ +void hci_pend_le_conns_clear(struct hci_dev *hdev) +{ + struct bdaddr_list *entry, *tmp; + + list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) { + list_del(&entry->list); + kfree(entry); + } + + BT_DBG("All LE pending connections cleared"); +} + static void inquiry_complete(struct hci_dev *hdev, u8 status) { if (status) { @@ -3257,7 +3635,6 @@ static void le_scan_disable_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan_disable.work); - struct hci_cp_le_set_scan_enable cp; struct hci_request req; int err; @@ -3265,15 +3642,128 @@ static void le_scan_disable_work(struct work_struct *work) hci_req_init(&req, hdev); - memset(&cp, 0, sizeof(cp)); - cp.enable = LE_SCAN_DISABLE; - hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); + hci_req_add_le_scan_disable(&req); err = hci_req_run(&req, le_scan_disable_work_complete); 
if (err) BT_ERR("Disable LE scanning request failed: err %d", err); } +static void set_random_addr(struct hci_request *req, bdaddr_t *rpa) +{ + struct hci_dev *hdev = req->hdev; + + /* If we're advertising or initiating an LE connection we can't + * go ahead and change the random address at this time. This is + * because the eventual initiator address used for the + * subsequently created connection will be undefined (some + * controllers use the new address and others the one we had + * when the operation started). + * + * In this kind of scenario skip the update and let the random + * address be updated at the next cycle. + */ + if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) || + hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) { + BT_DBG("Deferring random address update"); + return; + } + + hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa); +} + +int hci_update_random_address(struct hci_request *req, bool require_privacy, + u8 *own_addr_type) +{ + struct hci_dev *hdev = req->hdev; + int err; + + /* If privacy is enabled use a resolvable private address. If + * current RPA has expired or there is something else than + * the current RPA in use, then generate a new one. + */ + if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) { + int to; + + *own_addr_type = ADDR_LE_DEV_RANDOM; + + if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) && + !bacmp(&hdev->random_addr, &hdev->rpa)) + return 0; + + err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa); + if (err < 0) { + BT_ERR("%s failed to generate new RPA", hdev->name); + return err; + } + + set_random_addr(req, &hdev->rpa); + + to = msecs_to_jiffies(hdev->rpa_timeout * 1000); + queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to); + + return 0; + } + + /* In case of required privacy without resolvable private address, + * use an unresolvable private address. This is useful for active + * scanning and non-connectable advertising. + */ + if (require_privacy) { + bdaddr_t urpa; + + get_random_bytes(&urpa, 6); + urpa.b[5] &= 0x3f; /* Clear two most significant bits */ + + *own_addr_type = ADDR_LE_DEV_RANDOM; + set_random_addr(req, &urpa); + return 0; + } + + /* If forcing static address is in use or there is no public + * address use the static address as random address (but skip + * the HCI command if the current random address is already the + * static one. + */ + if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) || + !bacmp(&hdev->bdaddr, BDADDR_ANY)) { + *own_addr_type = ADDR_LE_DEV_RANDOM; + if (bacmp(&hdev->static_addr, &hdev->random_addr)) + hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, + &hdev->static_addr); + return 0; + } + + /* Neither privacy nor static address is being used so use a + * public address. + */ + *own_addr_type = ADDR_LE_DEV_PUBLIC; + + return 0; +} + +/* Copy the Identity Address of the controller. + * + * If the controller has a public BD_ADDR, then by default use that one. + * If this is a LE only controller without a public address, default to + * the static random address. + * + * For debugging purposes it is possible to force controllers with a + * public address to use the static random address instead. 
+ */ +void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 *bdaddr_type) +{ + if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) || + !bacmp(&hdev->bdaddr, BDADDR_ANY)) { + bacpy(bdaddr, &hdev->static_addr); + *bdaddr_type = ADDR_LE_DEV_RANDOM; + } else { + bacpy(bdaddr, &hdev->bdaddr); + *bdaddr_type = ADDR_LE_DEV_PUBLIC; + } +} + /* Alloc HCI device */ struct hci_dev *hci_alloc_dev(void) { @@ -3300,6 +3790,8 @@ struct hci_dev *hci_alloc_dev(void) hdev->le_conn_min_interval = 0x0028; hdev->le_conn_max_interval = 0x0038; + hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; + mutex_init(&hdev->lock); mutex_init(&hdev->req_lock); @@ -3310,7 +3802,9 @@ struct hci_dev *hci_alloc_dev(void) INIT_LIST_HEAD(&hdev->long_term_keys); INIT_LIST_HEAD(&hdev->identity_resolving_keys); INIT_LIST_HEAD(&hdev->remote_oob_data); + INIT_LIST_HEAD(&hdev->le_white_list); INIT_LIST_HEAD(&hdev->le_conn_params); + INIT_LIST_HEAD(&hdev->pend_le_conns); INIT_LIST_HEAD(&hdev->conn_hash.list); INIT_WORK(&hdev->rx_work, hci_rx_work); @@ -3511,7 +4005,9 @@ void hci_unregister_dev(struct hci_dev *hdev) hci_smp_ltks_clear(hdev); hci_smp_irks_clear(hdev); hci_remote_oob_data_clear(hdev); + hci_white_list_clear(hdev); hci_conn_params_clear(hdev); + hci_pend_le_conns_clear(hdev); hci_dev_unlock(hdev); hci_dev_put(hdev); @@ -4739,3 +5235,102 @@ static void hci_cmd_work(struct work_struct *work) } } } + +void hci_req_add_le_scan_disable(struct hci_request *req) +{ + struct hci_cp_le_set_scan_enable cp; + + memset(&cp, 0, sizeof(cp)); + cp.enable = LE_SCAN_DISABLE; + hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); +} + +void hci_req_add_le_passive_scan(struct hci_request *req) +{ + struct hci_cp_le_set_scan_param param_cp; + struct hci_cp_le_set_scan_enable enable_cp; + struct hci_dev *hdev = req->hdev; + u8 own_addr_type; + + /* Set require_privacy to true to avoid identification from + * unknown peer devices. Since this is passive scanning, no + * SCAN_REQ using the local identity should be sent. Mandating + * privacy is just an extra precaution. + */ + if (hci_update_random_address(req, true, &own_addr_type)) + return; + + memset(¶m_cp, 0, sizeof(param_cp)); + param_cp.type = LE_SCAN_PASSIVE; + param_cp.interval = cpu_to_le16(hdev->le_scan_interval); + param_cp.window = cpu_to_le16(hdev->le_scan_window); + param_cp.own_address_type = own_addr_type; + hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), + ¶m_cp); + + memset(&enable_cp, 0, sizeof(enable_cp)); + enable_cp.enable = LE_SCAN_ENABLE; + enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE; + hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), + &enable_cp); +} + +static void update_background_scan_complete(struct hci_dev *hdev, u8 status) +{ + if (status) + BT_DBG("HCI request failed to update background scanning: " + "status 0x%2.2x", status); +} + +/* This function controls the background scanning based on hdev->pend_le_conns + * list. If there are pending LE connection we start the background scanning, + * otherwise we stop it. + * + * This function requires the caller holds hdev->lock. + */ +void hci_update_background_scan(struct hci_dev *hdev) +{ + struct hci_request req; + struct hci_conn *conn; + int err; + + hci_req_init(&req, hdev); + + if (list_empty(&hdev->pend_le_conns)) { + /* If there is no pending LE connections, we should stop + * the background scanning. + */ + + /* If controller is not scanning we are done. 
*/ + if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags)) + return; + + hci_req_add_le_scan_disable(&req); + + BT_DBG("%s stopping background scanning", hdev->name); + } else { + /* If there is at least one pending LE connection, we should + * keep the background scan running. + */ + + /* If controller is already scanning we are done. */ + if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) + return; + + /* If controller is connecting, we should not start scanning + * since some controllers are not able to scan and connect at + * the same time. + */ + conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); + if (conn) + return; + + hci_req_add_le_passive_scan(&req); + + BT_DBG("%s starting background scanning", hdev->name); + } + + err = hci_req_run(&req, update_background_scan_complete); + if (err) + BT_ERR("Failed to run HCI request: err %d", err); +} diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 4327b129d38..c3b0a08f5ab 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -991,12 +991,8 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_lock(hdev); - if (!status) { - if (*sent) - set_bit(HCI_ADVERTISING, &hdev->dev_flags); - else - clear_bit(HCI_ADVERTISING, &hdev->dev_flags); - } + if (!status) + mgmt_advertising(hdev, *sent); hci_dev_unlock(hdev); } @@ -1022,7 +1018,19 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, break; case LE_SCAN_DISABLE: + /* Cancel this timer so that we don't try to disable scanning + * when it's already disabled. + */ + cancel_delayed_work(&hdev->le_scan_disable); + clear_bit(HCI_LE_SCAN, &hdev->dev_flags); + /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we + * interrupted scanning due to a connect request. Mark + * therefore discovery as stopped. 
+ */ + if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED, + &hdev->dev_flags)) + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); break; default: @@ -1042,6 +1050,49 @@ static void hci_cc_le_read_white_list_size(struct hci_dev *hdev, hdev->le_white_list_size = rp->size; } +static void hci_cc_le_clear_white_list(struct hci_dev *hdev, + struct sk_buff *skb) +{ + __u8 status = *((__u8 *) skb->data); + + BT_DBG("%s status 0x%2.2x", hdev->name, status); + + if (!status) + hci_white_list_clear(hdev); +} + +static void hci_cc_le_add_to_white_list(struct hci_dev *hdev, + struct sk_buff *skb) +{ + struct hci_cp_le_add_to_white_list *sent; + __u8 status = *((__u8 *) skb->data); + + BT_DBG("%s status 0x%2.2x", hdev->name, status); + + sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST); + if (!sent) + return; + + if (!status) + hci_white_list_add(hdev, &sent->bdaddr, sent->bdaddr_type); +} + +static void hci_cc_le_del_from_white_list(struct hci_dev *hdev, + struct sk_buff *skb) +{ + struct hci_cp_le_del_from_white_list *sent; + __u8 status = *((__u8 *) skb->data); + + BT_DBG("%s status 0x%2.2x", hdev->name, status); + + sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST); + if (!sent) + return; + + if (!status) + hci_white_list_del(hdev, &sent->bdaddr, sent->bdaddr_type); +} + static void hci_cc_le_read_supported_states(struct hci_dev *hdev, struct sk_buff *skb) { @@ -1082,6 +1133,25 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev, } } +static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_cp_le_set_adv_param *cp; + u8 status = *((u8 *) skb->data); + + BT_DBG("%s status 0x%2.2x", hdev->name, status); + + if (status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM); + if (!cp) + return; + + hci_dev_lock(hdev); + hdev->adv_addr_type = cp->own_address_type; + hci_dev_unlock(hdev); +} + static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev, struct sk_buff *skb) { @@ -1583,6 +1653,57 @@ static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status) amp_write_remote_assoc(hdev, cp->phy_handle); } +static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) +{ + struct hci_cp_le_create_conn *cp; + struct hci_conn *conn; + + BT_DBG("%s status 0x%2.2x", hdev->name, status); + + /* All connection failure handling is taken care of by the + * hci_le_conn_failed function which is triggered by the HCI + * request completion callbacks used for connecting. + */ + if (status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN); + if (!cp) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr); + if (!conn) + goto unlock; + + /* Store the initiator and responder address information which + * is needed for SMP. These values will not change during the + * lifetime of the connection. + */ + conn->init_addr_type = cp->own_address_type; + if (cp->own_address_type == ADDR_LE_DEV_RANDOM) + bacpy(&conn->init_addr, &hdev->random_addr); + else + bacpy(&conn->init_addr, &hdev->bdaddr); + + conn->resp_addr_type = cp->peer_addr_type; + bacpy(&conn->resp_addr, &cp->peer_addr); + + /* We don't want the connection attempt to stick around + * indefinitely since LE doesn't have a page timeout concept + * like BR/EDR. Set a timer for any connection that doesn't use + * the white list for connecting. 
+ */ + if (cp->filter_policy == HCI_LE_USE_PEER_ADDR) + queue_delayed_work(conn->hdev->workqueue, + &conn->le_conn_timeout, + HCI_LE_CONN_TIMEOUT); + +unlock: + hci_dev_unlock(hdev); +} + static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); @@ -1845,7 +1966,9 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_disconn_complete *ev = (void *) skb->data; u8 reason = hci_to_mgmt_reason(ev->reason); + struct hci_conn_params *params; struct hci_conn *conn; + bool mgmt_connected; u8 type; BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); @@ -1864,13 +1987,30 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) conn->state = BT_CLOSED; - if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) - mgmt_device_disconnected(hdev, &conn->dst, conn->type, - conn->dst_type, reason); + mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); + mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, + reason, mgmt_connected); if (conn->type == ACL_LINK && conn->flush_key) hci_remove_link_key(hdev, &conn->dst); + params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); + if (params) { + switch (params->auto_connect) { + case HCI_AUTO_CONN_LINK_LOSS: + if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) + break; + /* Fall through */ + + case HCI_AUTO_CONN_ALWAYS: + hci_pend_le_conn_add(hdev, &conn->dst, conn->dst_type); + break; + + default: + break; + } + } + type = conn->type; hci_proto_disconn_cfm(conn, ev->reason); @@ -2344,6 +2484,18 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_cc_le_read_white_list_size(hdev, skb); break; + case HCI_OP_LE_CLEAR_WHITE_LIST: + hci_cc_le_clear_white_list(hdev, skb); + break; + + case HCI_OP_LE_ADD_TO_WHITE_LIST: + hci_cc_le_add_to_white_list(hdev, skb); + break; + + case HCI_OP_LE_DEL_FROM_WHITE_LIST: + hci_cc_le_del_from_white_list(hdev, skb); + break; + case HCI_OP_LE_READ_SUPPORTED_STATES: hci_cc_le_read_supported_states(hdev, skb); break; @@ -2352,6 +2504,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_cc_write_le_host_supported(hdev, skb); break; + case HCI_OP_LE_SET_ADV_PARAM: + hci_cc_set_adv_param(hdev, skb); + break; + case HCI_OP_WRITE_REMOTE_AMP_ASSOC: hci_cc_write_remote_amp_assoc(hdev, skb); break; @@ -2439,6 +2595,10 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_cs_accept_phylink(hdev, ev->status); break; + case HCI_OP_LE_CREATE_CONN: + hci_cs_le_create_conn(hdev, ev->status); + break; + default: BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); break; @@ -3623,8 +3783,48 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) conn->out = true; conn->link_mode |= HCI_LM_MASTER; } + + /* If we didn't have a hci_conn object previously + * but we're in master role this must be something + * initiated using a white list. Since white list based + * connections are not "first class citizens" we don't + * have full tracking of them. Therefore, we go ahead + * with a "best effort" approach of determining the + * initiator address based on the HCI_PRIVACY flag. 
+ */ + if (conn->out) { + conn->resp_addr_type = ev->bdaddr_type; + bacpy(&conn->resp_addr, &ev->bdaddr); + if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) { + conn->init_addr_type = ADDR_LE_DEV_RANDOM; + bacpy(&conn->init_addr, &hdev->rpa); + } else { + hci_copy_identity_address(hdev, + &conn->init_addr, + &conn->init_addr_type); + } + } else { + /* Set the responder (our side) address type based on + * the advertising address type. + */ + conn->resp_addr_type = hdev->adv_addr_type; + if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) + bacpy(&conn->resp_addr, &hdev->random_addr); + else + bacpy(&conn->resp_addr, &hdev->bdaddr); + + conn->init_addr_type = ev->bdaddr_type; + bacpy(&conn->init_addr, &ev->bdaddr); + } + } else { + cancel_delayed_work(&conn->le_conn_timeout); } + /* Ensure that the hci_conn contains the identity address type + * regardless of which address the connection was made with. + */ + hci_copy_identity_address(hdev, &conn->src, &conn->src_type); + /* Lookup the identity address from the stored connection * address and address type. * @@ -3641,11 +3841,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) } if (ev->status) { - mgmt_connect_failed(hdev, &conn->dst, conn->type, - conn->dst_type, ev->status); - hci_proto_connect_cfm(conn, ev->status); - conn->state = BT_CLOSED; - hci_conn_del(conn); + hci_le_conn_failed(conn, ev->status); goto unlock; } @@ -3664,25 +3860,73 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_proto_connect_cfm(conn, ev->status); + hci_pend_le_conn_del(hdev, &conn->dst, conn->dst_type); + unlock: hci_dev_unlock(hdev); } +/* This function requires the caller holds hdev->lock */ +static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, + u8 addr_type) +{ + struct hci_conn *conn; + struct smp_irk *irk; + + /* If this is a resolvable address, we should resolve it and then + * update address and address type variables. + */ + irk = hci_get_irk(hdev, addr, addr_type); + if (irk) { + addr = &irk->bdaddr; + addr_type = irk->addr_type; + } + + if (!hci_pend_le_conn_lookup(hdev, addr, addr_type)) + return; + + conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW, + HCI_AT_NO_BONDING); + if (!IS_ERR(conn)) + return; + + switch (PTR_ERR(conn)) { + case -EBUSY: + /* If hci_connect() returns -EBUSY it means there is already + * an LE connection attempt going on. Since controllers don't + * support more than one connection attempt at the time, we + * don't consider this an error case. 
+ */ + break; + default: + BT_DBG("Failed to connect: err %ld", PTR_ERR(conn)); + } +} + static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) { u8 num_reports = skb->data[0]; void *ptr = &skb->data[1]; s8 rssi; + hci_dev_lock(hdev); + while (num_reports--) { struct hci_ev_le_advertising_info *ev = ptr; + if (ev->evt_type == LE_ADV_IND || + ev->evt_type == LE_ADV_DIRECT_IND) + check_pending_le_conn(hdev, &ev->bdaddr, + ev->bdaddr_type); + rssi = ev->data[ev->length]; mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type, NULL, rssi, 0, 1, ev->data, ev->length); ptr += sizeof(*ev) + ev->length + 1; } + + hci_dev_unlock(hdev); } static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) @@ -3701,7 +3945,7 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) if (conn == NULL) goto not_found; - ltk = hci_find_ltk(hdev, ev->ediv, ev->random, conn->out); + ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out); if (ltk == NULL) goto not_found; diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 6ace116f3b3..9ed2168fa59 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -2434,6 +2434,14 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, if (IS_ERR(skb)) return PTR_ERR(skb); + /* Channel lock is released before requesting new skb and then + * reacquired thus we need to recheck channel state. + */ + if (chan->state != BT_CONNECTED) { + kfree_skb(skb); + return -ENOTCONN; + } + l2cap_do_send(chan, skb); return len; } @@ -2483,6 +2491,14 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, if (IS_ERR(skb)) return PTR_ERR(skb); + /* Channel lock is released before requesting new skb and then + * reacquired thus we need to recheck channel state. 
+ */ + if (chan->state != BT_CONNECTED) { + kfree_skb(skb); + return -ENOTCONN; + } + l2cap_do_send(chan, skb); err = len; break; @@ -7092,12 +7108,19 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, auth_type = l2cap_get_auth_type(chan); - if (bdaddr_type_is_le(dst_type)) - hcon = hci_connect(hdev, LE_LINK, dst, dst_type, - chan->sec_level, auth_type); - else - hcon = hci_connect(hdev, ACL_LINK, dst, dst_type, - chan->sec_level, auth_type); + if (bdaddr_type_is_le(dst_type)) { + /* Convert from L2CAP channel address type to HCI address type + */ + if (dst_type == BDADDR_LE_PUBLIC) + dst_type = ADDR_LE_DEV_PUBLIC; + else + dst_type = ADDR_LE_DEV_RANDOM; + + hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level, + auth_type); + } else { + hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type); + } if (IS_ERR(hcon)) { err = PTR_ERR(hcon); @@ -7251,7 +7274,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) if (hcon->type == LE_LINK) { if (!status && encrypt) - smp_distribute_keys(conn, 0); + smp_distribute_keys(conn); cancel_delayed_work(&conn->security_timer); } diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 12fa6399c79..98e9df3556e 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -81,6 +81,7 @@ static const u16 mgmt_commands[] = { MGMT_OP_SET_SCAN_PARAMS, MGMT_OP_SET_SECURE_CONN, MGMT_OP_SET_DEBUG_KEYS, + MGMT_OP_SET_PRIVACY, MGMT_OP_LOAD_IRKS, }; @@ -106,6 +107,7 @@ static const u16 mgmt_events[] = { MGMT_EV_DEVICE_UNBLOCKED, MGMT_EV_DEVICE_UNPAIRED, MGMT_EV_PASSKEY_NOTIFY, + MGMT_EV_NEW_IRK, }; #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000) @@ -389,6 +391,7 @@ static u32 get_supported_settings(struct hci_dev *hdev) if (lmp_le_capable(hdev)) { settings |= MGMT_SETTING_LE; settings |= MGMT_SETTING_ADVERTISING; + settings |= MGMT_SETTING_PRIVACY; } return settings; @@ -437,6 +440,9 @@ static u32 get_current_settings(struct hci_dev *hdev) if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags)) settings |= MGMT_SETTING_DEBUG_KEYS; + if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) + settings |= MGMT_SETTING_PRIVACY; + return settings; } @@ -811,6 +817,64 @@ static void update_class(struct hci_request *req) hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod); } +static bool get_connectable(struct hci_dev *hdev) +{ + struct pending_cmd *cmd; + + /* If there's a pending mgmt command the flag will not yet have + * it's final value, so check for this first. + */ + cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev); + if (cmd) { + struct mgmt_mode *cp = cmd->param; + return cp->val; + } + + return test_bit(HCI_CONNECTABLE, &hdev->dev_flags); +} + +static void enable_advertising(struct hci_request *req) +{ + struct hci_dev *hdev = req->hdev; + struct hci_cp_le_set_adv_param cp; + u8 own_addr_type, enable = 0x01; + bool connectable; + + /* Clear the HCI_ADVERTISING bit temporarily so that the + * hci_update_random_address knows that it's safe to go ahead + * and write a new random address. The flag will be set back on + * as soon as the SET_ADV_ENABLE HCI command completes. + */ + clear_bit(HCI_ADVERTISING, &hdev->dev_flags); + + connectable = get_connectable(hdev); + + /* Set require_privacy to true only when non-connectable + * advertising is used. In that case it is fine to use a + * non-resolvable private address. 
+ */ + if (hci_update_random_address(req, !connectable, &own_addr_type) < 0) + return; + + memset(&cp, 0, sizeof(cp)); + cp.min_interval = __constant_cpu_to_le16(0x0800); + cp.max_interval = __constant_cpu_to_le16(0x0800); + cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND; + cp.own_address_type = own_addr_type; + cp.channel_map = hdev->le_adv_channel_map; + + hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp); + + hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); +} + +static void disable_advertising(struct hci_request *req) +{ + u8 enable = 0x00; + + hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); +} + static void service_cache_off(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, @@ -832,12 +896,39 @@ static void service_cache_off(struct work_struct *work) hci_req_run(&req, NULL); } +static void rpa_expired(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + rpa_expired.work); + struct hci_request req; + + BT_DBG(""); + + set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); + + if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) || + hci_conn_num(hdev, LE_LINK) > 0) + return; + + /* The generation of a new RPA and programming it into the + * controller happens in the enable_advertising() function. + */ + + hci_req_init(&req, hdev); + + disable_advertising(&req); + enable_advertising(&req); + + hci_req_run(&req, NULL); +} + static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev) { if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags)) return; INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off); + INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired); /* Non-mgmt controlled devices get this bit set * implicitly so that pairing works for them, however @@ -943,6 +1034,71 @@ static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev) sizeof(settings)); } +static void clean_up_hci_complete(struct hci_dev *hdev, u8 status) +{ + BT_DBG("%s status 0x%02x", hdev->name, status); + + if (hci_conn_count(hdev) == 0) { + cancel_delayed_work(&hdev->power_off); + queue_work(hdev->req_workqueue, &hdev->power_off.work); + } +} + +static int clean_up_hci_state(struct hci_dev *hdev) +{ + struct hci_request req; + struct hci_conn *conn; + + hci_req_init(&req, hdev); + + if (test_bit(HCI_ISCAN, &hdev->flags) || + test_bit(HCI_PSCAN, &hdev->flags)) { + u8 scan = 0x00; + hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); + } + + if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) + disable_advertising(&req); + + if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) { + hci_req_add_le_scan_disable(&req); + } + + list_for_each_entry(conn, &hdev->conn_hash.list, list) { + struct hci_cp_disconnect dc; + struct hci_cp_reject_conn_req rej; + + switch (conn->state) { + case BT_CONNECTED: + case BT_CONFIG: + dc.handle = cpu_to_le16(conn->handle); + dc.reason = 0x15; /* Terminated due to Power Off */ + hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc); + break; + case BT_CONNECT: + if (conn->type == LE_LINK) + hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL, + 0, NULL); + else if (conn->type == ACL_LINK) + hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL, + 6, &conn->dst); + break; + case BT_CONNECT2: + bacpy(&rej.bdaddr, &conn->dst); + rej.reason = 0x15; /* Terminated due to Power Off */ + if (conn->type == ACL_LINK) + hci_req_add(&req, HCI_OP_REJECT_CONN_REQ, + sizeof(rej), &rej); + else if (conn->type == SCO_LINK) + hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ, + sizeof(rej), 
&rej); + break; + } + } + + return hci_req_run(&req, clean_up_hci_complete); +} + static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -986,12 +1142,23 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data, goto failed; } - if (cp->val) + if (cp->val) { queue_work(hdev->req_workqueue, &hdev->power_on); - else - queue_work(hdev->req_workqueue, &hdev->power_off.work); - - err = 0; + err = 0; + } else { + /* Disconnect connections, stop scans, etc */ + err = clean_up_hci_state(hdev); + if (!err) + queue_delayed_work(hdev->req_workqueue, &hdev->power_off, + HCI_POWER_OFF_TIMEOUT); + + /* ENODATA means there were no HCI commands queued */ + if (err == -ENODATA) { + cancel_delayed_work(&hdev->power_off); + queue_work(hdev->req_workqueue, &hdev->power_off.work); + err = 0; + } + } failed: hci_dev_unlock(hdev); @@ -1344,50 +1511,6 @@ static void write_fast_connectable(struct hci_request *req, bool enable) hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type); } -static u8 get_adv_type(struct hci_dev *hdev) -{ - struct pending_cmd *cmd; - bool connectable; - - /* If there's a pending mgmt command the flag will not yet have - * it's final value, so check for this first. - */ - cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev); - if (cmd) { - struct mgmt_mode *cp = cmd->param; - connectable = !!cp->val; - } else { - connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags); - } - - return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND; -} - -static void enable_advertising(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - struct hci_cp_le_set_adv_param cp; - u8 enable = 0x01; - - memset(&cp, 0, sizeof(cp)); - cp.min_interval = __constant_cpu_to_le16(0x0800); - cp.max_interval = __constant_cpu_to_le16(0x0800); - cp.type = get_adv_type(hdev); - cp.own_address_type = hdev->own_addr_type; - cp.channel_map = hdev->le_adv_channel_map; - - hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp); - - hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); -} - -static void disable_advertising(struct hci_request *req) -{ - u8 enable = 0x00; - - hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); -} - static void set_connectable_complete(struct hci_dev *hdev, u8 status) { struct pending_cmd *cmd; @@ -2330,6 +2453,8 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type); + hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type); + err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type); } @@ -2729,12 +2854,22 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, else auth_type = HCI_AT_DEDICATED_BONDING_MITM; - if (cp->addr.type == BDADDR_BREDR) - conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, - cp->addr.type, sec_level, auth_type); - else - conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, - cp->addr.type, sec_level, auth_type); + if (cp->addr.type == BDADDR_BREDR) { + conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level, + auth_type); + } else { + u8 addr_type; + + /* Convert from L2CAP channel address type to HCI address type + */ + if (cp->addr.type == BDADDR_LE_PUBLIC) + addr_type = ADDR_LE_DEV_PUBLIC; + else + addr_type = ADDR_LE_DEV_RANDOM; + + conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type, + sec_level, auth_type); + } if (IS_ERR(conn)) { int status; @@ -3258,7 +3393,7 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev, struct hci_request req; /* General inquiry 
access code (GIAC) */ u8 lap[3] = { 0x33, 0x8b, 0x9e }; - u8 status; + u8 status, own_addr_type; int err; BT_DBG("%s", hdev->name); @@ -3343,18 +3478,31 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev, goto failed; } - if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) { + /* If controller is scanning, it means the background scanning + * is running. Thus, we should temporarily stop it in order to + * set the discovery scanning parameters. + */ + if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) + hci_req_add_le_scan_disable(&req); + + memset(¶m_cp, 0, sizeof(param_cp)); + + /* All active scans will be done with either a resolvable + * private address (when privacy feature has been enabled) + * or unresolvable private address. + */ + err = hci_update_random_address(&req, true, &own_addr_type); + if (err < 0) { err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY, - MGMT_STATUS_BUSY); + MGMT_STATUS_FAILED); mgmt_pending_remove(cmd); goto failed; } - memset(¶m_cp, 0, sizeof(param_cp)); param_cp.type = LE_SCAN_ACTIVE; param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT); param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN); - param_cp.own_address_type = hdev->own_addr_type; + param_cp.own_address_type = own_addr_type; hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), ¶m_cp); @@ -3424,7 +3572,6 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data, struct hci_cp_remote_name_req_cancel cp; struct inquiry_entry *e; struct hci_request req; - struct hci_cp_le_set_scan_enable enable_cp; int err; BT_DBG("%s", hdev->name); @@ -3460,10 +3607,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data, } else { cancel_delayed_work(&hdev->le_scan_disable); - memset(&enable_cp, 0, sizeof(enable_cp)); - enable_cp.enable = LE_SCAN_DISABLE; - hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, - sizeof(enable_cp), &enable_cp); + hci_req_add_le_scan_disable(&req); } break; @@ -3520,15 +3664,17 @@ static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data, hci_dev_lock(hdev); if (!hci_discovery_active(hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME, - MGMT_STATUS_FAILED); + err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, + MGMT_STATUS_FAILED, &cp->addr, + sizeof(cp->addr)); goto failed; } e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr); if (!e) { - err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME, - MGMT_STATUS_INVALID_PARAMS); + err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, + MGMT_STATUS_INVALID_PARAMS, &cp->addr, + sizeof(cp->addr)); goto failed; } @@ -3817,6 +3963,21 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev, err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0); + /* If background scan is running, restart it so new parameters are + * loaded. 
+ */ + if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) && + hdev->discovery.state == DISCOVERY_STOPPED) { + struct hci_request req; + + hci_req_init(&req, hdev); + + hci_req_add_le_scan_disable(&req); + hci_req_add_le_passive_scan(&req); + + hci_req_run(&req, NULL); + } + hci_dev_unlock(hdev); return err; @@ -4182,6 +4343,56 @@ unlock: return err; } +static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data, + u16 len) +{ + struct mgmt_cp_set_privacy *cp = cp_data; + bool changed; + int err; + + BT_DBG("request for %s", hdev->name); + + if (!lmp_le_capable(hdev)) + return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, + MGMT_STATUS_NOT_SUPPORTED); + + if (cp->privacy != 0x00 && cp->privacy != 0x01) + return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, + MGMT_STATUS_INVALID_PARAMS); + + if (hdev_is_powered(hdev)) + return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, + MGMT_STATUS_REJECTED); + + hci_dev_lock(hdev); + + /* If user space supports this command it is also expected to + * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag. + */ + set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags); + + if (cp->privacy) { + changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags); + memcpy(hdev->irk, cp->irk, sizeof(hdev->irk)); + set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); + } else { + changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags); + memset(hdev->irk, 0, sizeof(hdev->irk)); + clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); + } + + err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev); + if (err < 0) + goto unlock; + + if (changed) + err = new_settings(hdev, sk); + +unlock: + hci_dev_unlock(hdev); + return err; +} + static bool irk_is_valid(struct mgmt_irk_info *irk) { switch (irk->addr.type) { @@ -4396,7 +4607,7 @@ static const struct mgmt_handler { { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE }, { set_secure_conn, false, MGMT_SETTING_SIZE }, { set_debug_keys, false, MGMT_SETTING_SIZE }, - { }, + { set_privacy, false, MGMT_SET_PRIVACY_SIZE }, { load_irks, true, MGMT_LOAD_IRKS_SIZE }, }; @@ -4514,6 +4725,17 @@ void mgmt_index_removed(struct hci_dev *hdev) mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL); } +/* This function requires the caller holds hdev->lock */ +static void restart_le_auto_conns(struct hci_dev *hdev) +{ + struct hci_conn_params *p; + + list_for_each_entry(p, &hdev->le_conn_params, list) { + if (p->auto_connect == HCI_AUTO_CONN_ALWAYS) + hci_pend_le_conn_add(hdev, &p->addr, p->addr_type); + } +} + static void powered_complete(struct hci_dev *hdev, u8 status) { struct cmd_lookup match = { NULL, hdev }; @@ -4522,6 +4744,8 @@ static void powered_complete(struct hci_dev *hdev, u8 status) hci_dev_lock(hdev); + restart_le_auto_conns(hdev); + mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); new_settings(hdev, match.sk); @@ -4563,11 +4787,6 @@ static int powered_update_hci(struct hci_dev *hdev) } if (lmp_le_capable(hdev)) { - /* Set random address to static address if configured */ - if (bacmp(&hdev->static_addr, BDADDR_ANY)) - hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6, - &hdev->static_addr); - /* Make sure the controller has a good default for * advertising data. This also applies to the case * where BR/EDR was toggled during the AUTO_OFF phase. 
@@ -4693,6 +4912,10 @@ void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable) if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev)) return; + /* Powering off may clear the scan mode - don't let that interfere */ + if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) + return; + if (discoverable) { changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags); } else { @@ -4726,6 +4949,10 @@ void mgmt_connectable(struct hci_dev *hdev, u8 connectable) if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) return; + /* Powering off may clear the scan mode - don't let that interfere */ + if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) + return; + if (connectable) changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags); else @@ -4735,6 +4962,18 @@ void mgmt_connectable(struct hci_dev *hdev, u8 connectable) new_settings(hdev, NULL); } +void mgmt_advertising(struct hci_dev *hdev, u8 advertising) +{ + /* Powering off may stop advertising - don't let that interfere */ + if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) + return; + + if (advertising) + set_bit(HCI_ADVERTISING, &hdev->dev_flags); + else + clear_bit(HCI_ADVERTISING, &hdev->dev_flags); +} + void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status) { u8 mgmt_err = mgmt_status(status); @@ -4793,11 +5032,11 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key) ev.key.type = key->authenticated; ev.key.enc_size = key->enc_size; ev.key.ediv = key->ediv; + ev.key.rand = key->rand; if (key->type == HCI_SMP_LTK) ev.key.master = 1; - memcpy(ev.key.rand, key->rand, sizeof(key->rand)); memcpy(ev.key.val, key->val, sizeof(key->val)); mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL); @@ -4907,11 +5146,29 @@ static void unpair_device_rsp(struct pending_cmd *cmd, void *data) } void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, - u8 link_type, u8 addr_type, u8 reason) + u8 link_type, u8 addr_type, u8 reason, + bool mgmt_connected) { struct mgmt_ev_device_disconnected ev; + struct pending_cmd *power_off; struct sock *sk = NULL; + power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev); + if (power_off) { + struct mgmt_mode *cp = power_off->param; + + /* The connection is still in hci_conn_hash so test for 1 + * instead of 0 to know if this is the last one. + */ + if (!cp->val && hci_conn_count(hdev) == 1) { + cancel_delayed_work(&hdev->power_off); + queue_work(hdev->req_workqueue, &hdev->power_off.work); + } + } + + if (!mgmt_connected) + return; + if (link_type != ACL_LINK && link_type != LE_LINK) return; @@ -4966,6 +5223,20 @@ void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status) { struct mgmt_ev_connect_failed ev; + struct pending_cmd *power_off; + + power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev); + if (power_off) { + struct mgmt_mode *cp = power_off->param; + + /* The connection is still in hci_conn_hash so test for 1 + * instead of 0 to know if this is the last one. 
+ */ + if (!cp->val && hci_conn_count(hdev) == 1) { + cancel_delayed_work(&hdev->power_off); + queue_work(hdev->req_workqueue, &hdev->power_off.work); + } + } bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = link_to_bdaddr(link_type, addr_type); diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index f06068072bd..f886bcae1b7 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -124,6 +124,24 @@ bool smp_irk_matches(struct crypto_blkcipher *tfm, u8 irk[16], return !memcmp(bdaddr->b, hash, 3); } +int smp_generate_rpa(struct crypto_blkcipher *tfm, u8 irk[16], bdaddr_t *rpa) +{ + int err; + + get_random_bytes(&rpa->b[3], 3); + + rpa->b[5] &= 0x3f; /* Clear two most significant bits */ + rpa->b[5] |= 0x40; /* Set second most significant bit */ + + err = smp_ah(tfm, irk, &rpa->b[3], rpa->b); + if (err < 0) + return err; + + BT_DBG("RPA %pMR", rpa); + + return 0; +} + static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16], u8 preq[7], u8 pres[7], u8 _iat, bdaddr_t *ia, u8 _rat, bdaddr_t *ra, u8 res[16]) @@ -265,6 +283,9 @@ static void build_pairing_cmd(struct l2cap_conn *conn, if (test_bit(HCI_RPA_RESOLVING, &hdev->dev_flags)) remote_dist |= SMP_DIST_ID_KEY; + if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) + local_dist |= SMP_DIST_ID_KEY; + if (rsp == NULL) { req->io_capability = conn->hcon->io_capability; req->oob_flag = SMP_OOB_NOT_PRESENT; @@ -424,14 +445,9 @@ static void confirm_work(struct work_struct *work) /* Prevent mutual access to hdev->tfm_aes */ hci_dev_lock(hdev); - if (conn->hcon->out) - ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp, - conn->hcon->src_type, &conn->hcon->src, - conn->hcon->dst_type, &conn->hcon->dst, res); - else - ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp, - conn->hcon->dst_type, &conn->hcon->dst, - conn->hcon->src_type, &conn->hcon->src, res); + ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp, + conn->hcon->init_addr_type, &conn->hcon->init_addr, + conn->hcon->resp_addr_type, &conn->hcon->resp_addr, res); hci_dev_unlock(hdev); @@ -471,14 +487,9 @@ static void random_work(struct work_struct *work) /* Prevent mutual access to hdev->tfm_aes */ hci_dev_lock(hdev); - if (hcon->out) - ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp, - hcon->src_type, &hcon->src, - hcon->dst_type, &hcon->dst, res); - else - ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp, - hcon->dst_type, &hcon->dst, - hcon->src_type, &hcon->src, res); + ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp, + hcon->init_addr_type, &hcon->init_addr, + hcon->resp_addr_type, &hcon->resp_addr, res); hci_dev_unlock(hdev); @@ -496,11 +507,9 @@ static void random_work(struct work_struct *work) } if (hcon->out) { - u8 stk[16], rand[8]; - __le16 ediv; - - memset(rand, 0, sizeof(rand)); - ediv = 0; + u8 stk[16]; + __le64 rand = 0; + __le16 ediv = 0; smp_s1(tfm, smp->tk, smp->rrnd, smp->prnd, key); swap128(key, stk); @@ -516,11 +525,9 @@ static void random_work(struct work_struct *work) hci_le_start_enc(hcon, ediv, rand, stk); hcon->enc_key_size = smp->enc_key_size; } else { - u8 stk[16], r[16], rand[8]; - __le16 ediv; - - memset(rand, 0, sizeof(rand)); - ediv = 0; + u8 stk[16], r[16]; + __le64 rand = 0; + __le16 ediv = 0; swap128(smp->prnd, r); smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(r), r); @@ -542,6 +549,20 @@ error: smp_failure(conn, reason); } +static void smp_reencrypt(struct work_struct *work) +{ + struct smp_chan *smp = container_of(work, struct smp_chan, + reencrypt.work); + struct l2cap_conn *conn = 
smp->conn; + struct hci_conn *hcon = conn->hcon; + struct smp_ltk *ltk = smp->ltk; + + BT_DBG(""); + + hci_le_start_enc(hcon, ltk->ediv, ltk->rand, ltk->val); + hcon->enc_key_size = ltk->enc_size; +} + static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) { struct smp_chan *smp; @@ -552,6 +573,7 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) INIT_WORK(&smp->confirm, confirm_work); INIT_WORK(&smp->random, random_work); + INIT_DELAYED_WORK(&smp->reencrypt, smp_reencrypt); smp->conn = conn; conn->smp_chan = smp; @@ -569,9 +591,29 @@ void smp_chan_destroy(struct l2cap_conn *conn) BUG_ON(!smp); + cancel_delayed_work_sync(&smp->reencrypt); + complete = test_bit(SMP_FLAG_COMPLETE, &smp->smp_flags); mgmt_smp_complete(conn->hcon, complete); + /* If pairing failed clean up any keys we might have */ + if (!complete) { + if (smp->ltk) { + list_del(&smp->ltk->list); + kfree(smp->ltk); + } + + if (smp->slave_ltk) { + list_del(&smp->slave_ltk->list); + kfree(smp->slave_ltk); + } + + if (smp->remote_irk) { + list_del(&smp->remote_irk->list); + kfree(smp->remote_irk); + } + } + kfree(smp); conn->smp_chan = NULL; conn->hcon->smp_conn = NULL; @@ -927,6 +969,9 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb) if (!(smp->remote_key_dist & SMP_DIST_ENC_KEY)) return 0; + /* Mark the information as received */ + smp->remote_key_dist &= ~SMP_DIST_ENC_KEY; + skb_pull(skb, sizeof(*rp)); hci_dev_lock(hdev); @@ -936,7 +981,7 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb) rp->ediv, rp->rand); smp->ltk = ltk; if (!(smp->remote_key_dist & SMP_DIST_ID_KEY)) - smp_distribute_keys(conn, 1); + smp_distribute_keys(conn); hci_dev_unlock(hdev); return 0; @@ -980,8 +1025,24 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn, if (!(smp->remote_key_dist & SMP_DIST_ID_KEY)) return 0; + /* Mark the information as received */ + smp->remote_key_dist &= ~SMP_DIST_ID_KEY; + skb_pull(skb, sizeof(*info)); + /* Strictly speaking the Core Specification (4.1) allows sending + * an empty address which would force us to rely on just the IRK + * as "identity information". However, since such + * implementations are not known of and in order to not over + * complicate our implementation, simply pretend that we never + * received an IRK for such a device. 
+ */ + if (!bacmp(&info->bdaddr, BDADDR_ANY)) { + BT_ERR("Ignoring IRK with no identity address"); + smp_distribute_keys(conn); + return 0; + } + bacpy(&smp->id_addr, &info->bdaddr); smp->id_addr_type = info->addr_type; @@ -999,7 +1060,7 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn, l2cap_conn_update_id_addr(hcon); - smp_distribute_keys(conn, 1); + smp_distribute_keys(conn); return 0; } @@ -1128,26 +1189,29 @@ static void smp_notify_keys(struct l2cap_conn *conn) } } -int smp_distribute_keys(struct l2cap_conn *conn, __u8 force) +int smp_distribute_keys(struct l2cap_conn *conn) { struct smp_cmd_pairing *req, *rsp; struct smp_chan *smp = conn->smp_chan; + struct hci_conn *hcon = conn->hcon; + struct hci_dev *hdev = hcon->hdev; + bool ltk_encrypt; __u8 *keydist; - BT_DBG("conn %p force %d", conn, force); + BT_DBG("conn %p", conn); - if (!test_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) + if (!test_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) return 0; rsp = (void *) &smp->prsp[1]; /* The responder sends its keys first */ - if (!force && conn->hcon->out && (rsp->resp_key_dist & 0x07)) + if (hcon->out && (smp->remote_key_dist & 0x07)) return 0; req = (void *) &smp->preq[1]; - if (conn->hcon->out) { + if (hcon->out) { keydist = &rsp->init_key_dist; *keydist &= req->init_key_dist; } else { @@ -1160,24 +1224,25 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force) if (*keydist & SMP_DIST_ENC_KEY) { struct smp_cmd_encrypt_info enc; struct smp_cmd_master_ident ident; - struct hci_conn *hcon = conn->hcon; struct smp_ltk *ltk; u8 authenticated; __le16 ediv; + __le64 rand; get_random_bytes(enc.ltk, sizeof(enc.ltk)); get_random_bytes(&ediv, sizeof(ediv)); - get_random_bytes(ident.rand, sizeof(ident.rand)); + get_random_bytes(&rand, sizeof(rand)); smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc); authenticated = hcon->sec_level == BT_SECURITY_HIGH; - ltk = hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type, + ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, HCI_SMP_LTK_SLAVE, authenticated, enc.ltk, - smp->enc_key_size, ediv, ident.rand); + smp->enc_key_size, ediv, rand); smp->slave_ltk = ltk; ident.ediv = ediv; + ident.rand = rand; smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident); @@ -1188,14 +1253,18 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force) struct smp_cmd_ident_addr_info addrinfo; struct smp_cmd_ident_info idinfo; - /* Send a dummy key */ - get_random_bytes(idinfo.irk, sizeof(idinfo.irk)); + memcpy(idinfo.irk, hdev->irk, sizeof(idinfo.irk)); smp_send_cmd(conn, SMP_CMD_IDENT_INFO, sizeof(idinfo), &idinfo); - /* Just public address */ - memset(&addrinfo, 0, sizeof(addrinfo)); - bacpy(&addrinfo.bdaddr, &conn->hcon->src); + /* The hci_conn contains the local identity address + * after the connection has been established. + * + * This is true even when the connection has been + * established using a resolvable random address. + */ + bacpy(&addrinfo.bdaddr, &hcon->src); + addrinfo.addr_type = hcon->src_type; smp_send_cmd(conn, SMP_CMD_IDENT_ADDR_INFO, sizeof(addrinfo), &addrinfo); @@ -1214,8 +1283,31 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force) *keydist &= ~SMP_DIST_SIGN; } - if (conn->hcon->out || force || !(rsp->init_key_dist & 0x07)) { - clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags); + /* If there are still keys to be received wait for them */ + if ((smp->remote_key_dist & 0x07)) + return 0; + + /* Check if we should try to re-encrypt the link with the LTK. 
+ * SMP_FLAG_LTK_ENCRYPT flag is used to track whether we've + * already tried this (in which case we shouldn't try again). + * + * The request will trigger an encryption key refresh event + * which will cause a call to auth_cfm and eventually lead to + * l2cap_core.c calling this smp_distribute_keys function again + * and thereby completing the process. + */ + if (smp->ltk) + ltk_encrypt = !test_and_set_bit(SMP_FLAG_LTK_ENCRYPT, + &smp->smp_flags); + else + ltk_encrypt = false; + + /* Re-encrypt the link with LTK if possible */ + if (ltk_encrypt && hcon->out) { + queue_delayed_work(hdev->req_workqueue, &smp->reencrypt, + SMP_REENCRYPT_TIMEOUT); + } else { + clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags); cancel_delayed_work_sync(&conn->security_timer); set_bit(SMP_FLAG_COMPLETE, &smp->smp_flags); smp_notify_keys(conn); diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h index d8cc543f523..f55d8361721 100644 --- a/net/bluetooth/smp.h +++ b/net/bluetooth/smp.h @@ -78,7 +78,7 @@ struct smp_cmd_encrypt_info { #define SMP_CMD_MASTER_IDENT 0x07 struct smp_cmd_master_ident { __le16 ediv; - __u8 rand[8]; + __le64 rand; } __packed; #define SMP_CMD_IDENT_INFO 0x08 @@ -118,7 +118,10 @@ struct smp_cmd_security_req { #define SMP_FLAG_TK_VALID 1 #define SMP_FLAG_CFM_PENDING 2 #define SMP_FLAG_MITM_AUTH 3 -#define SMP_FLAG_COMPLETE 4 +#define SMP_FLAG_LTK_ENCRYPT 4 +#define SMP_FLAG_COMPLETE 5 + +#define SMP_REENCRYPT_TIMEOUT msecs_to_jiffies(250) struct smp_chan { struct l2cap_conn *conn; @@ -139,18 +142,20 @@ struct smp_chan { unsigned long smp_flags; struct work_struct confirm; struct work_struct random; + struct delayed_work reencrypt; }; /* SMP Commands */ bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level); int smp_conn_security(struct hci_conn *hcon, __u8 sec_level); int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb); -int smp_distribute_keys(struct l2cap_conn *conn, __u8 force); +int smp_distribute_keys(struct l2cap_conn *conn); int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey); void smp_chan_destroy(struct l2cap_conn *conn); bool smp_irk_matches(struct crypto_blkcipher *tfm, u8 irk[16], bdaddr_t *bdaddr); +int smp_generate_rpa(struct crypto_blkcipher *tfm, u8 irk[16], bdaddr_t *rpa); #endif /* __SMP_H */ diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 8603dfb52b3..0d1a0f801b9 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1700,14 +1700,8 @@ void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue); void ieee80211_add_pending_skb(struct ieee80211_local *local, struct sk_buff *skb); -void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, - struct sk_buff_head *skbs, - void (*fn)(void *data), void *data); -static inline void ieee80211_add_pending_skbs(struct ieee80211_local *local, - struct sk_buff_head *skbs) -{ - ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL); -} +void ieee80211_add_pending_skbs(struct ieee80211_local *local, + struct sk_buff_head *skbs); void ieee80211_flush_queues(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 46b62bb3677..94f0af29b74 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -222,6 +222,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, switch (vht_oper->chan_width) { case IEEE80211_VHT_CHANWIDTH_USE_HT: vht_chandef.width = 
chandef->width; + vht_chandef.center_freq1 = chandef->center_freq1; break; case IEEE80211_VHT_CHANWIDTH_80MHZ: vht_chandef.width = NL80211_CHAN_WIDTH_80; @@ -271,6 +272,28 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, ret = 0; out: + /* + * When tracking the current AP, don't do any further checks if the + * new chandef is identical to the one we're currently using for the + * connection. This keeps us from playing ping-pong with regulatory, + * without it the following can happen (for example): + * - connect to an AP with 80 MHz, world regdom allows 80 MHz + * - AP advertises regdom US + * - CRDA loads regdom US with 80 MHz prohibited (old database) + * - the code below detects an unsupported channel, downgrades, and + * we disconnect from the AP in the caller + * - disconnect causes CRDA to reload world regdomain and the game + * starts anew. + * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881) + * + * It seems possible that there are still scenarios with CSA or real + * bandwidth changes where this could happen, but those cases are + * less common and wouldn't completely prevent using the AP. + */ + if (tracking && + cfg80211_chandef_identical(chandef, &sdata->vif.bss_conf.chandef)) + return ret; + /* don't print the message below for VHT mismatch if VHT is disabled */ if (ret & IEEE80211_STA_DISABLE_VHT) vht_chandef = *chandef; @@ -3848,6 +3871,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (WARN_ON(!chanctx_conf)) { rcu_read_unlock(); + sta_info_free(local, new_sta); return -EINVAL; } rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 58e4b7052d1..5b617660b0b 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1092,6 +1092,13 @@ static void sta_ps_end(struct sta_info *sta) sta->sta.addr, sta->sta.aid); if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { + /* + * Clear the flag only if the other one is still set + * so that the TX path won't start TX'ing new frames + * directly ... In the case that the driver flag isn't + * set ieee80211_sta_ps_deliver_wakeup() will clear it. 
+ */ + clear_sta_flag(sta, WLAN_STA_PS_STA); ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n", sta->sta.addr, sta->sta.aid); return; diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index decd30c1e29..a023b432143 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -91,7 +91,7 @@ static int sta_info_hash_del(struct ieee80211_local *local, return -ENOENT; } -static void cleanup_single_sta(struct sta_info *sta) +static void __cleanup_single_sta(struct sta_info *sta) { int ac, i; struct tid_ampdu_tx *tid_tx; @@ -99,7 +99,8 @@ static void cleanup_single_sta(struct sta_info *sta) struct ieee80211_local *local = sdata->local; struct ps_data *ps; - if (test_sta_flag(sta, WLAN_STA_PS_STA)) { + if (test_sta_flag(sta, WLAN_STA_PS_STA) || + test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { if (sta->sdata->vif.type == NL80211_IFTYPE_AP || sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) ps = &sdata->bss->ps; @@ -109,6 +110,7 @@ static void cleanup_single_sta(struct sta_info *sta) return; clear_sta_flag(sta, WLAN_STA_PS_STA); + clear_sta_flag(sta, WLAN_STA_PS_DRIVER); atomic_dec(&ps->num_sta_ps); sta_info_recalc_tim(sta); @@ -139,7 +141,14 @@ static void cleanup_single_sta(struct sta_info *sta) ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending); kfree(tid_tx); } +} +static void cleanup_single_sta(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + + __cleanup_single_sta(sta); sta_info_free(local, sta); } @@ -330,6 +339,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, rcu_read_unlock(); spin_lock_init(&sta->lock); + spin_lock_init(&sta->ps_lock); INIT_WORK(&sta->drv_unblock_wk, sta_unblock); INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); mutex_init(&sta->ampdu_mlme.mtx); @@ -487,21 +497,26 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) goto out_err; } - /* notify driver */ - err = sta_info_insert_drv_state(local, sdata, sta); - if (err) - goto out_err; - local->num_sta++; local->sta_generation++; smp_mb(); + /* simplify things and don't accept BA sessions yet */ + set_sta_flag(sta, WLAN_STA_BLOCK_BA); + /* make the station visible */ sta_info_hash_add(local, sta); list_add_rcu(&sta->list, &local->sta_list); + /* notify driver */ + err = sta_info_insert_drv_state(local, sdata, sta); + if (err) + goto out_remove; + set_sta_flag(sta, WLAN_STA_INSERTED); + /* accept BA sessions now */ + clear_sta_flag(sta, WLAN_STA_BLOCK_BA); ieee80211_recalc_min_chandef(sdata); ieee80211_sta_debugfs_add(sta); @@ -522,6 +537,12 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) mesh_accept_plinks_update(sdata); return 0; + out_remove: + sta_info_hash_del(local, sta); + list_del_rcu(&sta->list); + local->num_sta--; + synchronize_net(); + __cleanup_single_sta(sta); out_err: mutex_unlock(&local->sta_mtx); rcu_read_lock(); @@ -1071,10 +1092,14 @@ struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif, } EXPORT_SYMBOL(ieee80211_find_sta); -static void clear_sta_ps_flags(void *_sta) +/* powersave support code */ +void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) { - struct sta_info *sta = _sta; struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + struct sk_buff_head pending; + int filtered = 0, buffered = 0, ac; + unsigned long flags; struct ps_data *ps; if (sdata->vif.type == NL80211_IFTYPE_AP || @@ -1085,20 +1110,6 @@ static void clear_sta_ps_flags(void *_sta) else 
return; - clear_sta_flag(sta, WLAN_STA_PS_DRIVER); - if (test_and_clear_sta_flag(sta, WLAN_STA_PS_STA)) - atomic_dec(&ps->num_sta_ps); -} - -/* powersave support code */ -void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) -{ - struct ieee80211_sub_if_data *sdata = sta->sdata; - struct ieee80211_local *local = sdata->local; - struct sk_buff_head pending; - int filtered = 0, buffered = 0, ac; - unsigned long flags; - clear_sta_flag(sta, WLAN_STA_SP); BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1); @@ -1109,6 +1120,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) skb_queue_head_init(&pending); + /* sync with ieee80211_tx_h_unicast_ps_buf */ + spin_lock(&sta->ps_lock); /* Send all buffered frames to the station */ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { int count = skb_queue_len(&pending), tmp; @@ -1127,7 +1140,12 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) buffered += tmp - count; } - ieee80211_add_pending_skbs_fn(local, &pending, clear_sta_ps_flags, sta); + ieee80211_add_pending_skbs(local, &pending); + clear_sta_flag(sta, WLAN_STA_PS_DRIVER); + clear_sta_flag(sta, WLAN_STA_PS_STA); + spin_unlock(&sta->ps_lock); + + atomic_dec(&ps->num_sta_ps); /* This station just woke up and isn't aware of our SMPS state */ if (!ieee80211_smps_is_restrictive(sta->known_smps_mode, diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index d4d85de0d75..4acc5fc402f 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -268,6 +268,7 @@ struct ieee80211_tx_latency_stat { * @drv_unblock_wk: used for driver PS unblocking * @listen_interval: listen interval of this station, when we're acting as AP * @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly + * @ps_lock: used for powersave (when mac80211 is the AP) related locking * @ps_tx_buf: buffers (per AC) of frames to transmit to this station * when it leaves power saving state or polls * @tx_filtered: buffers (per AC) of frames we already tried to @@ -357,10 +358,8 @@ struct sta_info { /* use the accessors defined below */ unsigned long _flags; - /* - * STA powersave frame queues, no more than the internal - * locking required. - */ + /* STA powersave lock and frame queues */ + spinlock_t ps_lock; struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS]; struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS]; unsigned long driver_buffered_tids; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 722151fa5dc..cd9f80498c4 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -477,6 +477,20 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) sta->sta.addr, sta->sta.aid, ac); if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) purge_old_ps_buffers(tx->local); + + /* sync with ieee80211_sta_ps_deliver_wakeup */ + spin_lock(&sta->ps_lock); + /* + * STA woke up in the meantime and all the frames on ps_tx_buf have + * been queued to the pending queue. No reordering can happen, go + * ahead and Tx the packet. 
+ */ + if (!test_sta_flag(sta, WLAN_STA_PS_STA) && + !test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { + spin_unlock(&sta->ps_lock); + return TX_CONTINUE; + } + if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) { struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]); ps_dbg(tx->sdata, @@ -491,6 +505,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb); + spin_unlock(&sta->ps_lock); if (!timer_pending(&local->sta_cleanup)) mod_timer(&local->sta_cleanup, diff --git a/net/mac80211/util.c b/net/mac80211/util.c index d842af5c8a9..275c94f995f 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -435,9 +435,8 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local, spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } -void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, - struct sk_buff_head *skbs, - void (*fn)(void *data), void *data) +void ieee80211_add_pending_skbs(struct ieee80211_local *local, + struct sk_buff_head *skbs) { struct ieee80211_hw *hw = &local->hw; struct sk_buff *skb; @@ -461,9 +460,6 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, __skb_queue_tail(&local->pending[queue], skb); } - if (fn) - fn(data); - for (i = 0; i < hw->queues; i++) __ieee80211_wake_queue(hw, i, IEEE80211_QUEUE_STOP_REASON_SKB_ADD); @@ -1767,6 +1763,26 @@ int ieee80211_reconfig(struct ieee80211_local *local) IEEE80211_QUEUE_STOP_REASON_SUSPEND); /* + * Reconfigure sched scan if it was interrupted by FW restart or + * suspend. + */ + mutex_lock(&local->mtx); + sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata, + lockdep_is_held(&local->mtx)); + if (sched_scan_sdata && local->sched_scan_req) + /* + * Sched scan stopped, but we don't want to report it. Instead, + * we're trying to reschedule. + */ + if (__ieee80211_request_sched_scan_start(sched_scan_sdata, + local->sched_scan_req)) + sched_scan_stopped = true; + mutex_unlock(&local->mtx); + + if (sched_scan_stopped) + cfg80211_sched_scan_stopped(local->hw.wiphy); + + /* * If this is for hw restart things are still running. * We may want to change that later, however. */ @@ -1794,26 +1810,6 @@ int ieee80211_reconfig(struct ieee80211_local *local) WARN_ON(1); #endif - /* - * Reconfigure sched scan if it was interrupted by FW restart or - * suspend. - */ - mutex_lock(&local->mtx); - sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata, - lockdep_is_held(&local->mtx)); - if (sched_scan_sdata && local->sched_scan_req) - /* - * Sched scan stopped, but we don't want to report it. Instead, - * we're trying to reschedule. 
- */ - if (__ieee80211_request_sched_scan_start(sched_scan_sdata, - local->sched_scan_req)) - sched_scan_stopped = true; - mutex_unlock(&local->mtx); - - if (sched_scan_stopped) - cfg80211_sched_scan_stopped(local->hw.wiphy); - return 0; } diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index 21211c60ca9..d51422c778d 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c @@ -154,6 +154,11 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, return IEEE80211_AC_BE; } + if (skb->protocol == sdata->control_port_protocol) { + skb->priority = 7; + return ieee80211_downgrade_queue(sdata, skb); + } + /* use the data classifier to determine what 802.1d tag the * data frame has */ rcu_read_lock(); diff --git a/net/nfc/core.c b/net/nfc/core.c index ca1e65f4b13..819b87702b7 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -280,9 +280,6 @@ static struct nfc_target *nfc_find_target(struct nfc_dev *dev, u32 target_idx) { int i; - if (dev->n_targets == 0) - return NULL; - for (i = 0; i < dev->n_targets; i++) { if (dev->targets[i].idx == target_idx) return &dev->targets[i]; @@ -546,9 +543,9 @@ error: struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx) { - struct nfc_se *se, *n; + struct nfc_se *se; - list_for_each_entry_safe(se, n, &dev->secure_elements, list) + list_for_each_entry(se, &dev->secure_elements, list) if (se->idx == se_idx) return se; @@ -655,9 +652,6 @@ int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len) { pr_debug("dev_name=%s gb_len=%d\n", dev_name(&dev->dev), gb_len); - if (gb_len > NFC_MAX_GT_LEN) - return -EINVAL; - return nfc_llcp_set_remote_gb(dev, gb, gb_len); } EXPORT_SYMBOL(nfc_set_remote_general_bytes); diff --git a/net/nfc/digital.h b/net/nfc/digital.h index 08b29b55ea6..3759add68b1 100644 --- a/net/nfc/digital.h +++ b/net/nfc/digital.h @@ -72,6 +72,12 @@ void digital_poll_next_tech(struct nfc_digital_dev *ddev); int digital_in_send_sens_req(struct nfc_digital_dev *ddev, u8 rf_tech); int digital_in_send_sensf_req(struct nfc_digital_dev *ddev, u8 rf_tech); +int digital_in_send_iso15693_inv_req(struct nfc_digital_dev *ddev, u8 rf_tech); + +int digital_in_iso_dep_pull_sod(struct nfc_digital_dev *ddev, + struct sk_buff *skb); +int digital_in_iso_dep_push_sod(struct nfc_digital_dev *ddev, + struct sk_buff *skb); int digital_target_found(struct nfc_digital_dev *ddev, struct nfc_target *target, u8 protocol); diff --git a/net/nfc/digital_core.c b/net/nfc/digital_core.c index c129d1571ca..e01e15dbf1a 100644 --- a/net/nfc/digital_core.c +++ b/net/nfc/digital_core.c @@ -25,6 +25,8 @@ #define DIGITAL_PROTO_NFCF_RF_TECH \ (NFC_PROTO_FELICA_MASK | NFC_PROTO_NFC_DEP_MASK) +#define DIGITAL_PROTO_ISO15693_RF_TECH NFC_PROTO_ISO15693_MASK + struct digital_cmd { struct list_head queue; @@ -331,6 +333,18 @@ int digital_target_found(struct nfc_digital_dev *ddev, } break; + case NFC_PROTO_ISO15693: + framing = NFC_DIGITAL_FRAMING_ISO15693_T5T; + check_crc = digital_skb_check_crc_b; + add_crc = digital_skb_add_crc_b; + break; + + case NFC_PROTO_ISO14443: + framing = NFC_DIGITAL_FRAMING_NFCA_T4T; + check_crc = digital_skb_check_crc_a; + add_crc = digital_skb_add_crc_a; + break; + default: pr_err("Invalid protocol %d\n", protocol); return -EINVAL; @@ -461,7 +475,7 @@ static int digital_start_poll(struct nfc_dev *nfc_dev, __u32 im_protocols, digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_106A, digital_in_send_sens_req); - if (im_protocols & DIGITAL_PROTO_NFCF_RF_TECH) { + if (matching_im_protocols & DIGITAL_PROTO_NFCF_RF_TECH) { 
digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_212F, digital_in_send_sensf_req); @@ -469,7 +483,11 @@ static int digital_start_poll(struct nfc_dev *nfc_dev, __u32 im_protocols, digital_in_send_sensf_req); } - if (tm_protocols & NFC_PROTO_NFC_DEP_MASK) { + if (matching_im_protocols & DIGITAL_PROTO_ISO15693_RF_TECH) + digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_ISO15693, + digital_in_send_iso15693_inv_req); + + if (matching_tm_protocols & NFC_PROTO_NFC_DEP_MASK) { if (ddev->ops->tg_listen_mdaa) { digital_add_poll_tech(ddev, 0, digital_tg_listen_mdaa); @@ -607,20 +625,30 @@ static void digital_in_send_complete(struct nfc_digital_dev *ddev, void *arg, if (IS_ERR(resp)) { rc = PTR_ERR(resp); + resp = NULL; goto done; } - if (ddev->curr_protocol == NFC_PROTO_MIFARE) + if (ddev->curr_protocol == NFC_PROTO_MIFARE) { rc = digital_in_recv_mifare_res(resp); - else - rc = ddev->skb_check_crc(resp); + /* crc check is done in digital_in_recv_mifare_res() */ + goto done; + } + if (ddev->curr_protocol == NFC_PROTO_ISO14443) { + rc = digital_in_iso_dep_pull_sod(ddev, resp); + if (rc) + goto done; + } + + rc = ddev->skb_check_crc(resp); + +done: if (rc) { kfree_skb(resp); resp = NULL; } -done: data_exch->cb(data_exch->cb_context, resp, rc); kfree(data_exch); @@ -632,6 +660,7 @@ static int digital_in_send(struct nfc_dev *nfc_dev, struct nfc_target *target, { struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev); struct digital_data_exch *data_exch; + int rc; data_exch = kzalloc(sizeof(struct digital_data_exch), GFP_KERNEL); if (!data_exch) { @@ -642,13 +671,27 @@ static int digital_in_send(struct nfc_dev *nfc_dev, struct nfc_target *target, data_exch->cb = cb; data_exch->cb_context = cb_context; - if (ddev->curr_protocol == NFC_PROTO_NFC_DEP) - return digital_in_send_dep_req(ddev, target, skb, data_exch); + if (ddev->curr_protocol == NFC_PROTO_NFC_DEP) { + rc = digital_in_send_dep_req(ddev, target, skb, data_exch); + goto exit; + } + + if (ddev->curr_protocol == NFC_PROTO_ISO14443) { + rc = digital_in_iso_dep_push_sod(ddev, skb); + if (rc) + goto exit; + } ddev->skb_add_crc(skb); - return digital_in_send_cmd(ddev, skb, 500, digital_in_send_complete, - data_exch); + rc = digital_in_send_cmd(ddev, skb, 500, digital_in_send_complete, + data_exch); + +exit: + if (rc) + kfree(data_exch); + + return rc; } static struct nfc_ops digital_nfc_ops = { @@ -700,6 +743,10 @@ struct nfc_digital_dev *nfc_digital_allocate_device(struct nfc_digital_ops *ops, ddev->protocols |= NFC_PROTO_FELICA_MASK; if (supported_protocols & NFC_PROTO_NFC_DEP_MASK) ddev->protocols |= NFC_PROTO_NFC_DEP_MASK; + if (supported_protocols & NFC_PROTO_ISO15693_MASK) + ddev->protocols |= NFC_PROTO_ISO15693_MASK; + if (supported_protocols & NFC_PROTO_ISO14443_MASK) + ddev->protocols |= NFC_PROTO_ISO14443_MASK; ddev->tx_headroom = tx_headroom + DIGITAL_MAX_HEADER_LEN; ddev->tx_tailroom = tx_tailroom + DIGITAL_CRC_LEN; diff --git a/net/nfc/digital_technology.c b/net/nfc/digital_technology.c index 251c8c753eb..278c3fed27e 100644 --- a/net/nfc/digital_technology.c +++ b/net/nfc/digital_technology.c @@ -30,6 +30,7 @@ #define DIGITAL_SEL_RES_NFCID1_COMPLETE(sel_res) (!((sel_res) & 0x04)) #define DIGITAL_SEL_RES_IS_T2T(sel_res) (!((sel_res) & 0x60)) +#define DIGITAL_SEL_RES_IS_T4T(sel_res) ((sel_res) & 0x20) #define DIGITAL_SEL_RES_IS_NFC_DEP(sel_res) ((sel_res) & 0x40) #define DIGITAL_SENS_RES_IS_T1T(sens_res) (((sens_res) & 0x0C00) == 0x0C00) @@ -51,6 +52,34 @@ #define DIGITAL_SENSF_REQ_RC_SC 1 #define DIGITAL_SENSF_REQ_RC_AP 2 +#define 
DIGITAL_CMD_ISO15693_INVENTORY_REQ 0x01 + +#define DIGITAL_ISO15693_REQ_FLAG_DATA_RATE BIT(1) +#define DIGITAL_ISO15693_REQ_FLAG_INVENTORY BIT(2) +#define DIGITAL_ISO15693_REQ_FLAG_NB_SLOTS BIT(5) +#define DIGITAL_ISO15693_RES_FLAG_ERROR BIT(0) +#define DIGITAL_ISO15693_RES_IS_VALID(flags) \ + (!((flags) & DIGITAL_ISO15693_RES_FLAG_ERROR)) + +#define DIGITAL_ISO_DEP_I_PCB 0x02 +#define DIGITAL_ISO_DEP_PNI(pni) ((pni) & 0x01) + +#define DIGITAL_ISO_DEP_PCB_TYPE(pcb) ((pcb) & 0xC0) + +#define DIGITAL_ISO_DEP_I_BLOCK 0x00 + +#define DIGITAL_ISO_DEP_BLOCK_HAS_DID(pcb) ((pcb) & 0x08) + +static const u8 digital_ats_fsc[] = { + 16, 24, 32, 40, 48, 64, 96, 128, +}; + +#define DIGITAL_ATS_FSCI(t0) ((t0) & 0x0F) +#define DIGITAL_ATS_MAX_FSC 256 + +#define DIGITAL_RATS_BYTE1 0xE0 +#define DIGITAL_RATS_PARAM 0x80 + struct digital_sdd_res { u8 nfcid1[4]; u8 bcc; @@ -82,9 +111,127 @@ struct digital_sensf_res { u8 rd[2]; } __packed; +struct digital_iso15693_inv_req { + u8 flags; + u8 cmd; + u8 mask_len; + u64 mask; +} __packed; + +struct digital_iso15693_inv_res { + u8 flags; + u8 dsfid; + u64 uid; +} __packed; + static int digital_in_send_sdd_req(struct nfc_digital_dev *ddev, struct nfc_target *target); +int digital_in_iso_dep_pull_sod(struct nfc_digital_dev *ddev, + struct sk_buff *skb) +{ + u8 pcb; + u8 block_type; + + if (skb->len < 1) + return -EIO; + + pcb = *skb->data; + block_type = DIGITAL_ISO_DEP_PCB_TYPE(pcb); + + /* No support for R-block nor S-block */ + if (block_type != DIGITAL_ISO_DEP_I_BLOCK) { + pr_err("ISO_DEP R-block and S-block not supported\n"); + return -EIO; + } + + if (DIGITAL_ISO_DEP_BLOCK_HAS_DID(pcb)) { + pr_err("DID field in ISO_DEP PCB not supported\n"); + return -EIO; + } + + skb_pull(skb, 1); + + return 0; +} + +int digital_in_iso_dep_push_sod(struct nfc_digital_dev *ddev, + struct sk_buff *skb) +{ + /* + * Chaining not supported so skb->len + 1 PCB byte + 2 CRC bytes must + * not be greater than remote FSC + */ + if (skb->len + 3 > ddev->target_fsc) + return -EIO; + + skb_push(skb, 1); + + *skb->data = DIGITAL_ISO_DEP_I_PCB | ddev->curr_nfc_dep_pni; + + ddev->curr_nfc_dep_pni = + DIGITAL_ISO_DEP_PNI(ddev->curr_nfc_dep_pni + 1); + + return 0; +} + +static void digital_in_recv_ats(struct nfc_digital_dev *ddev, void *arg, + struct sk_buff *resp) +{ + struct nfc_target *target = arg; + u8 fsdi; + int rc; + + if (IS_ERR(resp)) { + rc = PTR_ERR(resp); + resp = NULL; + goto exit; + } + + if (resp->len < 2) { + rc = -EIO; + goto exit; + } + + fsdi = DIGITAL_ATS_FSCI(resp->data[1]); + if (fsdi >= 8) + ddev->target_fsc = DIGITAL_ATS_MAX_FSC; + else + ddev->target_fsc = digital_ats_fsc[fsdi]; + + ddev->curr_nfc_dep_pni = 0; + + rc = digital_target_found(ddev, target, NFC_PROTO_ISO14443); + +exit: + dev_kfree_skb(resp); + kfree(target); + + if (rc) + digital_poll_next_tech(ddev); +} + +static int digital_in_send_rats(struct nfc_digital_dev *ddev, + struct nfc_target *target) +{ + int rc; + struct sk_buff *skb; + + skb = digital_skb_alloc(ddev, 2); + if (!skb) + return -ENOMEM; + + *skb_put(skb, 1) = DIGITAL_RATS_BYTE1; + *skb_put(skb, 1) = DIGITAL_RATS_PARAM; + + rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_ats, + target); + if (rc) + kfree_skb(skb); + + return rc; +} + static void digital_in_recv_sel_res(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { @@ -122,8 +269,19 @@ static void digital_in_recv_sel_res(struct nfc_digital_dev *ddev, void *arg, goto exit_free_skb; } + target->sel_res = sel_res; + if (DIGITAL_SEL_RES_IS_T2T(sel_res)) { nfc_proto = 
NFC_PROTO_MIFARE; + } else if (DIGITAL_SEL_RES_IS_T4T(sel_res)) { + rc = digital_in_send_rats(ddev, target); + if (rc) + goto exit; + /* + * Skip target_found and don't free it for now. This will be + * done when receiving the ATS + */ + goto exit_free_skb; } else if (DIGITAL_SEL_RES_IS_NFC_DEP(sel_res)) { nfc_proto = NFC_PROTO_NFC_DEP; } else { @@ -131,8 +289,6 @@ static void digital_in_recv_sel_res(struct nfc_digital_dev *ddev, void *arg, goto exit; } - target->sel_res = sel_res; - rc = digital_target_found(ddev, target, nfc_proto); exit: @@ -473,6 +629,93 @@ int digital_in_send_sensf_req(struct nfc_digital_dev *ddev, u8 rf_tech) return rc; } +static void digital_in_recv_iso15693_inv_res(struct nfc_digital_dev *ddev, + void *arg, struct sk_buff *resp) +{ + struct digital_iso15693_inv_res *res; + struct nfc_target *target = NULL; + int rc; + + if (IS_ERR(resp)) { + rc = PTR_ERR(resp); + resp = NULL; + goto out_free_skb; + } + + if (resp->len != sizeof(*res)) { + rc = -EIO; + goto out_free_skb; + } + + res = (struct digital_iso15693_inv_res *)resp->data; + + if (!DIGITAL_ISO15693_RES_IS_VALID(res->flags)) { + PROTOCOL_ERR("ISO15693 - 10.3.1"); + rc = -EINVAL; + goto out_free_skb; + } + + target = kzalloc(sizeof(*target), GFP_KERNEL); + if (!target) { + rc = -ENOMEM; + goto out_free_skb; + } + + target->is_iso15693 = 1; + target->iso15693_dsfid = res->dsfid; + memcpy(target->iso15693_uid, &res->uid, sizeof(target->iso15693_uid)); + + rc = digital_target_found(ddev, target, NFC_PROTO_ISO15693); + + kfree(target); + +out_free_skb: + dev_kfree_skb(resp); + + if (rc) + digital_poll_next_tech(ddev); +} + +int digital_in_send_iso15693_inv_req(struct nfc_digital_dev *ddev, u8 rf_tech) +{ + struct digital_iso15693_inv_req *req; + struct sk_buff *skb; + int rc; + + rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, + NFC_DIGITAL_RF_TECH_ISO15693); + if (rc) + return rc; + + rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, + NFC_DIGITAL_FRAMING_ISO15693_INVENTORY); + if (rc) + return rc; + + skb = digital_skb_alloc(ddev, sizeof(*req)); + if (!skb) + return -ENOMEM; + + skb_put(skb, sizeof(*req) - sizeof(req->mask)); /* No mask */ + req = (struct digital_iso15693_inv_req *)skb->data; + + /* Single sub-carrier, high data rate, no AFI, single slot + * Inventory command + */ + req->flags = DIGITAL_ISO15693_REQ_FLAG_DATA_RATE | + DIGITAL_ISO15693_REQ_FLAG_INVENTORY | + DIGITAL_ISO15693_REQ_FLAG_NB_SLOTS; + req->cmd = DIGITAL_CMD_ISO15693_INVENTORY_REQ; + req->mask_len = 0; + + rc = digital_in_send_cmd(ddev, skb, 30, + digital_in_recv_iso15693_inv_res, NULL); + if (rc) + kfree_skb(skb); + + return rc; +} + static int digital_tg_send_sel_res(struct nfc_digital_dev *ddev) { struct sk_buff *skb; diff --git a/net/nfc/hci/llc.c b/net/nfc/hci/llc.c index a07d2b81848..1b90c053185 100644 --- a/net/nfc/hci/llc.c +++ b/net/nfc/hci/llc.c @@ -20,14 +20,12 @@ #include "llc.h" -static struct list_head llc_engines; +static LIST_HEAD(llc_engines); int nfc_llc_init(void) { int r; - INIT_LIST_HEAD(&llc_engines); - r = nfc_llc_nop_register(); if (r) goto exit; diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c index 6184bd1fba3..b486f12ae24 100644 --- a/net/nfc/llcp_core.c +++ b/net/nfc/llcp_core.c @@ -27,7 +27,7 @@ static u8 llcp_magic[3] = {0x46, 0x66, 0x6d}; -static struct list_head llcp_devices; +static LIST_HEAD(llcp_devices); static void nfc_llcp_rx_skb(struct nfc_llcp_local *local, struct sk_buff *skb); @@ -293,9 +293,9 @@ static void nfc_llcp_sdreq_timer(unsigned long data) struct 
nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev) { - struct nfc_llcp_local *local, *n; + struct nfc_llcp_local *local; - list_for_each_entry_safe(local, n, &llcp_devices, list) + list_for_each_entry(local, &llcp_devices, list) if (local->dev == dev) return local; @@ -609,14 +609,16 @@ u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len) int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len) { - struct nfc_llcp_local *local = nfc_llcp_find_local(dev); + struct nfc_llcp_local *local; + + if (gb_len < 3 || gb_len > NFC_MAX_GT_LEN) + return -EINVAL; + local = nfc_llcp_find_local(dev); if (local == NULL) { pr_err("No LLCP device\n"); return -ENODEV; } - if (gb_len < 3) - return -EINVAL; memset(local->remote_gb, 0, NFC_MAX_GT_LEN); memcpy(local->remote_gb, gb, gb_len); @@ -1622,8 +1624,6 @@ void nfc_llcp_unregister_device(struct nfc_dev *dev) int __init nfc_llcp_init(void) { - INIT_LIST_HEAD(&llcp_devices); - return nfc_llcp_sock_init(); } diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 46bda010bf1..6c34ac97850 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -74,7 +74,7 @@ static int __nci_request(struct nci_dev *ndev, ndev->req_status = NCI_REQ_PEND; - init_completion(&ndev->req_completion); + reinit_completion(&ndev->req_completion); req(ndev, opt); completion_rc = wait_for_completion_interruptible_timeout(&ndev->req_completion, @@ -301,7 +301,7 @@ static int nci_open_device(struct nci_dev *ndev) rc = __nci_request(ndev, nci_reset_req, 0, msecs_to_jiffies(NCI_RESET_TIMEOUT)); - if (ndev->ops->setup(ndev)) + if (ndev->ops->setup) ndev->ops->setup(ndev); if (!rc) { @@ -709,6 +709,7 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops, ndev->ops = ops; ndev->tx_headroom = tx_headroom; ndev->tx_tailroom = tx_tailroom; + init_completion(&ndev->req_completion); ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops, supported_protocols, diff --git a/net/nfc/nci/spi.c b/net/nfc/nci/spi.c index f1d426f10cc..ec250e77763 100644 --- a/net/nfc/nci/spi.c +++ b/net/nfc/nci/spi.c @@ -105,7 +105,7 @@ int nci_spi_send(struct nci_spi *nspi, if (ret != 0 || nspi->acknowledge_mode == NCI_SPI_CRC_DISABLED) goto done; - init_completion(&nspi->req_completion); + reinit_completion(&nspi->req_completion); completion_rc = wait_for_completion_interruptible_timeout( &nspi->req_completion, NCI_SPI_SEND_TIMEOUT); @@ -145,6 +145,7 @@ struct nci_spi *nci_spi_allocate_spi(struct spi_device *spi, nspi->spi = spi; nspi->ndev = ndev; + init_completion(&nspi->req_completion); return nspi; } diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index ebbf6fb88b3..43cb1c17e26 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -94,6 +94,14 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target, target->sensf_res)) goto nla_put_failure; + if (target->is_iso15693) { + if (nla_put_u8(msg, NFC_ATTR_TARGET_ISO15693_DSFID, + target->iso15693_dsfid) || + nla_put(msg, NFC_ATTR_TARGET_ISO15693_UID, + sizeof(target->iso15693_uid), target->iso15693_uid)) + goto nla_put_failure; + } + return genlmsg_end(msg, hdr); nla_put_failure: diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 6b6f33ad78f..90b82e08ae6 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -1798,7 +1798,7 @@ static void reg_process_hint(struct regulatory_request *reg_request) return; case NL80211_REGDOM_SET_BY_USER: treatment = reg_process_hint_user(reg_request); - if (treatment == REG_REQ_OK || + if (treatment == REG_REQ_IGNORE || treatment == REG_REQ_ALREADY_SET) 
return; queue_delayed_work(system_power_efficient_wq, @@ -2492,6 +2492,7 @@ static int reg_set_rd_country_ie(const struct ieee80211_regdomain *rd, int set_regdom(const struct ieee80211_regdomain *rd) { struct regulatory_request *lr; + bool user_reset = false; int r; if (!reg_is_valid_request(rd->alpha2)) { @@ -2508,6 +2509,7 @@ int set_regdom(const struct ieee80211_regdomain *rd) break; case NL80211_REGDOM_SET_BY_USER: r = reg_set_rd_user(rd, lr); + user_reset = true; break; case NL80211_REGDOM_SET_BY_DRIVER: r = reg_set_rd_driver(rd, lr); @@ -2521,8 +2523,14 @@ int set_regdom(const struct ieee80211_regdomain *rd) } if (r) { - if (r == -EALREADY) + switch (r) { + case -EALREADY: reg_set_request_processed(); + break; + default: + /* Back to world regulatory in case of errors */ + restore_regulatory_settings(user_reset); + } kfree(rd); return r;