/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI event handling. */
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "a2mp.h"
#include "amp.h"
36 /* Handle HCI Event packets */
38 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
40 __u8 status = *((__u8 *) skb->data);
42 BT_DBG("%s status 0x%2.2x", hdev->name, status);
47 clear_bit(HCI_INQUIRY, &hdev->flags);
48 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
49 wake_up_bit(&hdev->flags, HCI_INQUIRY);
51 hci_conn_check_pending(hdev);
54 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
56 __u8 status = *((__u8 *) skb->data);
58 BT_DBG("%s status 0x%2.2x", hdev->name, status);
63 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
66 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
68 __u8 status = *((__u8 *) skb->data);
70 BT_DBG("%s status 0x%2.2x", hdev->name, status);
75 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
77 hci_conn_check_pending(hdev);
80 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
83 BT_DBG("%s", hdev->name);
86 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
88 struct hci_rp_role_discovery *rp = (void *) skb->data;
89 struct hci_conn *conn;
91 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
98 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
101 conn->link_mode &= ~HCI_LM_MASTER;
103 conn->link_mode |= HCI_LM_MASTER;
106 hci_dev_unlock(hdev);
109 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
111 struct hci_rp_read_link_policy *rp = (void *) skb->data;
112 struct hci_conn *conn;
114 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
121 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
123 conn->link_policy = __le16_to_cpu(rp->policy);
125 hci_dev_unlock(hdev);
128 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
130 struct hci_rp_write_link_policy *rp = (void *) skb->data;
131 struct hci_conn *conn;
134 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
139 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
145 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
147 conn->link_policy = get_unaligned_le16(sent + 2);
149 hci_dev_unlock(hdev);
152 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
155 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
157 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
162 hdev->link_policy = __le16_to_cpu(rp->policy);
165 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
168 __u8 status = *((__u8 *) skb->data);
171 BT_DBG("%s status 0x%2.2x", hdev->name, status);
173 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
178 hdev->link_policy = get_unaligned_le16(sent);
181 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
183 __u8 status = *((__u8 *) skb->data);
185 BT_DBG("%s status 0x%2.2x", hdev->name, status);
187 clear_bit(HCI_RESET, &hdev->flags);
189 /* Reset all non-persistent flags */
190 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
192 hdev->discovery.state = DISCOVERY_STOPPED;
193 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
194 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
196 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
197 hdev->adv_data_len = 0;
199 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
200 hdev->scan_rsp_data_len = 0;
202 hdev->le_scan_type = LE_SCAN_PASSIVE;
204 hdev->ssp_debug_mode = 0;
207 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
209 __u8 status = *((__u8 *) skb->data);
212 BT_DBG("%s status 0x%2.2x", hdev->name, status);
214 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
220 if (test_bit(HCI_MGMT, &hdev->dev_flags))
221 mgmt_set_local_name_complete(hdev, sent, status);
223 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
225 hci_dev_unlock(hdev);
228 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
230 struct hci_rp_read_local_name *rp = (void *) skb->data;
232 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
237 if (test_bit(HCI_SETUP, &hdev->dev_flags))
238 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
241 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
243 __u8 status = *((__u8 *) skb->data);
246 BT_DBG("%s status 0x%2.2x", hdev->name, status);
248 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
253 __u8 param = *((__u8 *) sent);
255 if (param == AUTH_ENABLED)
256 set_bit(HCI_AUTH, &hdev->flags);
258 clear_bit(HCI_AUTH, &hdev->flags);
261 if (test_bit(HCI_MGMT, &hdev->dev_flags))
262 mgmt_auth_enable_complete(hdev, status);
265 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
267 __u8 status = *((__u8 *) skb->data);
270 BT_DBG("%s status 0x%2.2x", hdev->name, status);
272 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
277 __u8 param = *((__u8 *) sent);
280 set_bit(HCI_ENCRYPT, &hdev->flags);
282 clear_bit(HCI_ENCRYPT, &hdev->flags);
286 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
288 __u8 param, status = *((__u8 *) skb->data);
289 int old_pscan, old_iscan;
292 BT_DBG("%s status 0x%2.2x", hdev->name, status);
294 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
298 param = *((__u8 *) sent);
303 mgmt_write_scan_failed(hdev, param, status);
304 hdev->discov_timeout = 0;
308 /* We need to ensure that we set this back on if someone changed
309 * the scan mode through a raw HCI socket.
311 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
313 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
314 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
316 if (param & SCAN_INQUIRY) {
317 set_bit(HCI_ISCAN, &hdev->flags);
319 mgmt_discoverable(hdev, 1);
320 } else if (old_iscan)
321 mgmt_discoverable(hdev, 0);
323 if (param & SCAN_PAGE) {
324 set_bit(HCI_PSCAN, &hdev->flags);
326 mgmt_connectable(hdev, 1);
327 } else if (old_pscan)
328 mgmt_connectable(hdev, 0);
331 hci_dev_unlock(hdev);
334 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
336 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
338 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
343 memcpy(hdev->dev_class, rp->dev_class, 3);
345 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
346 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
349 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
351 __u8 status = *((__u8 *) skb->data);
354 BT_DBG("%s status 0x%2.2x", hdev->name, status);
356 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
363 memcpy(hdev->dev_class, sent, 3);
365 if (test_bit(HCI_MGMT, &hdev->dev_flags))
366 mgmt_set_class_of_dev_complete(hdev, sent, status);
368 hci_dev_unlock(hdev);
371 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
373 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
376 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
381 setting = __le16_to_cpu(rp->voice_setting);
383 if (hdev->voice_setting == setting)
386 hdev->voice_setting = setting;
388 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
391 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
394 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
397 __u8 status = *((__u8 *) skb->data);
401 BT_DBG("%s status 0x%2.2x", hdev->name, status);
406 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
410 setting = get_unaligned_le16(sent);
412 if (hdev->voice_setting == setting)
415 hdev->voice_setting = setting;
417 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
420 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
423 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
426 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
428 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
433 hdev->num_iac = rp->num_iac;
435 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
438 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
440 __u8 status = *((__u8 *) skb->data);
441 struct hci_cp_write_ssp_mode *sent;
443 BT_DBG("%s status 0x%2.2x", hdev->name, status);
445 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
451 hdev->features[1][0] |= LMP_HOST_SSP;
453 hdev->features[1][0] &= ~LMP_HOST_SSP;
456 if (test_bit(HCI_MGMT, &hdev->dev_flags))
457 mgmt_ssp_enable_complete(hdev, sent->mode, status);
460 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
462 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
466 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
468 u8 status = *((u8 *) skb->data);
469 struct hci_cp_write_sc_support *sent;
471 BT_DBG("%s status 0x%2.2x", hdev->name, status);
473 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
479 hdev->features[1][0] |= LMP_HOST_SC;
481 hdev->features[1][0] &= ~LMP_HOST_SC;
484 if (test_bit(HCI_MGMT, &hdev->dev_flags))
485 mgmt_sc_enable_complete(hdev, sent->support, status);
488 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
490 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
494 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
496 struct hci_rp_read_local_version *rp = (void *) skb->data;
498 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
503 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
504 hdev->hci_ver = rp->hci_ver;
505 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
506 hdev->lmp_ver = rp->lmp_ver;
507 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
508 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
512 static void hci_cc_read_local_commands(struct hci_dev *hdev,
515 struct hci_rp_read_local_commands *rp = (void *) skb->data;
517 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
522 if (test_bit(HCI_SETUP, &hdev->dev_flags))
523 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
526 static void hci_cc_read_local_features(struct hci_dev *hdev,
529 struct hci_rp_read_local_features *rp = (void *) skb->data;
531 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
536 memcpy(hdev->features, rp->features, 8);
538 /* Adjust default settings according to features
539 * supported by device. */
541 if (hdev->features[0][0] & LMP_3SLOT)
542 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
544 if (hdev->features[0][0] & LMP_5SLOT)
545 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
547 if (hdev->features[0][1] & LMP_HV2) {
548 hdev->pkt_type |= (HCI_HV2);
549 hdev->esco_type |= (ESCO_HV2);
552 if (hdev->features[0][1] & LMP_HV3) {
553 hdev->pkt_type |= (HCI_HV3);
554 hdev->esco_type |= (ESCO_HV3);
557 if (lmp_esco_capable(hdev))
558 hdev->esco_type |= (ESCO_EV3);
560 if (hdev->features[0][4] & LMP_EV4)
561 hdev->esco_type |= (ESCO_EV4);
563 if (hdev->features[0][4] & LMP_EV5)
564 hdev->esco_type |= (ESCO_EV5);
566 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
567 hdev->esco_type |= (ESCO_2EV3);
569 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
570 hdev->esco_type |= (ESCO_3EV3);
572 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
573 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
576 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
579 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
581 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
586 if (hdev->max_page < rp->max_page)
587 hdev->max_page = rp->max_page;
589 if (rp->page < HCI_MAX_PAGES)
590 memcpy(hdev->features[rp->page], rp->features, 8);
593 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
596 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
598 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
601 hdev->flow_ctl_mode = rp->mode;
604 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
606 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
608 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
613 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
614 hdev->sco_mtu = rp->sco_mtu;
615 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
616 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
618 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
623 hdev->acl_cnt = hdev->acl_pkts;
624 hdev->sco_cnt = hdev->sco_pkts;
626 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
627 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
630 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
632 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
634 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
637 bacpy(&hdev->bdaddr, &rp->bdaddr);
640 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
643 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
645 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
647 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
648 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
649 hdev->page_scan_window = __le16_to_cpu(rp->window);
653 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
656 u8 status = *((u8 *) skb->data);
657 struct hci_cp_write_page_scan_activity *sent;
659 BT_DBG("%s status 0x%2.2x", hdev->name, status);
664 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
668 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
669 hdev->page_scan_window = __le16_to_cpu(sent->window);
672 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
675 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
677 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
679 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
680 hdev->page_scan_type = rp->type;
683 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
686 u8 status = *((u8 *) skb->data);
689 BT_DBG("%s status 0x%2.2x", hdev->name, status);
694 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
696 hdev->page_scan_type = *type;
699 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
702 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
704 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
709 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
710 hdev->block_len = __le16_to_cpu(rp->block_len);
711 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
713 hdev->block_cnt = hdev->num_blocks;
715 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
716 hdev->block_cnt, hdev->block_len);
719 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
722 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
724 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
729 hdev->amp_status = rp->amp_status;
730 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
731 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
732 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
733 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
734 hdev->amp_type = rp->amp_type;
735 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
736 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
737 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
738 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
741 a2mp_send_getinfo_rsp(hdev);
744 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
747 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
748 struct amp_assoc *assoc = &hdev->loc_assoc;
749 size_t rem_len, frag_len;
751 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
756 frag_len = skb->len - sizeof(*rp);
757 rem_len = __le16_to_cpu(rp->rem_len);
759 if (rem_len > frag_len) {
760 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
762 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
763 assoc->offset += frag_len;
765 /* Read other fragments */
766 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
771 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
772 assoc->len = assoc->offset + rem_len;
776 /* Send A2MP Rsp when all fragments are received */
777 a2mp_send_getampassoc_rsp(hdev, rp->status);
778 a2mp_send_create_phy_link_req(hdev, rp->status);
781 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
784 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
786 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
789 hdev->inq_tx_power = rp->tx_power;
792 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
794 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
795 struct hci_cp_pin_code_reply *cp;
796 struct hci_conn *conn;
798 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
802 if (test_bit(HCI_MGMT, &hdev->dev_flags))
803 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
808 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
812 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
814 conn->pin_length = cp->pin_len;
817 hci_dev_unlock(hdev);
820 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
822 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
824 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
828 if (test_bit(HCI_MGMT, &hdev->dev_flags))
829 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
832 hci_dev_unlock(hdev);
835 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
838 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
840 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
845 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
846 hdev->le_pkts = rp->le_max_pkt;
848 hdev->le_cnt = hdev->le_pkts;
850 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
853 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
856 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
858 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
861 memcpy(hdev->le_features, rp->features, 8);
864 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
867 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
869 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
872 hdev->adv_tx_power = rp->tx_power;
875 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
877 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
879 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
883 if (test_bit(HCI_MGMT, &hdev->dev_flags))
884 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
887 hci_dev_unlock(hdev);
890 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
893 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
895 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
899 if (test_bit(HCI_MGMT, &hdev->dev_flags))
900 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
901 ACL_LINK, 0, rp->status);
903 hci_dev_unlock(hdev);
906 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
908 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
910 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
914 if (test_bit(HCI_MGMT, &hdev->dev_flags))
915 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
918 hci_dev_unlock(hdev);
921 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
924 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
926 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
930 if (test_bit(HCI_MGMT, &hdev->dev_flags))
931 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
932 ACL_LINK, 0, rp->status);
934 hci_dev_unlock(hdev);
937 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
940 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
942 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
945 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
946 NULL, NULL, rp->status);
947 hci_dev_unlock(hdev);
950 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
953 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
955 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
958 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
959 rp->hash256, rp->randomizer256,
961 hci_dev_unlock(hdev);
965 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
967 __u8 status = *((__u8 *) skb->data);
970 BT_DBG("%s status 0x%2.2x", hdev->name, status);
972 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
979 bacpy(&hdev->random_addr, sent);
981 hci_dev_unlock(hdev);
984 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
986 __u8 *sent, status = *((__u8 *) skb->data);
988 BT_DBG("%s status 0x%2.2x", hdev->name, status);
990 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
999 /* If we're doing connection initation as peripheral. Set a
1000 * timeout in case something goes wrong.
1003 struct hci_conn *conn;
1005 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1007 queue_delayed_work(hdev->workqueue,
1008 &conn->le_conn_timeout,
1009 HCI_LE_CONN_TIMEOUT);
1012 mgmt_advertising(hdev, *sent);
1014 hci_dev_unlock(hdev);
1017 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1019 struct hci_cp_le_set_scan_param *cp;
1020 __u8 status = *((__u8 *) skb->data);
1022 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1024 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1031 hdev->le_scan_type = cp->type;
1033 hci_dev_unlock(hdev);
1036 static bool has_pending_adv_report(struct hci_dev *hdev)
1038 struct discovery_state *d = &hdev->discovery;
1040 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1043 static void clear_pending_adv_report(struct hci_dev *hdev)
1045 struct discovery_state *d = &hdev->discovery;
1047 bacpy(&d->last_adv_addr, BDADDR_ANY);
1048 d->last_adv_data_len = 0;
1051 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1052 u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
1054 struct discovery_state *d = &hdev->discovery;
1056 bacpy(&d->last_adv_addr, bdaddr);
1057 d->last_adv_addr_type = bdaddr_type;
1058 d->last_adv_rssi = rssi;
1059 memcpy(d->last_adv_data, data, len);
1060 d->last_adv_data_len = len;
1063 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1064 struct sk_buff *skb)
1066 struct hci_cp_le_set_scan_enable *cp;
1067 __u8 status = *((__u8 *) skb->data);
1069 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1071 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1078 switch (cp->enable) {
1079 case LE_SCAN_ENABLE:
1080 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1081 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1082 clear_pending_adv_report(hdev);
1085 case LE_SCAN_DISABLE:
1086 /* We do this here instead of when setting DISCOVERY_STOPPED
1087 * since the latter would potentially require waiting for
1088 * inquiry to stop too.
1090 if (has_pending_adv_report(hdev)) {
1091 struct discovery_state *d = &hdev->discovery;
1093 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1094 d->last_adv_addr_type, NULL,
1095 d->last_adv_rssi, 0, 1,
1097 d->last_adv_data_len, NULL, 0);
1100 /* Cancel this timer so that we don't try to disable scanning
1101 * when it's already disabled.
1103 cancel_delayed_work(&hdev->le_scan_disable);
1105 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1106 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1107 * interrupted scanning due to a connect request. Mark
1108 * therefore discovery as stopped.
1110 if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
1112 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1116 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1121 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1122 struct sk_buff *skb)
1124 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1126 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1129 hdev->le_white_list_size = rp->size;
1132 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1133 struct sk_buff *skb)
1135 __u8 status = *((__u8 *) skb->data);
1137 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1140 hci_white_list_clear(hdev);
1143 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1144 struct sk_buff *skb)
1146 struct hci_cp_le_add_to_white_list *sent;
1147 __u8 status = *((__u8 *) skb->data);
1149 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1151 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1156 hci_white_list_add(hdev, &sent->bdaddr, sent->bdaddr_type);
1159 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1160 struct sk_buff *skb)
1162 struct hci_cp_le_del_from_white_list *sent;
1163 __u8 status = *((__u8 *) skb->data);
1165 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1167 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1172 hci_white_list_del(hdev, &sent->bdaddr, sent->bdaddr_type);
1175 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1176 struct sk_buff *skb)
1178 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1180 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1183 memcpy(hdev->le_states, rp->le_states, 8);
1186 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1187 struct sk_buff *skb)
1189 struct hci_cp_write_le_host_supported *sent;
1190 __u8 status = *((__u8 *) skb->data);
1192 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1194 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1200 hdev->features[1][0] |= LMP_HOST_LE;
1201 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1203 hdev->features[1][0] &= ~LMP_HOST_LE;
1204 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1205 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1209 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1211 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1215 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1217 struct hci_cp_le_set_adv_param *cp;
1218 u8 status = *((u8 *) skb->data);
1220 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1225 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1230 hdev->adv_addr_type = cp->own_address_type;
1231 hci_dev_unlock(hdev);
1234 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1235 struct sk_buff *skb)
1237 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1239 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1240 hdev->name, rp->status, rp->phy_handle);
1245 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1248 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1250 struct hci_rp_read_rssi *rp = (void *) skb->data;
1251 struct hci_conn *conn;
1253 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1260 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1262 conn->rssi = rp->rssi;
1264 hci_dev_unlock(hdev);
1267 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1269 struct hci_cp_read_tx_power *sent;
1270 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1271 struct hci_conn *conn;
1273 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1278 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1284 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1288 switch (sent->type) {
1290 conn->tx_power = rp->tx_power;
1293 conn->max_tx_power = rp->tx_power;
1298 hci_dev_unlock(hdev);
1301 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1303 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1306 hci_conn_check_pending(hdev);
1310 set_bit(HCI_INQUIRY, &hdev->flags);
1313 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1315 struct hci_cp_create_conn *cp;
1316 struct hci_conn *conn;
1318 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1320 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1326 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1328 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1331 if (conn && conn->state == BT_CONNECT) {
1332 if (status != 0x0c || conn->attempt > 2) {
1333 conn->state = BT_CLOSED;
1334 hci_proto_connect_cfm(conn, status);
1337 conn->state = BT_CONNECT2;
1341 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1344 conn->link_mode |= HCI_LM_MASTER;
1346 BT_ERR("No memory for new connection");
1350 hci_dev_unlock(hdev);
1353 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1355 struct hci_cp_add_sco *cp;
1356 struct hci_conn *acl, *sco;
1359 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1364 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1368 handle = __le16_to_cpu(cp->handle);
1370 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1374 acl = hci_conn_hash_lookup_handle(hdev, handle);
1378 sco->state = BT_CLOSED;
1380 hci_proto_connect_cfm(sco, status);
1385 hci_dev_unlock(hdev);
1388 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1390 struct hci_cp_auth_requested *cp;
1391 struct hci_conn *conn;
1393 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1398 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1404 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1406 if (conn->state == BT_CONFIG) {
1407 hci_proto_connect_cfm(conn, status);
1408 hci_conn_drop(conn);
1412 hci_dev_unlock(hdev);
1415 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1417 struct hci_cp_set_conn_encrypt *cp;
1418 struct hci_conn *conn;
1420 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1425 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1431 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1433 if (conn->state == BT_CONFIG) {
1434 hci_proto_connect_cfm(conn, status);
1435 hci_conn_drop(conn);
1439 hci_dev_unlock(hdev);
1442 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1443 struct hci_conn *conn)
1445 if (conn->state != BT_CONFIG || !conn->out)
1448 if (conn->pending_sec_level == BT_SECURITY_SDP)
1451 /* Only request authentication for SSP connections or non-SSP
1452 * devices with sec_level MEDIUM or HIGH or if MITM protection
1455 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1456 conn->pending_sec_level != BT_SECURITY_HIGH &&
1457 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1463 static int hci_resolve_name(struct hci_dev *hdev,
1464 struct inquiry_entry *e)
1466 struct hci_cp_remote_name_req cp;
1468 memset(&cp, 0, sizeof(cp));
1470 bacpy(&cp.bdaddr, &e->data.bdaddr);
1471 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1472 cp.pscan_mode = e->data.pscan_mode;
1473 cp.clock_offset = e->data.clock_offset;
1475 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1478 static bool hci_resolve_next_name(struct hci_dev *hdev)
1480 struct discovery_state *discov = &hdev->discovery;
1481 struct inquiry_entry *e;
1483 if (list_empty(&discov->resolve))
1486 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1490 if (hci_resolve_name(hdev, e) == 0) {
1491 e->name_state = NAME_PENDING;
/* Handle the outcome of a remote-name request during discovery: notify
 * mgmt of the connection (first time only, via the MGMT_CONNECTED flag),
 * record the resolved name (or mark it unknown), then either resolve the
 * next pending name or finish discovery.
 * NOTE(review): extract is lossy — several gotos/braces around the
 * discovery-state checks are missing; confirm against the full source.
 */
1498 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1499 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1501 struct discovery_state *discov = &hdev->discovery;
1502 struct inquiry_entry *e;
1504 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1505 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1506 name_len, conn->dev_class);
1508 if (discov->state == DISCOVERY_STOPPED)
1511 if (discov->state == DISCOVERY_STOPPING)
1512 goto discov_complete;
1514 if (discov->state != DISCOVERY_RESOLVING)
1517 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1518 /* If the device was not found in a list of found devices names of which
1519 * are pending. there is no need to continue resolving a next name as it
1520 * will be done upon receiving another Remote Name Request Complete
/* A non-NULL name means resolution succeeded; report it to mgmt. */
1527 e->name_state = NAME_KNOWN;
1528 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1529 e->data.rssi, name, name_len);
1531 e->name_state = NAME_NOT_KNOWN;
1534 if (hci_resolve_next_name(hdev))
1538 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ.  On failure, feed a
 * NULL name into the pending-name machinery (mgmt mode only) and, if the
 * connection still requires it, request authentication directly.
 * NOTE(review): extract is lossy — the success early-return, NULL checks
 * and hci_dev_lock() are missing here; confirm against the full source.
 */
1541 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1543 struct hci_cp_remote_name_req *cp;
1544 struct hci_conn *conn;
1546 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1548 /* If successful wait for the name req complete event before
1549 * checking for the need to do authentication */
1553 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1559 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1561 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1562 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1567 if (!hci_outgoing_auth_needed(hdev, conn))
1570 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1571 struct hci_cp_auth_requested auth_cp;
1573 auth_cp.handle = __cpu_to_le16(conn->handle);
1574 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1575 sizeof(auth_cp), &auth_cp);
1579 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_READ_REMOTE_FEATURES: on failure
 * during connection setup (BT_CONFIG), confirm the connection with the
 * error status and drop the reference.
 * NOTE(review): status guard / lock lines not visible in this extract.
 */
1582 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1584 struct hci_cp_read_remote_features *cp;
1585 struct hci_conn *conn;
1587 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1592 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1598 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1600 if (conn->state == BT_CONFIG) {
1601 hci_proto_connect_cfm(conn, status);
1602 hci_conn_drop(conn);
1606 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_READ_REMOTE_EXT_FEATURES: same
 * failure handling as the non-extended variant — confirm and drop the
 * connection if it was still being configured.
 * NOTE(review): status guard / lock lines not visible in this extract.
 */
1609 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1611 struct hci_cp_read_remote_ext_features *cp;
1612 struct hci_conn *conn;
1614 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1619 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1625 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1627 if (conn->state == BT_CONFIG) {
1628 hci_proto_connect_cfm(conn, status);
1629 hci_conn_drop(conn);
1633 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SETUP_SYNC_CONN: on failure, find
 * the ACL link the SCO/eSCO setup was piggybacked on, close the attached
 * SCO link and notify the upper layer.
 * NOTE(review): extract is lossy — the `sco = acl->link` assignment and
 * related NULL checks are missing here; confirm against the full source.
 */
1636 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1638 struct hci_cp_setup_sync_conn *cp;
1639 struct hci_conn *acl, *sco;
1642 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1647 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1651 handle = __le16_to_cpu(cp->handle);
1653 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1657 acl = hci_conn_hash_lookup_handle(hdev, handle);
1661 sco->state = BT_CLOSED;
1663 hci_proto_connect_cfm(sco, status);
1668 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SNIFF_MODE: clear the pending
 * mode-change flag and run any deferred SCO setup that was waiting on
 * the mode change.  NOTE(review): status guard / lock lines not visible
 * in this extract.
 */
1671 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1673 struct hci_cp_sniff_mode *cp;
1674 struct hci_conn *conn;
1676 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1681 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1687 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1689 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1691 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1692 hci_sco_setup(conn, status);
1695 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_EXIT_SNIFF_MODE: mirror image of
 * hci_cs_sniff_mode() — clear the pending mode-change flag and run any
 * deferred SCO setup.  NOTE(review): status guard / lock lines not
 * visible in this extract.
 */
1698 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1700 struct hci_cp_exit_sniff_mode *cp;
1701 struct hci_conn *conn;
1703 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1708 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1714 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1716 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1718 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1719 hci_sco_setup(conn, status);
1722 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_DISCONNECT: if the disconnect
 * command itself failed, tell mgmt so userspace learns the disconnect
 * did not happen.  NOTE(review): status guard / lock / NULL checks not
 * visible in this extract.
 */
1725 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1727 struct hci_cp_disconnect *cp;
1728 struct hci_conn *conn;
1733 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1739 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1741 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1742 conn->dst_type, status);
1744 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_CREATE_PHY_LINK (AMP): on success
 * continue the AMP bring-up by writing the remote assoc; the hcon lookup
 * path handles the failure case.  NOTE(review): the status branch
 * structure is incomplete in this extract — confirm against full source.
 */
1747 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1749 struct hci_cp_create_phy_link *cp;
1751 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1753 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1760 struct hci_conn *hcon;
1762 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1766 amp_write_remote_assoc(hdev, cp->phy_handle);
1769 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_ACCEPT_PHY_LINK (AMP): on success,
 * proceed by writing the remote AMP assoc for the accepted phy link.
 * NOTE(review): status guard not visible in this extract.
 */
1772 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1774 struct hci_cp_accept_phy_link *cp;
1776 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1781 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1785 amp_write_remote_assoc(hdev, cp->phy_handle);
/* Command Status handler for HCI_OP_LE_CREATE_CONN: on success, record
 * the initiator/responder addresses (needed later by SMP) and arm a
 * connection-attempt timeout when not using the white list, since LE has
 * no page-timeout concept.  Failures are handled elsewhere (see comment
 * below).  NOTE(review): status guard / NULL check on conn not visible
 * in this extract.
 */
1788 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
1790 struct hci_cp_le_create_conn *cp;
1791 struct hci_conn *conn;
1793 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1795 /* All connection failure handling is taken care of by the
1796 * hci_le_conn_failed function which is triggered by the HCI
1797 * request completion callbacks used for connecting.
1802 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1808 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1812 /* Store the initiator and responder address information which
1813 * is needed for SMP. These values will not change during the
1814 * lifetime of the connection.
1816 conn->init_addr_type = cp->own_address_type;
1817 if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
1818 bacpy(&conn->init_addr, &hdev->random_addr);
1820 bacpy(&conn->init_addr, &hdev->bdaddr);
1822 conn->resp_addr_type = cp->peer_addr_type;
1823 bacpy(&conn->resp_addr, &cp->peer_addr);
1825 /* We don't want the connection attempt to stick around
1826 * indefinitely since LE doesn't have a page timeout concept
1827 * like BR/EDR. Set a timer for any connection that doesn't use
1828 * the white list for connecting.
1830 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
1831 queue_delayed_work(conn->hdev->workqueue,
1832 &conn->le_conn_timeout,
1833 HCI_LE_CONN_TIMEOUT);
1836 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_LE_START_ENC: if starting link
 * encryption failed while the connection was up, tear the link down with
 * an authentication-failure reason and drop the reference.
 * NOTE(review): status guard / lock lines not visible in this extract.
 */
1839 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1841 struct hci_cp_le_start_enc *cp;
1842 struct hci_conn *conn;
1844 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1851 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
1855 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1859 if (conn->state != BT_CONNECTED)
1862 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1863 hci_conn_drop(conn);
1866 hci_dev_unlock(hdev);
/* Inquiry Complete event: clear HCI_INQUIRY (waking any waiters), then,
 * in mgmt mode, either move discovery into the name-resolving phase or
 * mark it stopped if there is nothing left to resolve.
 */
1869 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1871 __u8 status = *((__u8 *) skb->data);
1872 struct discovery_state *discov = &hdev->discovery;
1873 struct inquiry_entry *e;
1875 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1877 hci_conn_check_pending(hdev);
1879 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1882 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
1883 wake_up_bit(&hdev->flags, HCI_INQUIRY);
1885 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1890 if (discov->state != DISCOVERY_FINDING)
/* Nothing needs name resolution -> discovery is fully done. */
1893 if (list_empty(&discov->resolve)) {
1894 hci_discovery_set_state(hdev, DISCOVERY_STOPPED)
1898 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1899 if (e && hci_resolve_name(hdev, e) == 0) {
1900 e->name_state = NAME_PENDING;
1901 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1903 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1907 hci_dev_unlock(hdev);
/* Inquiry Result event: copy each response into the inquiry cache and
 * forward it to mgmt as a found device.  Periodic inquiry results are
 * ignored.  NOTE(review): the num_rsp sanity check and lock lines are
 * not visible in this extract.
 */
1910 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1912 struct inquiry_data data;
1913 struct inquiry_info *info = (void *) (skb->data + 1);
1914 int num_rsp = *((__u8 *) skb->data);
1916 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1921 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1926 for (; num_rsp; num_rsp--, info++) {
1927 bool name_known, ssp;
1929 bacpy(&data.bdaddr, &info->bdaddr);
1930 data.pscan_rep_mode = info->pscan_rep_mode;
1931 data.pscan_period_mode = info->pscan_period_mode;
1932 data.pscan_mode = info->pscan_mode;
1933 memcpy(data.dev_class, info->dev_class, 3);
1934 data.clock_offset = info->clock_offset;
/* Basic inquiry results carry no SSP information. */
1936 data.ssp_mode = 0x00;
1938 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1939 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1940 info->dev_class, 0, !name_known, ssp, NULL,
1944 hci_dev_unlock(hdev);
/* Connection Complete event: finalize a BR/EDR (or legacy SCO) link.
 * On success: store the handle, move ACL links to BT_CONFIG (kicking off
 * remote-feature discovery) or others to BT_CONNECTED, register sysfs,
 * and record local auth/encrypt link-mode bits.  On failure: close the
 * connection and notify mgmt for ACL links.  SCO setup piggybacked on an
 * ACL link is continued either way.
 * NOTE(review): extract is lossy — the ev->status branch structure and
 * several braces are missing; confirm against the full source.
 */
1947 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1949 struct hci_ev_conn_complete *ev = (void *) skb->data;
1950 struct hci_conn *conn;
1952 BT_DBG("%s", hdev->name);
1956 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
/* Controllers may report eSCO requests completing as SCO links. */
1958 if (ev->link_type != SCO_LINK)
1961 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1965 conn->type = SCO_LINK;
1969 conn->handle = __le16_to_cpu(ev->handle);
1971 if (conn->type == ACL_LINK) {
1972 conn->state = BT_CONFIG;
1973 hci_conn_hold(conn);
/* Incoming, non-SSP, no stored key -> expect pairing; use longer timeout. */
1975 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1976 !hci_find_link_key(hdev, &ev->bdaddr))
1977 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1979 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1981 conn->state = BT_CONNECTED;
1983 hci_conn_add_sysfs(conn);
1985 if (test_bit(HCI_AUTH, &hdev->flags))
1986 conn->link_mode |= HCI_LM_AUTH;
1988 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1989 conn->link_mode |= HCI_LM_ENCRYPT;
1991 /* Get remote features */
1992 if (conn->type == ACL_LINK) {
1993 struct hci_cp_read_remote_features cp;
1994 cp.handle = ev->handle;
1995 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1999 /* Set packet type for incoming connection */
2000 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2001 struct hci_cp_change_conn_ptype cp;
2002 cp.handle = ev->handle;
2003 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2004 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
/* Failure path: close and report (ACL only) to mgmt. */
2008 conn->state = BT_CLOSED;
2009 if (conn->type == ACL_LINK)
2010 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2011 conn->dst_type, ev->status);
2014 if (conn->type == ACL_LINK)
2015 hci_sco_setup(conn, ev->status);
2018 hci_proto_connect_cfm(conn, ev->status);
2020 } else if (ev->link_type != ACL_LINK)
2021 hci_proto_connect_cfm(conn, ev->status);
2024 hci_dev_unlock(hdev);
2026 hci_conn_check_pending(hdev);
/* Connection Request event: consult registered protocols (and the
 * blacklist) to decide whether to accept.  If accepted, create/find the
 * hci_conn and either answer immediately with Accept Connection Request
 * (ACL, or SCO when eSCO is unsupported), Accept Synchronous Connection
 * (eSCO), or defer to the protocol layer.  Otherwise reject.
 * NOTE(review): extract is lossy — `flags` declaration and several
 * braces/NULL checks are missing; confirm against the full source.
 */
2029 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2031 struct hci_ev_conn_request *ev = (void *) skb->data;
2032 int mask = hdev->link_mode;
2035 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2038 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2041 if ((mask & HCI_LM_ACCEPT) &&
2042 !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
2043 /* Connection accepted */
2044 struct inquiry_entry *ie;
2045 struct hci_conn *conn;
/* Refresh the cached device class if we saw this device in inquiry. */
2049 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2051 memcpy(ie->data.dev_class, ev->dev_class, 3);
2053 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2056 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
2058 BT_ERR("No memory for new connection");
2059 hci_dev_unlock(hdev);
2064 memcpy(conn->dev_class, ev->dev_class, 3);
2066 hci_dev_unlock(hdev);
2068 if (ev->link_type == ACL_LINK ||
2069 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2070 struct hci_cp_accept_conn_req cp;
2071 conn->state = BT_CONNECT;
2073 bacpy(&cp.bdaddr, &ev->bdaddr);
2075 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2076 cp.role = 0x00; /* Become master */
2078 cp.role = 0x01; /* Remain slave */
2080 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
2082 } else if (!(flags & HCI_PROTO_DEFER)) {
2083 struct hci_cp_accept_sync_conn_req cp;
2084 conn->state = BT_CONNECT;
2086 bacpy(&cp.bdaddr, &ev->bdaddr);
2087 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* Fixed eSCO accept parameters: 8 kB/s both ways, no latency cap. */
2089 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2090 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2091 cp.max_latency = cpu_to_le16(0xffff);
2092 cp.content_format = cpu_to_le16(hdev->voice_setting);
2093 cp.retrans_effort = 0xff;
2095 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
2098 conn->state = BT_CONNECT2;
2099 hci_proto_connect_cfm(conn, 0);
2102 /* Connection rejected */
2103 struct hci_cp_reject_conn_req cp;
2105 bacpy(&cp.bdaddr, &ev->bdaddr);
2106 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2107 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Map an HCI disconnect reason code onto the coarser mgmt disconnect
 * reason reported to userspace; anything unrecognized becomes UNKNOWN.
 */
2111 static u8 hci_to_mgmt_reason(u8 err)
2114 case HCI_ERROR_CONNECTION_TIMEOUT:
2115 return MGMT_DEV_DISCONN_TIMEOUT;
2116 case HCI_ERROR_REMOTE_USER_TERM:
2117 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2118 case HCI_ERROR_REMOTE_POWER_OFF:
2119 return MGMT_DEV_DISCONN_REMOTE;
2120 case HCI_ERROR_LOCAL_HOST_TERM:
2121 return MGMT_DEV_DISCONN_LOCAL_HOST;
2123 return MGMT_DEV_DISCONN_UNKNOWN;
/* Disconnection Complete event: close the connection, notify mgmt
 * (failure or disconnect), drop stored link keys when flagged, apply any
 * auto-connect policy for the peer, inform protocols, and re-enable LE
 * advertising that the connection may have suppressed.
 * NOTE(review): extract is lossy — `type` is captured from conn before
 * teardown in the full source; several braces/gotos are missing here.
 */
2127 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2129 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2130 u8 reason = hci_to_mgmt_reason(ev->reason);
2131 struct hci_conn_params *params;
2132 struct hci_conn *conn;
2133 bool mgmt_connected;
2136 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2140 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2145 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2146 conn->dst_type, ev->status);
2150 conn->state = BT_CLOSED;
2152 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2153 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2154 reason, mgmt_connected);
2156 if (conn->type == ACL_LINK && conn->flush_key)
2157 hci_remove_link_key(hdev, &conn->dst);
2159 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2161 switch (params->auto_connect) {
2162 case HCI_AUTO_CONN_LINK_LOSS:
/* Only reconnect automatically if the link was actually lost. */
2163 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2167 case HCI_AUTO_CONN_ALWAYS:
2168 hci_pend_le_conn_add(hdev, &conn->dst, conn->dst_type);
2178 hci_proto_disconn_cfm(conn, ev->reason);
2181 /* Re-enable advertising if necessary, since it might
2182 * have been disabled by the connection. From the
2183 * HCI_LE_Set_Advertise_Enable command description in
2184 * the core specification (v4.0):
2185 * "The Controller shall continue advertising until the Host
2186 * issues an LE_Set_Advertise_Enable command with
2187 * Advertising_Enable set to 0x00 (Advertising is disabled)
2188 * or until a connection is created or until the Advertising
2189 * is timed out due to Directed Advertising."
2191 if (type == LE_LINK)
2192 mgmt_reenable_advertising(hdev);
2195 hci_dev_unlock(hdev);
/* Authentication Complete event: update link mode / security level on
 * success (legacy re-auth is special-cased as impossible), report
 * failures to mgmt, then continue connection setup — kick off encryption
 * for SSP links in BT_CONFIG, or confirm the connection; also service a
 * pending encryption request.
 * NOTE(review): extract is lossy — ev->status branches and braces are
 * incomplete here; confirm against the full source.
 */
2198 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2200 struct hci_ev_auth_complete *ev = (void *) skb->data;
2201 struct hci_conn *conn;
2203 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2207 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2212 if (!hci_conn_ssp_enabled(conn) &&
2213 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2214 BT_INFO("re-auth of legacy device is not possible.");
2216 conn->link_mode |= HCI_LM_AUTH;
2217 conn->sec_level = conn->pending_sec_level;
2220 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
2224 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2225 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2227 if (conn->state == BT_CONFIG) {
2228 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2229 struct hci_cp_set_conn_encrypt cp;
2230 cp.handle = ev->handle;
2232 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2235 conn->state = BT_CONNECTED;
2236 hci_proto_connect_cfm(conn, ev->status);
2237 hci_conn_drop(conn);
2240 hci_auth_cfm(conn, ev->status);
2242 hci_conn_hold(conn);
2243 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2244 hci_conn_drop(conn);
/* Serve any encryption change that was queued behind this auth. */
2247 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2249 struct hci_cp_set_conn_encrypt cp;
2250 cp.handle = ev->handle;
2252 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2255 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2256 hci_encrypt_cfm(conn, ev->status, 0x00);
2261 hci_dev_unlock(hdev);
/* Remote Name Request Complete event: feed the resolved (or absent)
 * name into the discovery pending-name machinery in mgmt mode, then
 * request authentication if the connection setup still needs it.
 * NOTE(review): lock lines and some gotos are missing in this extract.
 */
2264 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2266 struct hci_ev_remote_name *ev = (void *) skb->data;
2267 struct hci_conn *conn;
2269 BT_DBG("%s", hdev->name);
2271 hci_conn_check_pending(hdev);
2275 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2277 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2280 if (ev->status == 0)
2281 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2282 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2284 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2290 if (!hci_outgoing_auth_needed(hdev, conn))
2293 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2294 struct hci_cp_auth_requested cp;
2295 cp.handle = __cpu_to_le16(conn->handle);
2296 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2300 hci_dev_unlock(hdev);
/* Encryption Change event: update link-mode bits (encryption implies
 * authentication; P-256 keys imply FIPS; AES-CCM flag for 0x02 / LE),
 * disconnect on encryption failure of a live link, enforce Secure
 * Connections Only policy for links finishing setup, and finally notify
 * waiters via hci_encrypt_cfm().
 * NOTE(review): extract is lossy — the ev->status / ev->encrypt branch
 * structure is incomplete here; confirm against the full source.
 */
2303 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2305 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2306 struct hci_conn *conn;
2308 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2312 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2318 /* Encryption implies authentication */
2319 conn->link_mode |= HCI_LM_AUTH;
2320 conn->link_mode |= HCI_LM_ENCRYPT;
2321 conn->sec_level = conn->pending_sec_level;
2323 /* P-256 authentication key implies FIPS */
2324 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2325 conn->link_mode |= HCI_LM_FIPS;
2327 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2328 conn->type == LE_LINK)
2329 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2331 conn->link_mode &= ~HCI_LM_ENCRYPT;
2332 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2336 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2338 if (ev->status && conn->state == BT_CONNECTED) {
2339 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2340 hci_conn_drop(conn);
2344 if (conn->state == BT_CONFIG) {
2346 conn->state = BT_CONNECTED;
2348 /* In Secure Connections Only mode, do not allow any
2349 * connections that are not encrypted with AES-CCM
2350 * using a P-256 authenticated combination key.
2352 if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
2353 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2354 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2355 hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2356 hci_conn_drop(conn);
2360 hci_proto_connect_cfm(conn, ev->status);
2361 hci_conn_drop(conn);
2363 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2366 hci_dev_unlock(hdev);
/* Change Connection Link Key Complete event: mark the link SECURE on
 * success, clear the pending-auth flag and notify key-change waiters.
 * NOTE(review): status guard / lock / NULL-check lines not visible in
 * this extract.
 */
2369 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2370 struct sk_buff *skb)
2372 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2373 struct hci_conn *conn;
2375 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2379 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2382 conn->link_mode |= HCI_LM_SECURE;
2384 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2386 hci_key_change_cfm(conn, ev->status);
2389 hci_dev_unlock(hdev);
/* Remote Supported Features event: cache page 0 of the peer's features;
 * while still in BT_CONFIG, continue setup by reading extended features
 * (when both sides support SSP), requesting the remote name (or telling
 * mgmt the device connected), and finishing the connection if no
 * further authentication is needed.
 * NOTE(review): some braces/gotos are missing in this extract.
 */
2392 static void hci_remote_features_evt(struct hci_dev *hdev,
2393 struct sk_buff *skb)
2395 struct hci_ev_remote_features *ev = (void *) skb->data;
2396 struct hci_conn *conn;
2398 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2402 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2407 memcpy(conn->features[0], ev->features, 8);
2409 if (conn->state != BT_CONFIG)
2412 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2413 struct hci_cp_read_remote_ext_features cp;
2414 cp.handle = ev->handle;
2416 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2421 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2422 struct hci_cp_remote_name_req cp;
2423 memset(&cp, 0, sizeof(cp));
2424 bacpy(&cp.bdaddr, &conn->dst);
2425 cp.pscan_rep_mode = 0x02;
2426 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2427 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2428 mgmt_device_connected(hdev, &conn->dst, conn->type,
2429 conn->dst_type, 0, NULL, 0,
2432 if (!hci_outgoing_auth_needed(hdev, conn)) {
2433 conn->state = BT_CONNECTED;
2434 hci_proto_connect_cfm(conn, ev->status);
2435 hci_conn_drop(conn);
2439 hci_dev_unlock(hdev);
/* Command Complete event: dispatch to the per-opcode hci_cc_* handler,
 * stop the command timeout, complete any pending HCI request for this
 * opcode, and restart the command queue when the controller signals it
 * can accept more commands (ev->ncmd) and we are not mid-reset.
 */
2442 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2444 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2445 u8 status = skb->data[sizeof(*ev)];
2448 skb_pull(skb, sizeof(*ev));
2450 opcode = __le16_to_cpu(ev->opcode);
2453 case HCI_OP_INQUIRY_CANCEL:
2454 hci_cc_inquiry_cancel(hdev, skb);
2457 case HCI_OP_PERIODIC_INQ:
2458 hci_cc_periodic_inq(hdev, skb);
2461 case HCI_OP_EXIT_PERIODIC_INQ:
2462 hci_cc_exit_periodic_inq(hdev, skb);
2465 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2466 hci_cc_remote_name_req_cancel(hdev, skb);
2469 case HCI_OP_ROLE_DISCOVERY:
2470 hci_cc_role_discovery(hdev, skb);
2473 case HCI_OP_READ_LINK_POLICY:
2474 hci_cc_read_link_policy(hdev, skb);
2477 case HCI_OP_WRITE_LINK_POLICY:
2478 hci_cc_write_link_policy(hdev, skb);
2481 case HCI_OP_READ_DEF_LINK_POLICY:
2482 hci_cc_read_def_link_policy(hdev, skb);
2485 case HCI_OP_WRITE_DEF_LINK_POLICY:
2486 hci_cc_write_def_link_policy(hdev, skb);
2490 hci_cc_reset(hdev, skb);
2493 case HCI_OP_WRITE_LOCAL_NAME:
2494 hci_cc_write_local_name(hdev, skb);
2497 case HCI_OP_READ_LOCAL_NAME:
2498 hci_cc_read_local_name(hdev, skb);
2501 case HCI_OP_WRITE_AUTH_ENABLE:
2502 hci_cc_write_auth_enable(hdev, skb);
2505 case HCI_OP_WRITE_ENCRYPT_MODE:
2506 hci_cc_write_encrypt_mode(hdev, skb);
2509 case HCI_OP_WRITE_SCAN_ENABLE:
2510 hci_cc_write_scan_enable(hdev, skb);
2513 case HCI_OP_READ_CLASS_OF_DEV:
2514 hci_cc_read_class_of_dev(hdev, skb);
2517 case HCI_OP_WRITE_CLASS_OF_DEV:
2518 hci_cc_write_class_of_dev(hdev, skb);
2521 case HCI_OP_READ_VOICE_SETTING:
2522 hci_cc_read_voice_setting(hdev, skb);
2525 case HCI_OP_WRITE_VOICE_SETTING:
2526 hci_cc_write_voice_setting(hdev, skb);
2529 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2530 hci_cc_read_num_supported_iac(hdev, skb);
2533 case HCI_OP_WRITE_SSP_MODE:
2534 hci_cc_write_ssp_mode(hdev, skb);
2537 case HCI_OP_WRITE_SC_SUPPORT:
2538 hci_cc_write_sc_support(hdev, skb);
2541 case HCI_OP_READ_LOCAL_VERSION:
2542 hci_cc_read_local_version(hdev, skb);
2545 case HCI_OP_READ_LOCAL_COMMANDS:
2546 hci_cc_read_local_commands(hdev, skb);
2549 case HCI_OP_READ_LOCAL_FEATURES:
2550 hci_cc_read_local_features(hdev, skb);
2553 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2554 hci_cc_read_local_ext_features(hdev, skb);
2557 case HCI_OP_READ_BUFFER_SIZE:
2558 hci_cc_read_buffer_size(hdev, skb);
2561 case HCI_OP_READ_BD_ADDR:
2562 hci_cc_read_bd_addr(hdev, skb);
2565 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2566 hci_cc_read_page_scan_activity(hdev, skb);
2569 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2570 hci_cc_write_page_scan_activity(hdev, skb);
2573 case HCI_OP_READ_PAGE_SCAN_TYPE:
2574 hci_cc_read_page_scan_type(hdev, skb);
2577 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2578 hci_cc_write_page_scan_type(hdev, skb);
2581 case HCI_OP_READ_DATA_BLOCK_SIZE:
2582 hci_cc_read_data_block_size(hdev, skb);
2585 case HCI_OP_READ_FLOW_CONTROL_MODE:
2586 hci_cc_read_flow_control_mode(hdev, skb);
2589 case HCI_OP_READ_LOCAL_AMP_INFO:
2590 hci_cc_read_local_amp_info(hdev, skb);
2593 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2594 hci_cc_read_local_amp_assoc(hdev, skb);
2597 case HCI_OP_READ_INQ_RSP_TX_POWER:
2598 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2601 case HCI_OP_PIN_CODE_REPLY:
2602 hci_cc_pin_code_reply(hdev, skb);
2605 case HCI_OP_PIN_CODE_NEG_REPLY:
2606 hci_cc_pin_code_neg_reply(hdev, skb);
2609 case HCI_OP_READ_LOCAL_OOB_DATA:
2610 hci_cc_read_local_oob_data(hdev, skb);
2613 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2614 hci_cc_read_local_oob_ext_data(hdev, skb);
2617 case HCI_OP_LE_READ_BUFFER_SIZE:
2618 hci_cc_le_read_buffer_size(hdev, skb);
2621 case HCI_OP_LE_READ_LOCAL_FEATURES:
2622 hci_cc_le_read_local_features(hdev, skb);
2625 case HCI_OP_LE_READ_ADV_TX_POWER:
2626 hci_cc_le_read_adv_tx_power(hdev, skb);
2629 case HCI_OP_USER_CONFIRM_REPLY:
2630 hci_cc_user_confirm_reply(hdev, skb);
2633 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2634 hci_cc_user_confirm_neg_reply(hdev, skb);
2637 case HCI_OP_USER_PASSKEY_REPLY:
2638 hci_cc_user_passkey_reply(hdev, skb);
2641 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2642 hci_cc_user_passkey_neg_reply(hdev, skb);
2645 case HCI_OP_LE_SET_RANDOM_ADDR:
2646 hci_cc_le_set_random_addr(hdev, skb);
2649 case HCI_OP_LE_SET_ADV_ENABLE:
2650 hci_cc_le_set_adv_enable(hdev, skb);
2653 case HCI_OP_LE_SET_SCAN_PARAM:
2654 hci_cc_le_set_scan_param(hdev, skb);
2657 case HCI_OP_LE_SET_SCAN_ENABLE:
2658 hci_cc_le_set_scan_enable(hdev, skb);
2661 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2662 hci_cc_le_read_white_list_size(hdev, skb);
2665 case HCI_OP_LE_CLEAR_WHITE_LIST:
2666 hci_cc_le_clear_white_list(hdev, skb);
2669 case HCI_OP_LE_ADD_TO_WHITE_LIST:
2670 hci_cc_le_add_to_white_list(hdev, skb);
2673 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
2674 hci_cc_le_del_from_white_list(hdev, skb);
2677 case HCI_OP_LE_READ_SUPPORTED_STATES:
2678 hci_cc_le_read_supported_states(hdev, skb);
2681 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2682 hci_cc_write_le_host_supported(hdev, skb);
2685 case HCI_OP_LE_SET_ADV_PARAM:
2686 hci_cc_set_adv_param(hdev, skb);
2689 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2690 hci_cc_write_remote_amp_assoc(hdev, skb);
2693 case HCI_OP_READ_RSSI:
2694 hci_cc_read_rssi(hdev, skb);
2697 case HCI_OP_READ_TX_POWER:
2698 hci_cc_read_tx_power(hdev, skb);
/* Unknown opcode: just log it. */
2702 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
/* A real command completed -> cancel the command watchdog. */
2706 if (opcode != HCI_OP_NOP)
2707 del_timer(&hdev->cmd_timer);
2709 hci_req_cmd_complete(hdev, opcode, status);
2711 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2712 atomic_set(&hdev->cmd_cnt, 1);
2713 if (!skb_queue_empty(&hdev->cmd_q))
2714 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Command Status event: dispatch to the per-opcode hci_cs_* handler,
 * stop the command timeout, complete a pending HCI request when the
 * status is an error or no follow-up event is expected, and restart the
 * command queue when the controller can accept more commands.
 */
2718 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2720 struct hci_ev_cmd_status *ev = (void *) skb->data;
2723 skb_pull(skb, sizeof(*ev));
2725 opcode = __le16_to_cpu(ev->opcode);
2728 case HCI_OP_INQUIRY:
2729 hci_cs_inquiry(hdev, ev->status);
2732 case HCI_OP_CREATE_CONN:
2733 hci_cs_create_conn(hdev, ev->status);
2736 case HCI_OP_ADD_SCO:
2737 hci_cs_add_sco(hdev, ev->status);
2740 case HCI_OP_AUTH_REQUESTED:
2741 hci_cs_auth_requested(hdev, ev->status);
2744 case HCI_OP_SET_CONN_ENCRYPT:
2745 hci_cs_set_conn_encrypt(hdev, ev->status);
2748 case HCI_OP_REMOTE_NAME_REQ:
2749 hci_cs_remote_name_req(hdev, ev->status);
2752 case HCI_OP_READ_REMOTE_FEATURES:
2753 hci_cs_read_remote_features(hdev, ev->status);
2756 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2757 hci_cs_read_remote_ext_features(hdev, ev->status);
2760 case HCI_OP_SETUP_SYNC_CONN:
2761 hci_cs_setup_sync_conn(hdev, ev->status);
2764 case HCI_OP_SNIFF_MODE:
2765 hci_cs_sniff_mode(hdev, ev->status);
2768 case HCI_OP_EXIT_SNIFF_MODE:
2769 hci_cs_exit_sniff_mode(hdev, ev->status);
2772 case HCI_OP_DISCONNECT:
2773 hci_cs_disconnect(hdev, ev->status);
2776 case HCI_OP_CREATE_PHY_LINK:
2777 hci_cs_create_phylink(hdev, ev->status);
2780 case HCI_OP_ACCEPT_PHY_LINK:
2781 hci_cs_accept_phylink(hdev, ev->status);
2784 case HCI_OP_LE_CREATE_CONN:
2785 hci_cs_le_create_conn(hdev, ev->status);
2788 case HCI_OP_LE_START_ENC:
2789 hci_cs_le_start_enc(hdev, ev->status);
/* Unknown opcode: just log it. */
2793 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2797 if (opcode != HCI_OP_NOP)
2798 del_timer(&hdev->cmd_timer);
/* Complete the request now only if no further event will arrive. */
2801 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
2802 hci_req_cmd_complete(hdev, opcode, ev->status);
2804 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2805 atomic_set(&hdev->cmd_cnt, 1);
2806 if (!skb_queue_empty(&hdev->cmd_q))
2807 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Role Change event: update the connection's master/slave link-mode bit
 * on success, clear the pending role-switch flag, and notify waiters.
 * NOTE(review): status/role branch lines are incomplete in this extract.
 */
2811 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2813 struct hci_ev_role_change *ev = (void *) skb->data;
2814 struct hci_conn *conn;
2816 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2820 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2824 conn->link_mode &= ~HCI_LM_MASTER;
2826 conn->link_mode |= HCI_LM_MASTER;
2829 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2831 hci_role_switch_cfm(conn, ev->status, ev->role);
2834 hci_dev_unlock(hdev);
/* Number of Completed Packets event (packet-based flow control):
 * validate the event length, then for each handle credit the completed
 * packets back to the per-type quota (ACL, LE — falling back to the ACL
 * pool when the controller has no separate LE buffers — or SCO), capped
 * at the controller-advertised maximum, and reschedule TX.
 */
2837 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2839 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2842 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2843 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2847 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2848 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2849 BT_DBG("%s bad parameters", hdev->name);
2853 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2855 for (i = 0; i < ev->num_hndl; i++) {
2856 struct hci_comp_pkts_info *info = &ev->handles[i];
2857 struct hci_conn *conn;
2858 __u16 handle, count;
2860 handle = __le16_to_cpu(info->handle);
2861 count = __le16_to_cpu(info->count);
2863 conn = hci_conn_hash_lookup_handle(hdev, handle);
2867 conn->sent -= count;
2869 switch (conn->type) {
2871 hdev->acl_cnt += count;
2872 if (hdev->acl_cnt > hdev->acl_pkts)
2873 hdev->acl_cnt = hdev->acl_pkts;
2877 if (hdev->le_pkts) {
2878 hdev->le_cnt += count;
2879 if (hdev->le_cnt > hdev->le_pkts)
2880 hdev->le_cnt = hdev->le_pkts;
/* No dedicated LE buffers: LE traffic shares the ACL pool. */
2882 hdev->acl_cnt += count;
2883 if (hdev->acl_cnt > hdev->acl_pkts)
2884 hdev->acl_cnt = hdev->acl_pkts;
2889 hdev->sco_cnt += count;
2890 if (hdev->sco_cnt > hdev->sco_pkts)
2891 hdev->sco_cnt = hdev->sco_pkts;
2895 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2900 queue_work(hdev->workqueue, &hdev->tx_work);
/* Look up the hci_conn for a handle, honoring the device type: BR/EDR
 * devices use the connection hash directly, AMP devices resolve the
 * handle via the channel table (chan->conn).
 * NOTE(review): case labels and the chan->conn return are incomplete in
 * this extract.
 */
2903 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2906 struct hci_chan *chan;
2908 switch (hdev->dev_type) {
2910 return hci_conn_hash_lookup_handle(hdev, handle);
2912 chan = hci_chan_lookup_handle(hdev, handle);
2917 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
/* Number of Completed Data Blocks event (block-based flow control):
 * validate the event length, then for each handle credit the completed
 * blocks back to the shared block pool (capped at num_blocks) and
 * reschedule TX.
 */
2924 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2926 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2929 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2930 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2934 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2935 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2936 BT_DBG("%s bad parameters", hdev->name);
2940 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2943 for (i = 0; i < ev->num_hndl; i++) {
2944 struct hci_comp_blocks_info *info = &ev->handles[i];
2945 struct hci_conn *conn = NULL;
2946 __u16 handle, block_count;
2948 handle = __le16_to_cpu(info->handle);
2949 block_count = __le16_to_cpu(info->blocks);
2951 conn = __hci_conn_lookup_handle(hdev, handle);
2955 conn->sent -= block_count;
2957 switch (conn->type) {
2960 hdev->block_cnt += block_count;
2961 if (hdev->block_cnt > hdev->num_blocks)
2962 hdev->block_cnt = hdev->num_blocks;
2966 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2971 queue_work(hdev->workqueue, &hdev->tx_work);
/* Mode Change event: record the new mode; for unsolicited changes,
 * track whether the link is in active mode via the POWER_SAVE flag, and
 * run any SCO setup that was deferred pending the mode change.
 * NOTE(review): NULL-check / lock lines not visible in this extract.
 */
2974 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2976 struct hci_ev_mode_change *ev = (void *) skb->data;
2977 struct hci_conn *conn;
2979 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2983 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2985 conn->mode = ev->mode;
2987 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2989 if (conn->mode == HCI_CM_ACTIVE)
2990 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2992 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2995 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2996 hci_sco_setup(conn, ev->status);
2999 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the PIN
 * Code Request event: auto-rejects when not pairable, otherwise forwards the
 * request to mgmt user space (with a "secure" hint for high security). */
3002 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3004 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3005 struct hci_conn *conn;
3007 BT_DBG("%s", hdev->name);
3011 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Pairing on an established link: switch to the longer pairing
 * disconnect timeout via a hold/drop cycle. */
3015 if (conn->state == BT_CONNECTED) {
3016 hci_conn_hold(conn);
3017 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3018 hci_conn_drop(conn);
3021 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
3022 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3023 sizeof(ev->bdaddr), &ev->bdaddr);
3024 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
3027 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3032 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3036 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines; the negative-reply
 * path at the end belongs to a not-found/reject label). Handles the Link Key
 * Request event: replies with a stored key if one is acceptable for the
 * connection's security requirements, otherwise sends a negative reply. */
3039 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3041 struct hci_ev_link_key_req *ev = (void *) skb->data;
3042 struct hci_cp_link_key_reply cp;
3043 struct hci_conn *conn;
3044 struct link_key *key;
3046 BT_DBG("%s", hdev->name);
3048 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3053 key = hci_find_link_key(hdev, &ev->bdaddr);
3055 BT_DBG("%s link key not found for %pMR", hdev->name,
3060 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
/* Debug keys are only usable when the debug-keys flag is set. */
3063 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
3064 key->type == HCI_LK_DEBUG_COMBINATION) {
3065 BT_DBG("%s ignoring debug key", hdev->name);
3069 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Unauthenticated keys can't satisfy a MITM (auth_type bit 0) request. */
3071 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3072 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3073 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3074 BT_DBG("%s ignoring unauthenticated key", hdev->name);
/* A short-PIN combination key is too weak for high security. */
3078 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3079 conn->pending_sec_level == BT_SECURITY_HIGH) {
3080 BT_DBG("%s ignoring key unauthenticated for high security",
3085 conn->key_type = key->type;
3086 conn->pin_length = key->pin_len;
3089 bacpy(&cp.bdaddr, &ev->bdaddr);
3090 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3092 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3094 hci_dev_unlock(hdev);
/* Reject path: no usable key for this peer. */
3099 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3100 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the Link
 * Key Notification event: records the new key on the connection and stores
 * it via mgmt when management is active. */
3103 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3105 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3106 struct hci_conn *conn;
3109 BT_DBG("%s", hdev->name);
3113 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3115 hci_conn_hold(conn);
3116 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3117 pin_len = conn->pin_length;
/* A changed-combination key keeps the previously recorded key type. */
3119 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
3120 conn->key_type = ev->key_type;
3122 hci_conn_drop(conn);
3125 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3126 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
3127 ev->key_type, pin_len);
3129 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the Read
 * Clock Offset Complete event: caches the peer's clock offset in the inquiry
 * cache entry so later paging can use it. */
3132 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3134 struct hci_ev_clock_offset *ev = (void *) skb->data;
3135 struct hci_conn *conn;
3137 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3141 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3142 if (conn && !ev->status) {
3143 struct inquiry_entry *ie;
3145 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3147 ie->data.clock_offset = ev->clock_offset;
3148 ie->timestamp = jiffies;
3152 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the
 * Connection Packet Type Changed event: mirrors the controller's new packet
 * type selection into the connection object. */
3155 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3157 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3158 struct hci_conn *conn;
3160 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3164 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3165 if (conn && !ev->status)
3166 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3168 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the Page
 * Scan Repetition Mode Change event: refreshes the cached page scan mode of
 * the peer in the inquiry cache. */
3171 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3173 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3174 struct inquiry_entry *ie;
3176 BT_DBG("%s", hdev->name);
3180 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3182 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3183 ie->timestamp = jiffies;
3186 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines). Handles Inquiry
 * Result With RSSI events. Two on-air layouts exist; the code distinguishes
 * them by dividing the payload length by the response count, then feeds each
 * response into the inquiry cache and reports it to mgmt. */
3189 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3190 struct sk_buff *skb)
3192 struct inquiry_data data;
3193 int num_rsp = *((__u8 *) skb->data);
3194 bool name_known, ssp;
3196 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Periodic inquiry results are not forwarded. */
3201 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
/* Variant with an extra pscan_mode field per response. */
3206 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3207 struct inquiry_info_with_rssi_and_pscan_mode *info;
3208 info = (void *) (skb->data + 1);
3210 for (; num_rsp; num_rsp--, info++) {
3211 bacpy(&data.bdaddr, &info->bdaddr);
3212 data.pscan_rep_mode = info->pscan_rep_mode;
3213 data.pscan_period_mode = info->pscan_period_mode;
3214 data.pscan_mode = info->pscan_mode;
3215 memcpy(data.dev_class, info->dev_class, 3);
3216 data.clock_offset = info->clock_offset;
3217 data.rssi = info->rssi;
3218 data.ssp_mode = 0x00;
3220 name_known = hci_inquiry_cache_update(hdev, &data,
3222 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3223 info->dev_class, info->rssi,
3224 !name_known, ssp, NULL, 0, NULL, 0);
/* Standard inquiry_info_with_rssi layout (no pscan_mode on air). */
3227 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3229 for (; num_rsp; num_rsp--, info++) {
3230 bacpy(&data.bdaddr, &info->bdaddr);
3231 data.pscan_rep_mode = info->pscan_rep_mode;
3232 data.pscan_period_mode = info->pscan_period_mode;
3233 data.pscan_mode = 0x00;
3234 memcpy(data.dev_class, info->dev_class, 3);
3235 data.clock_offset = info->clock_offset;
3236 data.rssi = info->rssi;
3237 data.ssp_mode = 0x00;
3238 name_known = hci_inquiry_cache_update(hdev, &data,
3240 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3241 info->dev_class, info->rssi,
3242 !name_known, ssp, NULL, 0, NULL, 0);
3246 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the Read
 * Remote Extended Features Complete event: stores the feature page, derives
 * SSP/SC flags from host features (page 1), and continues connection setup
 * (name request / mgmt connected / connect confirm). */
3249 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3250 struct sk_buff *skb)
3252 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3253 struct hci_conn *conn;
3255 BT_DBG("%s", hdev->name);
3259 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3263 if (ev->page < HCI_MAX_PAGES)
3264 memcpy(conn->features[ev->page], ev->features, 8);
3266 if (!ev->status && ev->page == 0x01) {
3267 struct inquiry_entry *ie;
3269 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3271 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3273 if (ev->features[0] & LMP_HOST_SSP) {
3274 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3276 /* It is mandatory by the Bluetooth specification that
3277 * Extended Inquiry Results are only used when Secure
3278 * Simple Pairing is enabled, but some devices violate
3281 * To make these devices work, the internal SSP
3282 * enabled flag needs to be cleared if the remote host
3283 * features do not indicate SSP support */
3284 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3287 if (ev->features[0] & LMP_HOST_SC)
3288 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
/* Setup continuation below only applies while still configuring. */
3291 if (conn->state != BT_CONFIG)
3294 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3295 struct hci_cp_remote_name_req cp;
3296 memset(&cp, 0, sizeof(cp));
3297 bacpy(&cp.bdaddr, &conn->dst);
3298 cp.pscan_rep_mode = 0x02;
3299 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3300 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3301 mgmt_device_connected(hdev, &conn->dst, conn->type,
3302 conn->dst_type, 0, NULL, 0,
3305 if (!hci_outgoing_auth_needed(hdev, conn)) {
3306 conn->state = BT_CONNECTED;
3307 hci_proto_connect_cfm(conn, ev->status);
3308 hci_conn_drop(conn);
3312 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the
 * Synchronous Connection Complete event for SCO/eSCO links: on success the
 * connection goes to BT_CONNECTED; for a set of known rejection codes the
 * code retries setup with a downgraded packet type. */
3315 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3316 struct sk_buff *skb)
3318 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3319 struct hci_conn *conn;
3321 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3325 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
/* An eSCO attempt may have fallen back to SCO; retry the lookup and
 * fix up the recorded link type. */
3327 if (ev->link_type == ESCO_LINK)
3330 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3334 conn->type = SCO_LINK;
3337 switch (ev->status) {
3339 conn->handle = __le16_to_cpu(ev->handle);
3340 conn->state = BT_CONNECTED;
3342 hci_conn_add_sysfs(conn);
/* Rejection codes where retrying with a reduced packet type helps. */
3345 case 0x0d: /* Connection Rejected due to Limited Resources */
3346 case 0x11: /* Unsupported Feature or Parameter Value */
3347 case 0x1c: /* SCO interval rejected */
3348 case 0x1a: /* Unsupported Remote Feature */
3349 case 0x1f: /* Unspecified error */
3350 case 0x20: /* Unsupported LMP Parameter value */
3352 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3353 (hdev->esco_type & EDR_ESCO_MASK);
3354 if (hci_setup_sync(conn, conn->link->handle))
3360 conn->state = BT_CLOSED;
3364 hci_proto_connect_cfm(conn, ev->status);
3369 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines, including the
 * accumulator declaration and return). Walks length-prefixed EIR fields to
 * compute the significant length of the data — confirm the terminating
 * condition (zero-length field) against the full source. */
3372 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3376 while (parsed < eir_len) {
3377 u8 field_len = eir[0];
/* Each field occupies field_len + 1 bytes (length byte itself). */
3382 parsed += field_len + 1;
3383 eir += field_len + 1;
/* NOTE(review): body incomplete (extraction dropped lines). Handles the
 * Extended Inquiry Result event: updates the inquiry cache per response and
 * reports the device (including its EIR data) to mgmt. */
3389 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3390 struct sk_buff *skb)
3392 struct inquiry_data data;
3393 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3394 int num_rsp = *((__u8 *) skb->data);
3397 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3402 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3407 for (; num_rsp; num_rsp--, info++) {
3408 bool name_known, ssp;
3410 bacpy(&data.bdaddr, &info->bdaddr);
3411 data.pscan_rep_mode = info->pscan_rep_mode;
3412 data.pscan_period_mode = info->pscan_period_mode;
3413 data.pscan_mode = 0x00;
3414 memcpy(data.dev_class, info->dev_class, 3);
3415 data.clock_offset = info->clock_offset;
3416 data.rssi = info->rssi;
/* Extended inquiry results imply SSP support on the remote. */
3417 data.ssp_mode = 0x01;
/* With mgmt active, the name is "known" if the EIR carries one. */
3419 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3420 name_known = eir_has_data_type(info->data,
3426 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3428 eir_len = eir_get_length(info->data, sizeof(info->data));
3429 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3430 info->dev_class, info->rssi, !name_known,
3431 ssp, info->data, eir_len, NULL, 0);
3434 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the
 * Encryption Key Refresh Complete event; only acted upon for LE links
 * (BR/EDR is handled via auth_complete). Promotes the pending security
 * level, or tears the link down on failure. */
3437 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3438 struct sk_buff *skb)
3440 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3441 struct hci_conn *conn;
3443 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3444 __le16_to_cpu(ev->handle));
3448 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3452 /* For BR/EDR the necessary steps are taken through the
3453 * auth_complete event.
3455 if (conn->type != LE_LINK)
3459 conn->sec_level = conn->pending_sec_level;
3461 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* Refresh failed on a live link: disconnect with an auth failure. */
3463 if (ev->status && conn->state == BT_CONNECTED) {
3464 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3465 hci_conn_drop(conn);
3469 if (conn->state == BT_CONFIG) {
3471 conn->state = BT_CONNECTED;
3473 hci_proto_connect_cfm(conn, ev->status);
3474 hci_conn_drop(conn);
3476 hci_auth_cfm(conn, ev->status);
3478 hci_conn_hold(conn);
3479 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3480 hci_conn_drop(conn);
3484 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines). Computes the
 * authentication requirement to send in an IO Capability Reply, combining
 * the remote's requirement with our own MITM preference (bit 0). */
3487 static u8 hci_get_auth_req(struct hci_conn *conn)
3489 /* If remote requests no-bonding follow that lead */
3490 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3491 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3492 return conn->remote_auth | (conn->auth_type & 0x01);
3494 /* If both remote and local have enough IO capabilities, require
3497 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3498 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3499 return conn->remote_auth | 0x01;
3501 /* No MITM protection possible so ignore remote requirement */
3502 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the IO
 * Capability Request event: replies with our capabilities (and OOB presence)
 * when pairing is allowed, otherwise sends a negative reply. */
3505 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3507 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3508 struct hci_conn *conn;
3510 BT_DBG("%s", hdev->name);
3514 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3518 hci_conn_hold(conn);
3520 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* Allow the exchange when pairable, or for no-bonding requests. */
3523 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3524 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3525 struct hci_cp_io_capability_reply cp;
3527 bacpy(&cp.bdaddr, &ev->bdaddr);
3528 /* Change the IO capability from KeyboardDisplay
3529 * to DisplayYesNo as it is not supported by BT spec. */
3530 cp.capability = (conn->io_capability == 0x04) ?
3531 HCI_IO_DISPLAY_YESNO : conn->io_capability;
3533 /* If we are initiators, there is no remote information yet */
3534 if (conn->remote_auth == 0xff) {
3535 cp.authentication = conn->auth_type;
3537 /* Request MITM protection if our IO caps allow it
3538 * except for the no-bonding case
3540 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
3541 cp.authentication != HCI_AT_NO_BONDING)
3542 cp.authentication |= 0x01;
/* Responder path: merge requirements via hci_get_auth_req(). */
3544 conn->auth_type = hci_get_auth_req(conn);
3545 cp.authentication = conn->auth_type;
3548 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3549 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3554 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3557 struct hci_cp_io_capability_neg_reply cp;
3559 bacpy(&cp.bdaddr, &ev->bdaddr);
3560 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3562 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3567 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the IO
 * Capability Response event: records the remote's capability/authentication
 * and (on an elided condition) flags remote OOB data presence. */
3570 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3572 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3573 struct hci_conn *conn;
3575 BT_DBG("%s", hdev->name);
3579 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3583 conn->remote_cap = ev->capability;
3584 conn->remote_auth = ev->authentication;
3586 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3589 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the User
 * Confirmation Request event (SSP numeric comparison): rejects when MITM is
 * required but unachievable, auto-accepts (possibly delayed) when neither
 * side needs MITM, otherwise punts to mgmt user space. */
3592 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3593 struct sk_buff *skb)
3595 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3596 int loc_mitm, rem_mitm, confirm_hint = 0;
3597 struct hci_conn *conn;
3599 BT_DBG("%s", hdev->name);
3603 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3606 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement encodes MITM protection. */
3610 loc_mitm = (conn->auth_type & 0x01);
3611 rem_mitm = (conn->remote_auth & 0x01);
3613 /* If we require MITM but the remote device can't provide that
3614 * (it has NoInputNoOutput) then reject the confirmation request
3616 if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3617 BT_DBG("Rejecting request: remote device can't provide MITM");
3618 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3619 sizeof(ev->bdaddr), &ev->bdaddr);
3623 /* If no side requires MITM protection; auto-accept */
3624 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
3625 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
3627 /* If we're not the initiators request authorization to
3628 * proceed from user space (mgmt_user_confirm with
3629 * confirm_hint set to 1). */
3630 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3631 BT_DBG("Confirming auto-accept as acceptor");
3636 BT_DBG("Auto-accept of user confirmation with %ums delay",
3637 hdev->auto_accept_delay);
/* Optional configurable delay before the auto-accept reply. */
3639 if (hdev->auto_accept_delay > 0) {
3640 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3641 queue_delayed_work(conn->hdev->workqueue,
3642 &conn->auto_accept_work, delay);
3646 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3647 sizeof(ev->bdaddr), &ev->bdaddr);
3652 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
3653 le32_to_cpu(ev->passkey), confirm_hint);
3656 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the User
 * Passkey Request event by forwarding it to mgmt when management is active. */
3659 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3660 struct sk_buff *skb)
3662 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3664 BT_DBG("%s", hdev->name);
3666 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3667 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the User
 * Passkey Notification event: records the passkey to display, resets the
 * entered-digit counter, and notifies mgmt. */
3670 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3671 struct sk_buff *skb)
3673 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3674 struct hci_conn *conn;
3676 BT_DBG("%s", hdev->name);
3678 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3682 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3683 conn->passkey_entered = 0;
3685 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3686 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3687 conn->dst_type, conn->passkey_notify,
3688 conn->passkey_entered);
/* NOTE(review): body incomplete (extraction dropped lines; the switch header
 * was elided). Handles the Keypress Notification event: tracks how many
 * passkey digits the remote user has entered and relays progress to mgmt. */
3691 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3693 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3694 struct hci_conn *conn;
3696 BT_DBG("%s", hdev->name);
3698 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3703 case HCI_KEYPRESS_STARTED:
3704 conn->passkey_entered = 0;
3707 case HCI_KEYPRESS_ENTERED:
3708 conn->passkey_entered++;
3711 case HCI_KEYPRESS_ERASED:
3712 conn->passkey_entered--;
3715 case HCI_KEYPRESS_CLEARED:
3716 conn->passkey_entered = 0;
3719 case HCI_KEYPRESS_COMPLETED:
3723 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3724 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3725 conn->dst_type, conn->passkey_notify,
3726 conn->passkey_entered);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the
 * Simple Pairing Complete event: reports an auth failure to mgmt only when
 * we did not initiate (avoiding duplicates with auth_complete). */
3729 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3730 struct sk_buff *skb)
3732 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3733 struct hci_conn *conn;
3735 BT_DBG("%s", hdev->name);
3739 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3743 /* To avoid duplicate auth_failed events to user space we check
3744 * the HCI_CONN_AUTH_PEND flag which will be set if we
3745 * initiated the authentication. A traditional auth_complete
3746 * event gets always produced as initiator and is also mapped to
3747 * the mgmt_auth_failed event */
3748 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3749 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3752 hci_conn_drop(conn);
3755 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the
 * Remote Host Supported Features Notification event: stores the host feature
 * page and refreshes the inquiry cache's SSP-mode flag. */
3758 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3759 struct sk_buff *skb)
3761 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3762 struct inquiry_entry *ie;
3763 struct hci_conn *conn;
3765 BT_DBG("%s", hdev->name);
3769 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3771 memcpy(conn->features[1], ev->features, 8);
3773 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3775 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3777 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the
 * Remote OOB Data Request event: replies with stored OOB hash/randomizer
 * values (extended variant when Secure Connections is enabled), or a
 * negative reply when no data is stored. */
3780 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3781 struct sk_buff *skb)
3783 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3784 struct oob_data *data;
3786 BT_DBG("%s", hdev->name);
3790 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3793 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
/* SC enabled: send both P-192 and P-256 values in the ext reply. */
3795 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
3796 struct hci_cp_remote_oob_ext_data_reply cp;
3798 bacpy(&cp.bdaddr, &ev->bdaddr);
3799 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
3800 memcpy(cp.randomizer192, data->randomizer192,
3801 sizeof(cp.randomizer192));
3802 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
3803 memcpy(cp.randomizer256, data->randomizer256,
3804 sizeof(cp.randomizer256));
3806 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
3809 struct hci_cp_remote_oob_data_reply cp;
3811 bacpy(&cp.bdaddr, &ev->bdaddr);
3812 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
3813 memcpy(cp.randomizer, data->randomizer192,
3814 sizeof(cp.randomizer));
3816 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
/* No stored OOB data for this peer: negative reply. */
3820 struct hci_cp_remote_oob_data_neg_reply cp;
3822 bacpy(&cp.bdaddr, &ev->bdaddr);
3823 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
3828 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the AMP
 * Physical Link Complete event: marks the AMP hcon connected, copies the
 * peer address from the controlling BR/EDR link, and confirms to the AMP
 * manager. */
3831 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3832 struct sk_buff *skb)
3834 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3835 struct hci_conn *hcon, *bredr_hcon;
3837 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3842 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3844 hci_dev_unlock(hdev);
3850 hci_dev_unlock(hdev);
/* The BR/EDR link that brokered this AMP physical link. */
3854 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3856 hcon->state = BT_CONNECTED;
3857 bacpy(&hcon->dst, &bredr_hcon->dst);
3859 hci_conn_hold(hcon);
3860 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3861 hci_conn_drop(hcon);
3863 hci_conn_add_sysfs(hcon);
3865 amp_physical_cfm(bredr_hcon, hcon);
3867 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the AMP
 * Logical Link Complete event: creates an hci_chan for the logical link and
 * confirms it to the waiting L2CAP channel via the AMP manager. */
3870 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3872 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3873 struct hci_conn *hcon;
3874 struct hci_chan *hchan;
3875 struct amp_mgr *mgr;
3877 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3878 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3881 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3885 /* Create AMP hchan */
3886 hchan = hci_chan_create(hcon);
3890 hchan->handle = le16_to_cpu(ev->handle);
3892 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3894 mgr = hcon->amp_mgr;
3895 if (mgr && mgr->bredr_chan) {
3896 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3898 l2cap_chan_lock(bredr_chan);
/* Switch the L2CAP connection MTU to the AMP block MTU. */
3900 bredr_chan->conn->mtu = hdev->block_mtu;
3901 l2cap_logical_cfm(bredr_chan, hchan, 0);
3902 hci_conn_hold(hcon);
3904 l2cap_chan_unlock(bredr_chan);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the AMP
 * Disconnection Logical Link Complete event: looks up the hci_chan by its
 * logical handle and destroys the logical link. */
3908 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3909 struct sk_buff *skb)
3911 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3912 struct hci_chan *hchan;
3914 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3915 le16_to_cpu(ev->handle), ev->status);
3922 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3926 amp_destroy_logical_link(hchan, ev->reason);
3929 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the AMP
 * Disconnection Physical Link Complete event: closes the matching hcon. */
3932 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3933 struct sk_buff *skb)
3935 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3936 struct hci_conn *hcon;
3938 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3945 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3947 hcon->state = BT_CLOSED;
3951 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines; several branch
 * headers are elided). Handles the LE Connection Complete event: finds or
 * creates the hci_conn, records initiator/responder addresses based on role
 * and privacy settings, resolves RPAs via the IRK store, and completes or
 * fails connection setup. */
3954 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3956 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3957 struct hci_conn *conn;
3958 struct smp_irk *irk;
3960 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
/* Match against a pending outgoing LE connection first. */
3964 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3966 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3968 BT_ERR("No memory for new connection");
3972 conn->dst_type = ev->bdaddr_type;
3974 if (ev->role == LE_CONN_ROLE_MASTER) {
3976 conn->link_mode |= HCI_LM_MASTER;
3979 /* If we didn't have a hci_conn object previously
3980 * but we're in master role this must be something
3981 * initiated using a white list. Since white list based
3982 * connections are not "first class citizens" we don't
3983 * have full tracking of them. Therefore, we go ahead
3984 * with a "best effort" approach of determining the
3985 * initiator address based on the HCI_PRIVACY flag.
3988 conn->resp_addr_type = ev->bdaddr_type;
3989 bacpy(&conn->resp_addr, &ev->bdaddr);
3990 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3991 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
3992 bacpy(&conn->init_addr, &hdev->rpa);
3994 hci_copy_identity_address(hdev,
3996 &conn->init_addr_type);
4000 cancel_delayed_work(&conn->le_conn_timeout);
4004 /* Set the responder (our side) address type based on
4005 * the advertising address type.
4007 conn->resp_addr_type = hdev->adv_addr_type;
4008 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
4009 bacpy(&conn->resp_addr, &hdev->random_addr);
4011 bacpy(&conn->resp_addr, &hdev->bdaddr);
4013 conn->init_addr_type = ev->bdaddr_type;
4014 bacpy(&conn->init_addr, &ev->bdaddr);
4017 /* Lookup the identity address from the stored connection
4018 * address and address type.
4020 * When establishing connections to an identity address, the
4021 * connection procedure will store the resolvable random
4022 * address first. Now if it can be converted back into the
4023 * identity address, start using the identity address from
4026 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4028 bacpy(&conn->dst, &irk->bdaddr);
4029 conn->dst_type = irk->addr_type;
/* Failure path (condition elided above): tear down the attempt. */
4033 hci_le_conn_failed(conn, ev->status);
4037 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4038 mgmt_device_connected(hdev, &conn->dst, conn->type,
4039 conn->dst_type, 0, NULL, 0, NULL);
4041 conn->sec_level = BT_SECURITY_LOW;
4042 conn->handle = __le16_to_cpu(ev->handle);
4043 conn->state = BT_CONNECTED;
4045 if (test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
4046 set_bit(HCI_CONN_6LOWPAN, &conn->flags);
4048 hci_conn_add_sysfs(conn);
4050 hci_proto_connect_cfm(conn, ev->status);
/* Connection established — drop any pending auto-connect entry. */
4052 hci_pend_le_conn_del(hdev, &conn->dst, conn->dst_type);
4055 hci_dev_unlock(hdev);
4058 /* This function requires the caller holds hdev->lock */
/* NOTE(review): body incomplete (extraction dropped lines). Resolves the
 * advertiser address via IRK, and if it matches a pending LE connection
 * entry, initiates an LE connection; -EBUSY from the attempt is tolerated. */
4059 static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
4062 struct hci_conn *conn;
4063 struct smp_irk *irk;
4065 /* If this is a resolvable address, we should resolve it and then
4066 * update address and address type variables.
4068 irk = hci_get_irk(hdev, addr, addr_type);
4070 addr = &irk->bdaddr;
4071 addr_type = irk->addr_type;
4074 if (!hci_pend_le_conn_lookup(hdev, addr, addr_type))
4077 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4082 switch (PTR_ERR(conn)) {
4084 /* If hci_connect() returns -EBUSY it means there is already
4085 * an LE connection attempt going on. Since controllers don't
4086 * support more than one connection attempt at the time, we
4087 * don't consider this an error case.
4091 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* NOTE(review): body incomplete (extraction dropped lines). Processes one LE
 * advertising report: in passive scan mode only pending-connection checks
 * run; in active scan mode, ADV_IND/ADV_SCAN_IND reports are cached so they
 * can be merged with the matching SCAN_RSP before reporting to mgmt. */
4095 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4096 u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
4098 struct discovery_state *d = &hdev->discovery;
4101 /* Passive scanning shouldn't trigger any device found events */
4102 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4103 if (type == LE_ADV_IND || type == LE_ADV_DIRECT_IND)
4104 check_pending_le_conn(hdev, bdaddr, bdaddr_type);
4108 /* If there's nothing pending either store the data from this
4109 * event or send an immediate device found event if the data
4110 * should not be stored for later.
4112 if (!has_pending_adv_report(hdev)) {
4113 /* If the report will trigger a SCAN_REQ store it for
4116 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4117 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4122 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4123 rssi, 0, 1, data, len, NULL, 0);
4127 /* Check if the pending report is for the same device as the new one */
4128 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4129 bdaddr_type == d->last_adv_addr_type);
4131 /* If the pending data doesn't match this report or this isn't a
4132 * scan response (e.g. we got a duplicate ADV_IND) then force
4133 * sending of the pending data.
4135 if (type != LE_ADV_SCAN_RSP || !match) {
4136 /* Send out whatever is in the cache, but skip duplicates */
4138 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4139 d->last_adv_addr_type, NULL,
4140 d->last_adv_rssi, 0, 1,
4142 d->last_adv_data_len, NULL, 0);
4144 /* If the new report will trigger a SCAN_REQ store it for
4147 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4148 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4153 /* The advertising reports cannot be merged, so clear
4154 * the pending report and send out a device found event.
4156 clear_pending_adv_report(hdev);
4157 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4158 rssi, 0, 1, data, len, NULL, 0);
4162 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4163 * the new event is a SCAN_RSP. We can therefore proceed with
4164 * sending a merged device found event.
4166 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4167 d->last_adv_addr_type, NULL, rssi, 0, 1, data, len,
4168 d->last_adv_data, d->last_adv_data_len);
4169 clear_pending_adv_report(hdev);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the LE
 * Advertising Report event: walks the packed report list and feeds each one
 * to process_adv_report(); the RSSI byte trails each report's data. */
4172 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4174 u8 num_reports = skb->data[0];
4175 void *ptr = &skb->data[1];
4179 while (num_reports--) {
4180 struct hci_ev_le_advertising_info *ev = ptr;
/* RSSI is the single byte immediately after the report data. */
4183 rssi = ev->data[ev->length];
4184 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4185 ev->bdaddr_type, rssi, ev->data, ev->length);
4187 ptr += sizeof(*ev) + ev->length + 1;
4190 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the LE
 * Long Term Key Request event: replies with a stored LTK matching the
 * ediv/rand (deleting one-shot slave STKs after use), or sends a negative
 * reply when no key is found. */
4193 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4195 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
4196 struct hci_cp_le_ltk_reply cp;
4197 struct hci_cp_le_ltk_neg_reply neg;
4198 struct hci_conn *conn;
4199 struct smp_ltk *ltk;
4201 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
4205 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4209 ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);
4213 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
4214 cp.handle = cpu_to_le16(conn->handle);
/* Pending security level reflects whether the key is authenticated. */
4216 if (ltk->authenticated)
4217 conn->pending_sec_level = BT_SECURITY_HIGH;
4219 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4221 conn->enc_key_size = ltk->enc_size;
4223 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
4225 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
4226 * temporary key used to encrypt a connection following
4227 * pairing. It is used during the Encrypted Session Setup to
4228 * distribute the keys. Later, security can be re-established
4229 * using a distributed LTK.
4231 if (ltk->type == HCI_SMP_STK_SLAVE) {
4232 list_del(&ltk->list);
4236 hci_dev_unlock(hdev);
/* Negative-reply path (label elided): no matching key stored. */
4241 neg.handle = ev->handle;
4242 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
4243 hci_dev_unlock(hdev);
/* NOTE(review): body incomplete (extraction dropped lines). Dispatches LE
 * meta events to the subevent-specific handlers after stripping the meta
 * header from the skb. */
4246 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4248 struct hci_ev_le_meta *le_ev = (void *) skb->data;
4250 skb_pull(skb, sizeof(*le_ev));
4252 switch (le_ev->subevent) {
4253 case HCI_EV_LE_CONN_COMPLETE:
4254 hci_le_conn_complete_evt(hdev, skb);
4257 case HCI_EV_LE_ADVERTISING_REPORT:
4258 hci_le_adv_report_evt(hdev, skb);
4261 case HCI_EV_LE_LTK_REQ:
4262 hci_le_ltk_request_evt(hdev, skb);
/* NOTE(review): body incomplete (extraction dropped lines). Handles the AMP
 * Channel Selected event: finds the physical-link hcon and triggers reading
 * of the local AMP assoc final data. */
4270 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4272 struct hci_ev_channel_selected *ev = (void *) skb->data;
4273 struct hci_conn *hcon;
4275 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4277 skb_pull(skb, sizeof(*ev));
4279 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4283 amp_read_loc_assoc_final_data(hdev, hcon);
4286 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
4288 struct hci_event_hdr *hdr = (void *) skb->data;
4289 __u8 event = hdr->evt;
4293 /* Received events are (currently) only needed when a request is
4294 * ongoing so avoid unnecessary memory allocation.
4296 if (hdev->req_status == HCI_REQ_PEND) {
4297 kfree_skb(hdev->recv_evt);
4298 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
4301 hci_dev_unlock(hdev);
4303 skb_pull(skb, HCI_EVENT_HDR_SIZE);
4305 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
4306 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
4307 u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
4309 hci_req_cmd_complete(hdev, opcode, 0);
4313 case HCI_EV_INQUIRY_COMPLETE:
4314 hci_inquiry_complete_evt(hdev, skb);
4317 case HCI_EV_INQUIRY_RESULT:
4318 hci_inquiry_result_evt(hdev, skb);
4321 case HCI_EV_CONN_COMPLETE:
4322 hci_conn_complete_evt(hdev, skb);
4325 case HCI_EV_CONN_REQUEST:
4326 hci_conn_request_evt(hdev, skb);
4329 case HCI_EV_DISCONN_COMPLETE:
4330 hci_disconn_complete_evt(hdev, skb);
4333 case HCI_EV_AUTH_COMPLETE:
4334 hci_auth_complete_evt(hdev, skb);
4337 case HCI_EV_REMOTE_NAME:
4338 hci_remote_name_evt(hdev, skb);
4341 case HCI_EV_ENCRYPT_CHANGE:
4342 hci_encrypt_change_evt(hdev, skb);
4345 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
4346 hci_change_link_key_complete_evt(hdev, skb);
4349 case HCI_EV_REMOTE_FEATURES:
4350 hci_remote_features_evt(hdev, skb);
4353 case HCI_EV_CMD_COMPLETE:
4354 hci_cmd_complete_evt(hdev, skb);
4357 case HCI_EV_CMD_STATUS:
4358 hci_cmd_status_evt(hdev, skb);
4361 case HCI_EV_ROLE_CHANGE:
4362 hci_role_change_evt(hdev, skb);
4365 case HCI_EV_NUM_COMP_PKTS:
4366 hci_num_comp_pkts_evt(hdev, skb);
4369 case HCI_EV_MODE_CHANGE:
4370 hci_mode_change_evt(hdev, skb);
4373 case HCI_EV_PIN_CODE_REQ:
4374 hci_pin_code_request_evt(hdev, skb);
4377 case HCI_EV_LINK_KEY_REQ:
4378 hci_link_key_request_evt(hdev, skb);
4381 case HCI_EV_LINK_KEY_NOTIFY:
4382 hci_link_key_notify_evt(hdev, skb);
4385 case HCI_EV_CLOCK_OFFSET:
4386 hci_clock_offset_evt(hdev, skb);
4389 case HCI_EV_PKT_TYPE_CHANGE:
4390 hci_pkt_type_change_evt(hdev, skb);
4393 case HCI_EV_PSCAN_REP_MODE:
4394 hci_pscan_rep_mode_evt(hdev, skb);
4397 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
4398 hci_inquiry_result_with_rssi_evt(hdev, skb);
4401 case HCI_EV_REMOTE_EXT_FEATURES:
4402 hci_remote_ext_features_evt(hdev, skb);
4405 case HCI_EV_SYNC_CONN_COMPLETE:
4406 hci_sync_conn_complete_evt(hdev, skb);
4409 case HCI_EV_EXTENDED_INQUIRY_RESULT:
4410 hci_extended_inquiry_result_evt(hdev, skb);
4413 case HCI_EV_KEY_REFRESH_COMPLETE:
4414 hci_key_refresh_complete_evt(hdev, skb);
4417 case HCI_EV_IO_CAPA_REQUEST:
4418 hci_io_capa_request_evt(hdev, skb);
4421 case HCI_EV_IO_CAPA_REPLY:
4422 hci_io_capa_reply_evt(hdev, skb);
4425 case HCI_EV_USER_CONFIRM_REQUEST:
4426 hci_user_confirm_request_evt(hdev, skb);
4429 case HCI_EV_USER_PASSKEY_REQUEST:
4430 hci_user_passkey_request_evt(hdev, skb);
4433 case HCI_EV_USER_PASSKEY_NOTIFY:
4434 hci_user_passkey_notify_evt(hdev, skb);
4437 case HCI_EV_KEYPRESS_NOTIFY:
4438 hci_keypress_notify_evt(hdev, skb);
4441 case HCI_EV_SIMPLE_PAIR_COMPLETE:
4442 hci_simple_pair_complete_evt(hdev, skb);
4445 case HCI_EV_REMOTE_HOST_FEATURES:
4446 hci_remote_host_features_evt(hdev, skb);
4449 case HCI_EV_LE_META:
4450 hci_le_meta_evt(hdev, skb);
4453 case HCI_EV_CHANNEL_SELECTED:
4454 hci_chan_selected_evt(hdev, skb);
4457 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
4458 hci_remote_oob_data_request_evt(hdev, skb);
4461 case HCI_EV_PHY_LINK_COMPLETE:
4462 hci_phy_link_complete_evt(hdev, skb);
4465 case HCI_EV_LOGICAL_LINK_COMPLETE:
4466 hci_loglink_complete_evt(hdev, skb);
4469 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
4470 hci_disconn_loglink_complete_evt(hdev, skb);
4473 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
4474 hci_disconn_phylink_complete_evt(hdev, skb);
4477 case HCI_EV_NUM_COMP_BLOCKS:
4478 hci_num_comp_blocks_evt(hdev, skb);
4482 BT_DBG("%s event 0x%2.2x", hdev->name, event);
4487 hdev->stat.evt_rx++;