TOMOYO Linux Cross Reference
Linux/net/bluetooth/hci_core.c

/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/kcov.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

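/* Usage sketch (illustration only, not part of this file): hci_dev_get()
 * returns the device with a reference held, so callers must balance it
 * with hci_dev_put():
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		... use the device ...
 *		hci_dev_put(hdev);
 *	}
 */
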
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

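/* Update the discovery state machine. Entering DISCOVERY_FINDING
 * reports "discovering" to the management interface; returning to
 * DISCOVERY_STOPPED reports it as stopped (unless we never got past
 * DISCOVERY_STARTING) and re-evaluates passive scanning.
 */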
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_passive_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        bt_dev_dbg(hdev, "state %u -> %u", old_state, state);
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

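/* Re-insert @ie into the resolve list, which is kept ordered by
 * ascending |RSSI| so that name resolution is attempted for the
 * strongest responders first. Entries whose name request is already
 * pending keep their position.
 */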
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

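/* Add or refresh the inquiry cache entry for @data. Returns a bitmask
 * of MGMT_DEV_FOUND_* flags for mgmt_device_found(): CONFIRM_NAME when
 * the remote name is still unknown and LEGACY_PAIRING when the device
 * does not support Simple Pairing.
 */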
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

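/* HCIINQUIRY ioctl handler: optionally flush the inquiry cache, run a
 * synchronous inquiry and copy the cached results back to userspace.
 * Rough userspace sketch (illustration only, assuming a raw HCI socket
 * in hci_sock_fd; length is in 1.28 s units):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} req = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 8,
 *			  .flags = IREQ_CACHE_FLUSH } };
 *
 *	ioctl(hci_sock_fd, HCIINQUIRY, &req);
 */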
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* Restrict maximum inquiry length to 60 seconds */
        if (ir.length > 60) {
                err = -EINVAL;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        if (do_inquiry) {
                hci_req_sync_lock(hdev);
                err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp);
                hci_req_sync_unlock(hdev);

                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary
         * buffer and then copy it to user space.
         */
        buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        ret = hci_dev_open_sync(hdev);

        hci_req_sync_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
         * possible.
         *
         * When this function is called for a user channel, the
         * HCI_USER_CHANNEL flag will be set first before attempting to
         * open the device.
         */
        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        /* For controllers not using the management interface and that
         * are brought up using legacy ioctl, set the HCI_BONDABLE bit
         * so that pairing works for them. Once the management interface
         * is in use this bit will be cleared again and userspace has
         * to explicitly enable it.
         */
        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            !hci_dev_test_flag(hdev, HCI_MGMT))
                hci_dev_set_flag(hdev, HCI_BONDABLE);

        err = hci_dev_do_open(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

int hci_dev_do_close(struct hci_dev *hdev)
{
        int err;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        err = hci_dev_close_sync(hdev);

        hci_req_sync_unlock(hdev);

        return err;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        cancel_work_sync(&hdev->power_on);
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
        int ret;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        /* Cancel these to avoid queueing non-chained pending work */
        hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
        /* Wait for
         *
         *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
         *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
         *
         * inside RCU section to see the flag or complete scheduling.
         */
        synchronize_rcu();
        /* Explicitly cancel works in case scheduled after setting the flag. */
        cancel_delayed_work(&hdev->cmd_timer);
        cancel_delayed_work(&hdev->ncmd_timer);

        /* Avoid potential lockdep warnings from the *_flush() calls by
         * ensuring the workqueue is empty up front.
         */
        drain_workqueue(hdev->workqueue);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0;
        hdev->sco_cnt = 0;
        hdev->le_cnt = 0;
        hdev->iso_cnt = 0;

        ret = hci_reset_sync(hdev);

        hci_req_sync_unlock(hdev);
        return ret;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = -ENETDOWN;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        err = hci_dev_do_reset(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                ret = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                ret = -EOPNOTSUPP;
                goto done;
        }

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
        hci_dev_put(hdev);
        return ret;
}

static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
        bool conn_changed, discov_changed;

        BT_DBG("%s scan 0x%02x", hdev->name, scan);

        if ((scan & SCAN_PAGE))
                conn_changed = !hci_dev_test_and_set_flag(hdev,
                                                          HCI_CONNECTABLE);
        else
                conn_changed = hci_dev_test_and_clear_flag(hdev,
                                                           HCI_CONNECTABLE);

        if ((scan & SCAN_INQUIRY)) {
                discov_changed = !hci_dev_test_and_set_flag(hdev,
                                                            HCI_DISCOVERABLE);
        } else {
                hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
                discov_changed = hci_dev_test_and_clear_flag(hdev,
                                                             HCI_DISCOVERABLE);
        }

        if (!hci_dev_test_flag(hdev, HCI_MGMT))
                return;

        if (conn_changed || discov_changed) {
                /* In case this was disabled through mgmt */
                hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

                if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                        hci_update_adv_data(hdev, hdev->cur_adv_instance);

                mgmt_new_settings(hdev);
        }
}

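/* Handler for the legacy HCISET* ioctls, which tweak controller
 * settings directly without going through the management interface.
 * The single dr.dev_opt word carries the option value; the MTU ioctls
 * pack the packet count and MTU into its two 16-bit halves.
 */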
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        __le16 policy;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        switch (cmd) {
        case HCISETAUTH:
                err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
                                          1, &dr.dev_opt, HCI_CMD_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_cmd_sync_status(hdev,
                                                  HCI_OP_WRITE_AUTH_ENABLE,
                                                  1, &dr.dev_opt,
                                                  HCI_CMD_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
                                          1, &dr.dev_opt, HCI_CMD_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
                                          1, &dr.dev_opt, HCI_CMD_TIMEOUT);

                /* Ensure that the connectable and discoverable states
                 * get correctly modified as this was a non-mgmt change.
                 */
                if (!err)
                        hci_update_passive_scan_state(hdev, dr.dev_opt);
                break;

        case HCISETLINKPOL:
                policy = cpu_to_le16(dr.dev_opt);

                err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
                                          2, &policy, HCI_CMD_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                if (hdev->pkt_type == (__u16) dr.dev_opt)
                        break;

                hdev->pkt_type = (__u16) dr.dev_opt;
                mgmt_phy_configuration_changed(hdev, NULL);
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

done:
        hci_dev_put(hdev);
        return err;
}

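/* HCIGETDEVLIST ioctl handler: returns up to dev_num (dev_id, flags)
 * pairs, one per registered controller.
 */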
int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dl->dev_num = dev_num;
        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                unsigned long flags = hdev->flags;

                /* When the auto-off is configured it means the transport
                 * is running, but in that case still indicate that the
                 * device is actually down.
                 */
                if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
                        flags &= ~BIT(HCI_UP);

                dr[n].dev_id  = hdev->id;
                dr[n].dev_opt = flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        err = copy_to_user(arg, dl, struct_size(dl, dev_req, n));
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        unsigned long flags;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        /* When the auto-off is configured it means the transport
         * is running, but in that case still indicate that the
         * device is actually down.
         */
        if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
                flags = hdev->flags & ~BIT(HCI_UP);
        else
                flags = hdev->flags;

        strscpy(di.name, hdev->name, sizeof(di.name));
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f);
        di.flags    = flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_dev_do_poweroff(struct hci_dev *hdev)
{
        int err;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        err = hci_set_powered_sync(hdev, false);

        hci_req_sync_unlock(hdev);

        return err;
}

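/* rfkill callback: mirror the block state into HCI_RFKILLED and power
 * the controller off when it gets blocked, unless setup or config is
 * still in progress, in which case hci_power_on checks the flag
 * instead.
 */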
static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;
        int err;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                return -EBUSY;

        if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
                return 0;

        if (blocked) {
                hci_dev_set_flag(hdev, HCI_RFKILLED);

                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                        err = hci_dev_do_poweroff(hdev);
                        if (err) {
                                bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
                                           err);

                                /* Make sure the device is still closed even if
                                 * anything in the power off sequence (e.g.
                                 * disconnecting devices) failed.
                                 */
                                hci_dev_do_close(hdev);
                        }
                }
        } else {
                hci_dev_clear_flag(hdev, HCI_RFKILLED);
        }

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

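/* Worker for hdev->power_on, queued at device registration and by the
 * management Set Powered command. Brings the device up and then
 * re-checks conditions (rfkill, missing address, unconfigured state)
 * that were deliberately ignored during setup.
 */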
static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
        int err;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_UP, &hdev->flags) &&
            hci_dev_test_flag(hdev, HCI_MGMT) &&
            hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
                cancel_delayed_work(&hdev->power_off);
                err = hci_powered_update_sync(hdev);
                mgmt_power_on(hdev, err);
                return;
        }

        err = hci_dev_do_open(hdev);
        if (err < 0) {
                hci_dev_lock(hdev);
                mgmt_set_powered_failed(hdev, err);
                hci_dev_unlock(hdev);
                return;
        }

        /* During the HCI setup phase, a few error conditions are
         * ignored and they need to be checked now. If they are still
         * valid, it is important to turn the device back off.
         */
        if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
            hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
            (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
             !bacmp(&hdev->static_addr, BDADDR_ANY))) {
                hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
                hci_dev_do_close(hdev);
        } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);
        }

        if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
                /* For unconfigured devices, set the HCI_RAW flag
                 * so that userspace can easily identify them.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        set_bit(HCI_RAW, &hdev->flags);

                /* For fully configured devices, this will send
                 * the Index Added event. For unconfigured devices,
                 * it will send the Unconfigured Index Added event.
                 *
                 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
                 * and no event will be sent.
                 */
                mgmt_index_added(hdev);
        } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
                /* When the controller is now configured, then it
                 * is important to clear the HCI_RAW flag.
                 */
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        clear_bit(HCI_RAW, &hdev->flags);

                /* Powering on the controller with HCI_CONFIG set only
                 * happens with the transition from unconfigured to
                 * configured. This will send the Index Added event.
                 */
                mgmt_index_added(hdev);
        }
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

        hci_dev_hold(hdev);
        BT_DBG("%s", hdev->name);

        if (hdev->hw_error)
                hdev->hw_error(hdev, hdev->hw_error_code);
        else
                bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

        if (!hci_dev_do_close(hdev))
                hci_dev_do_open(hdev);

        hci_dev_put(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
        struct link_key *key, *tmp;

        list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
                list_del_rcu(&key->list);
                kfree_rcu(key, rcu);
        }
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
        struct smp_irk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
        struct blocked_key *b, *tmp;

        list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
                list_del_rcu(&b->list);
                kfree_rcu(b, rcu);
        }
}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
        bool blocked = false;
        struct blocked_key *b;

        rcu_read_lock();
        list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
                if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
                        blocked = true;
                        break;
                }
        }

        rcu_read_unlock();
        return blocked;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->link_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) == 0) {
                        rcu_read_unlock();

                        if (hci_is_blocked_key(hdev,
                                               HCI_BLOCKED_KEY_TYPE_LINKKEY,
                                               k->val)) {
                                bt_dev_warn_ratelimited(hdev,
                                                        "Link key blocked for %pMR",
                                                        &k->bdaddr);
                                return NULL;
                        }

                        return k;
                }
        }
        rcu_read_unlock();

        return NULL;
}

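/* Decide whether a newly created BR/EDR link key should be stored
 * persistently. Returns false for keys that must not outlive the
 * current connection, such as debug keys and keys from no-bonding
 * pairings.
 */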
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* BR/EDR key derived using SC from an LE link */
        if (conn->type == LE_LINK)
                return true;

        /* Neither local nor remote side had no-bonding as requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return false;
}

static u8 ltk_role(u8 type)
{
        if (type == SMP_LTK)
                return HCI_ROLE_MASTER;

        return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                             u8 addr_type, u8 role)
{
        struct smp_ltk *k;

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
                        continue;

                if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
                        rcu_read_unlock();

                        if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
                                               k->val)) {
                                bt_dev_warn_ratelimited(hdev,
                                                        "LTK blocked for %pMR",
                                                        &k->bdaddr);
                                return NULL;
                        }

                        return k;
                }
        }
        rcu_read_unlock();

        return NULL;
}

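/* Resolve a Resolvable Private Address to a stored IRK: first try the
 * RPA each IRK last resolved to, then fall back to running the
 * resolution function over every IRK, caching the RPA on a match.
 */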
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
        struct smp_irk *irk_to_return = NULL;
        struct smp_irk *irk;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (!bacmp(&irk->rpa, rpa)) {
                        irk_to_return = irk;
                        goto done;
                }
        }

        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (smp_irk_matches(hdev, irk->val, rpa)) {
                        bacpy(&irk->rpa, rpa);
                        irk_to_return = irk;
                        goto done;
                }
        }

done:
        if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
                                                irk_to_return->val)) {
                bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
                                        &irk_to_return->bdaddr);
                irk_to_return = NULL;
        }

        rcu_read_unlock();

        return irk_to_return;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_irk *irk_to_return = NULL;
        struct smp_irk *irk;

        /* Identity Address must be public or static random */
        if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
                return NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (addr_type == irk->addr_type &&
                    bacmp(bdaddr, &irk->bdaddr) == 0) {
                        irk_to_return = irk;
                        goto done;
                }
        }

done:

        if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
                                                irk_to_return->val)) {
                bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
                                        &irk_to_return->bdaddr);
                irk_to_return = NULL;
        }

        rcu_read_unlock();

        return irk_to_return;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
                                  bdaddr_t *bdaddr, u8 *val, u8 type,
                                  u8 pin_len, bool *persistent)
{
        struct link_key *key, *old_key;
        u8 old_key_type;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_KERNEL);
                if (!key)
                        return NULL;
                list_add_rcu(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (persistent)
                *persistent = hci_persistent_key(hdev, conn, type,
                                                 old_key_type);

        return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 addr_type, u8 type, u8 authenticated,
                            u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
        struct smp_ltk *key, *old_key;
        u8 role = ltk_role(type);

        old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_KERNEL);
                if (!key)
                        return NULL;
                list_add_rcu(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->rand = rand;
        key->enc_size = enc_size;
        key->type = type;

        return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
        struct smp_irk *irk;

        irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
        if (!irk) {
                irk = kzalloc(sizeof(*irk), GFP_KERNEL);
                if (!irk)
                        return NULL;

                bacpy(&irk->bdaddr, bdaddr);
                irk->addr_type = addr_type;

                list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
        }

        memcpy(irk->val, val, 16);
        bacpy(&irk->rpa, rpa);

        return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del_rcu(&key->list);
        kfree_rcu(key, rcu);

        return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
        struct smp_ltk *k, *tmp;
        int removed = 0;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
                removed++;
        }

        return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
        struct smp_irk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}

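/* Check whether we share keys with the given device: a link key for
 * BR/EDR addresses, otherwise an LTK for the (IRK-resolved) LE
 * identity address.
 */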
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct smp_ltk *k;
        struct smp_irk *irk;
        u8 addr_type;

        if (type == BDADDR_BREDR) {
                if (hci_find_link_key(hdev, bdaddr))
                        return true;
                return false;
        }

        /* Convert to HCI addr type which struct smp_ltk uses */
        if (type == BDADDR_LE_PUBLIC)
                addr_type = ADDR_LE_DEV_PUBLIC;
        else
                addr_type = ADDR_LE_DEV_RANDOM;

        irk = hci_get_irk(hdev, bdaddr, addr_type);
        if (irk) {
                bdaddr = &irk->bdaddr;
                addr_type = irk->addr_type;
        }

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
                        rcu_read_unlock();
                        return true;
                }
        }
        rcu_read_unlock();

        return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            cmd_timer.work);

        if (hdev->req_skb) {
                u16 opcode = hci_skb_opcode(hdev->req_skb);

                bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);

                hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
        } else {
                bt_dev_err(hdev, "command tx timeout");
        }

        if (hdev->cmd_timeout)
                hdev->cmd_timeout(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

/* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            ncmd_timer.work);

        bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

        /* During HCI_INIT phase no events can be injected if the ncmd timer
         * triggers since the procedure has its own timeout handling.
         */
        if (test_bit(HCI_INIT, &hdev->flags))
                return;

        /* This is an irrecoverable state, inject hardware error event */
        hci_reset_dev(hdev);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr, u8 bdaddr_type)
{
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list) {
                if (bacmp(bdaddr, &data->bdaddr) != 0)
                        continue;
                if (data->bdaddr_type != bdaddr_type)
                        continue;
                return data;
        }

        return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
                               u8 bdaddr_type)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
        if (!data)
                return -ENOENT;

        BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

        list_del(&data->list);
        kfree(data);

        return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);
                kfree(data);
        }
}

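/* Store remote OOB pairing data. data->present encodes which hash and
 * randomizer pairs are valid: 0x01 for P-192 only, 0x02 for P-256
 * only, 0x03 for both.
 */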
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 bdaddr_type, u8 *hash192, u8 *rand192,
                            u8 *hash256, u8 *rand256)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
        if (!data) {
                data = kmalloc(sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;

                bacpy(&data->bdaddr, bdaddr);
                data->bdaddr_type = bdaddr_type;
                list_add(&data->list, &hdev->remote_oob_data);
        }

        if (hash192 && rand192) {
                memcpy(data->hash192, hash192, sizeof(data->hash192));
                memcpy(data->rand192, rand192, sizeof(data->rand192));
                if (hash256 && rand256)
                        data->present = 0x03;
        } else {
                memset(data->hash192, 0, sizeof(data->hash192));
                memset(data->rand192, 0, sizeof(data->rand192));
                if (hash256 && rand256)
                        data->present = 0x02;
                else
                        data->present = 0x00;
        }

        if (hash256 && rand256) {
                memcpy(data->hash256, hash256, sizeof(data->hash256));
                memcpy(data->rand256, rand256, sizeof(data->rand256));
        } else {
                memset(data->hash256, 0, sizeof(data->hash256));
                memset(data->rand256, 0, sizeof(data->rand256));
                if (hash192 && rand192)
                        data->present = 0x01;
        }

        BT_DBG("%s for %pMR", hdev->name, bdaddr);

        return 0;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *adv_instance;

        list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
                if (adv_instance->instance == instance)
                        return adv_instance;
        }

        return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *cur_instance;

        cur_instance = hci_find_adv_instance(hdev, instance);
        if (!cur_instance)
                return NULL;

        if (cur_instance == list_last_entry(&hdev->adv_instances,
                                            struct adv_info, list))
                return list_first_entry(&hdev->adv_instances,
                                                 struct adv_info, list);
        else
                return list_next_entry(cur_instance, list);
}

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *adv_instance;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return -ENOENT;

1613         BT_DBG("%s removing instance %d", hdev->name, instance);
1614 
1615         if (hdev->cur_adv_instance == instance) {
1616                 if (hdev->adv_instance_timeout) {
1617                         cancel_delayed_work(&hdev->adv_instance_expire);
1618                         hdev->adv_instance_timeout = 0;
1619                 }
1620                 hdev->cur_adv_instance = 0x00;
1621         }
1622 
1623         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1624 
1625         list_del(&adv_instance->list);
1626         kfree(adv_instance);
1627 
1628         hdev->adv_instance_cnt--;
1629 
1630         return 0;
1631 }
1632 
1633 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1634 {
1635         struct adv_info *adv_instance, *n;
1636 
1637         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1638                 adv_instance->rpa_expired = rpa_expired;
1639 }
1640 
1641 /* This function requires the caller holds hdev->lock */
1642 void hci_adv_instances_clear(struct hci_dev *hdev)
1643 {
1644         struct adv_info *adv_instance, *n;
1645 
1646         if (hdev->adv_instance_timeout) {
1647                 cancel_delayed_work(&hdev->adv_instance_expire);
1648                 hdev->adv_instance_timeout = 0;
1649         }
1650 
1651         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1652                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1653                 list_del(&adv_instance->list);
1654                 kfree(adv_instance);
1655         }
1656 
1657         hdev->adv_instance_cnt = 0;
1658         hdev->cur_adv_instance = 0x00;
1659 }
1660 
1661 static void adv_instance_rpa_expired(struct work_struct *work)
1662 {
1663         struct adv_info *adv_instance = container_of(work, struct adv_info,
1664                                                      rpa_expired_cb.work);
1665 
1666         BT_DBG("");
1667 
1668         adv_instance->rpa_expired = true;
1669 }
1670 
1671 /* This function requires the caller holds hdev->lock */
1672 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1673                                       u32 flags, u16 adv_data_len, u8 *adv_data,
1674                                       u16 scan_rsp_len, u8 *scan_rsp_data,
1675                                       u16 timeout, u16 duration, s8 tx_power,
1676                                       u32 min_interval, u32 max_interval,
1677                                       u8 mesh_handle)
1678 {
1679         struct adv_info *adv;
1680 
1681         adv = hci_find_adv_instance(hdev, instance);
1682         if (adv) {
1683                 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1684                 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1685                 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1686         } else {
1687                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1688                     instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1689                         return ERR_PTR(-EOVERFLOW);
1690 
1691                 adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1692                 if (!adv)
1693                         return ERR_PTR(-ENOMEM);
1694 
1695                 adv->pending = true;
1696                 adv->instance = instance;
1697 
1698                 /* If the controller supports only one set and the instance
1699                  * is set to 1, there is no option other than handle 0x00.
1700                  */
1701                 if (hdev->le_num_of_adv_sets == 1 && instance == 1)
1702                         adv->handle = 0x00;
1703                 else
1704                         adv->handle = instance;
1705 
1706                 list_add(&adv->list, &hdev->adv_instances);
1707                 hdev->adv_instance_cnt++;
1708         }
1709 
1710         adv->flags = flags;
1711         adv->min_interval = min_interval;
1712         adv->max_interval = max_interval;
1713         adv->tx_power = tx_power;
1714         /* Defining a mesh_handle changes the timing units to ms,
1715          * rather than seconds, and ties the instance to the requested
1716          * mesh_tx queue.
1717          */
1718         adv->mesh = mesh_handle;
1719 
1720         hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1721                                   scan_rsp_len, scan_rsp_data);
1722 
1723         adv->timeout = timeout;
1724         adv->remaining_time = timeout;
1725 
1726         if (duration == 0)
1727                 adv->duration = hdev->def_multi_adv_rotation_duration;
1728         else
1729                 adv->duration = duration;
1730 
1731         INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1732 
1733         BT_DBG("%s instance %d", hdev->name, instance);
1734 
1735         return adv;
1736 }
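
/* Example (illustrative sketch): registering an advertising instance with a
 * 10 second timeout (seconds because mesh_handle is 0) and duration 0,
 * which falls back to hdev->def_multi_adv_rotation_duration. The data
 * buffer is hypothetical; the caller must hold hdev->lock.
 */
static int example_add_instance(struct hci_dev *hdev, u8 *ad, u16 ad_len)
{
        struct adv_info *adv;

        adv = hci_add_adv_instance(hdev, 0x01, 0, ad_len, ad, 0, NULL,
                                   10, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
                                   hdev->le_adv_min_interval,
                                   hdev->le_adv_max_interval, 0);

        return IS_ERR(adv) ? PTR_ERR(adv) : 0;
}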
1737 
1738 /* This function requires the caller holds hdev->lock */
1739 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1740                                       u32 flags, u8 data_len, u8 *data,
1741                                       u32 min_interval, u32 max_interval)
1742 {
1743         struct adv_info *adv;
1744 
1745         adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1746                                    0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1747                                    min_interval, max_interval, 0);
1748         if (IS_ERR(adv))
1749                 return adv;
1750 
1751         adv->periodic = true;
1752         adv->per_adv_data_len = data_len;
1753 
1754         if (data)
1755                 memcpy(adv->per_adv_data, data, data_len);
1756 
1757         return adv;
1758 }
1759 
1760 /* This function requires the caller holds hdev->lock */
1761 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1762                               u16 adv_data_len, u8 *adv_data,
1763                               u16 scan_rsp_len, u8 *scan_rsp_data)
1764 {
1765         struct adv_info *adv;
1766 
1767         adv = hci_find_adv_instance(hdev, instance);
1768 
1769         /* If the advertisement doesn't exist, we can't modify its data */
1770         if (!adv)
1771                 return -ENOENT;
1772 
1773         if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1774                 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1775                 memcpy(adv->adv_data, adv_data, adv_data_len);
1776                 adv->adv_data_len = adv_data_len;
1777                 adv->adv_data_changed = true;
1778         }
1779 
1780         if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1781                 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1782                 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1783                 adv->scan_rsp_len = scan_rsp_len;
1784                 adv->scan_rsp_changed = true;
1785         }
1786 
1787         /* Mark as changed if there are flags which would affect it */
1788         if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1789             adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1790                 adv->scan_rsp_changed = true;
1791 
1792         return 0;
1793 }
1794 
1795 /* This function requires the caller holds hdev->lock */
1796 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1797 {
1798         u32 flags;
1799         struct adv_info *adv;
1800 
1801         if (instance == 0x00) {
1802                 /* Instance 0 always manages the "Tx Power" and "Flags"
1803                  * fields
1804                  */
1805                 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1806 
1807                 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1808                  * corresponds to the "connectable" instance flag.
1809                  */
1810                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1811                         flags |= MGMT_ADV_FLAG_CONNECTABLE;
1812 
1813                 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1814                         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1815                 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1816                         flags |= MGMT_ADV_FLAG_DISCOV;
1817 
1818                 return flags;
1819         }
1820 
1821         adv = hci_find_adv_instance(hdev, instance);
1822 
1823         /* Return 0 when given an invalid instance identifier. */
1824         if (!adv)
1825                 return 0;
1826 
1827         return adv->flags;
1828 }
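
/* Example (illustrative sketch): a hypothetical helper built on the flags
 * returned above, asking whether an instance advertises as discoverable.
 * Caller must hold hdev->lock.
 */
static bool example_adv_is_discoverable(struct hci_dev *hdev, u8 instance)
{
        u32 flags = hci_adv_instance_flags(hdev, instance);

        return flags & (MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV);
}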
1829 
1830 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1831 {
1832         struct adv_info *adv;
1833 
1834         /* Instance 0x00 always sets the local name */
1835         if (instance == 0x00)
1836                 return true;
1837 
1838         adv = hci_find_adv_instance(hdev, instance);
1839         if (!adv)
1840                 return false;
1841 
1842         if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1843             adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1844                 return true;
1845 
1846         return adv->scan_rsp_len;
1847 }
1848 
1849 /* This function requires the caller holds hdev->lock */
1850 void hci_adv_monitors_clear(struct hci_dev *hdev)
1851 {
1852         struct adv_monitor *monitor;
1853         int handle;
1854 
1855         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1856                 hci_free_adv_monitor(hdev, monitor);
1857 
1858         idr_destroy(&hdev->adv_monitors_idr);
1859 }
1860 
1861 /* Frees the monitor structure and does some bookkeeping.
1862  * This function requires the caller holds hdev->lock.
1863  */
1864 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1865 {
1866         struct adv_pattern *pattern;
1867         struct adv_pattern *tmp;
1868 
1869         if (!monitor)
1870                 return;
1871 
1872         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1873                 list_del(&pattern->list);
1874                 kfree(pattern);
1875         }
1876 
1877         if (monitor->handle)
1878                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1879 
1880         if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1881                 hdev->adv_monitors_cnt--;
1882                 mgmt_adv_monitor_removed(hdev, monitor->handle);
1883         }
1884 
1885         kfree(monitor);
1886 }
1887 
1888 /* Assigns a handle to a monitor and, if offloading is supported and power
1889  * is on, also attempts to forward the request to the controller.
1890  * This function requires the caller holds hci_req_sync_lock.
1891  */
1892 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1893 {
1894         int min, max, handle;
1895         int status = 0;
1896 
1897         if (!monitor)
1898                 return -EINVAL;
1899 
1900         hci_dev_lock(hdev);
1901 
1902         min = HCI_MIN_ADV_MONITOR_HANDLE;
1903         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1904         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1905                            GFP_KERNEL);
1906 
1907         hci_dev_unlock(hdev);
1908 
1909         if (handle < 0)
1910                 return handle;
1911 
1912         monitor->handle = handle;
1913 
1914         if (!hdev_is_powered(hdev))
1915                 return status;
1916 
1917         switch (hci_get_adv_monitor_offload_ext(hdev)) {
1918         case HCI_ADV_MONITOR_EXT_NONE:
1919                 bt_dev_dbg(hdev, "add monitor %d status %d",
1920                            monitor->handle, status);
1921                 /* Message was not forwarded to controller - not an error */
1922                 break;
1923 
1924         case HCI_ADV_MONITOR_EXT_MSFT:
1925                 status = msft_add_monitor_pattern(hdev, monitor);
1926                 bt_dev_dbg(hdev, "add monitor %d msft status %d",
1927                            handle, status);
1928                 break;
1929         }
1930 
1931         return status;
1932 }
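
/* Example (illustrative sketch): idr_alloc() above treats "max" as
 * exclusive, so valid monitor handles fall in
 * [HCI_MIN_ADV_MONITOR_HANDLE,
 *  HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES).
 * A hypothetical caller holding hci_req_sync_lock might clean up on
 * failure like this:
 */
static int example_register_monitor(struct hci_dev *hdev,
                                    struct adv_monitor *monitor)
{
        int err = hci_add_adv_monitor(hdev, monitor);

        if (err < 0) {
                /* On failure the monitor is still owned by the caller
                 * and must be released under hdev->lock.
                 */
                hci_dev_lock(hdev);
                hci_free_adv_monitor(hdev, monitor);
                hci_dev_unlock(hdev);
        }

        return err;
}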
1933 
1934 /* Attempts to tell the controller to remove the monitor and then frees it.
1935  * If the controller somehow has no corresponding handle, remove it anyway.
1936  * This function requires the caller holds hci_req_sync_lock.
1937  */
1938 static int hci_remove_adv_monitor(struct hci_dev *hdev,
1939                                   struct adv_monitor *monitor)
1940 {
1941         int status = 0;
1942         int handle;
1943 
1944         switch (hci_get_adv_monitor_offload_ext(hdev)) {
1945         case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1946                 bt_dev_dbg(hdev, "remove monitor %d status %d",
1947                            monitor->handle, status);
1948                 goto free_monitor;
1949 
1950         case HCI_ADV_MONITOR_EXT_MSFT:
1951                 handle = monitor->handle;
1952                 status = msft_remove_monitor(hdev, monitor);
1953                 bt_dev_dbg(hdev, "remove monitor %d msft status %d",
1954                            handle, status);
1955                 break;
1956         }
1957 
1958         /* In case no matching handle is registered, just free the monitor */
1959         if (status == -ENOENT)
1960                 goto free_monitor;
1961 
1962         return status;
1963 
1964 free_monitor:
1965         if (status == -ENOENT)
1966                 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1967                             monitor->handle);
1968         hci_free_adv_monitor(hdev, monitor);
1969 
1970         return status;
1971 }
1972 
1973 /* This function requires the caller holds hci_req_sync_lock */
1974 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
1975 {
1976         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1977 
1978         if (!monitor)
1979                 return -EINVAL;
1980 
1981         return hci_remove_adv_monitor(hdev, monitor);
1982 }
1983 
1984 /* This function requires the caller holds hci_req_sync_lock */
1985 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
1986 {
1987         struct adv_monitor *monitor;
1988         int idr_next_id = 0;
1989         int status = 0;
1990 
1991         while (1) {
1992                 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
1993                 if (!monitor)
1994                         break;
1995 
1996                 status = hci_remove_adv_monitor(hdev, monitor);
1997                 if (status)
1998                         return status;
1999 
2000                 idr_next_id++;
2001         }
2002 
2003         return status;
2004 }
2005 
2006 /* This function requires the caller holds hdev->lock */
2007 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2008 {
2009         return !idr_is_empty(&hdev->adv_monitors_idr);
2010 }
2011 
2012 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2013 {
2014         if (msft_monitor_supported(hdev))
2015                 return HCI_ADV_MONITOR_EXT_MSFT;
2016 
2017         return HCI_ADV_MONITOR_EXT_NONE;
2018 }
2019 
2020 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2021                                          bdaddr_t *bdaddr, u8 type)
2022 {
2023         struct bdaddr_list *b;
2024 
2025         list_for_each_entry(b, bdaddr_list, list) {
2026                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2027                         return b;
2028         }
2029 
2030         return NULL;
2031 }
2032 
2033 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2034                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2035                                 u8 type)
2036 {
2037         struct bdaddr_list_with_irk *b;
2038 
2039         list_for_each_entry(b, bdaddr_list, list) {
2040                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2041                         return b;
2042         }
2043 
2044         return NULL;
2045 }
2046 
2047 struct bdaddr_list_with_flags *
2048 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2049                                   bdaddr_t *bdaddr, u8 type)
2050 {
2051         struct bdaddr_list_with_flags *b;
2052 
2053         list_for_each_entry(b, bdaddr_list, list) {
2054                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2055                         return b;
2056         }
2057 
2058         return NULL;
2059 }
2060 
2061 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2062 {
2063         struct bdaddr_list *b, *n;
2064 
2065         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2066                 list_del(&b->list);
2067                 kfree(b);
2068         }
2069 }
2070 
2071 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2072 {
2073         struct bdaddr_list *entry;
2074 
2075         if (!bacmp(bdaddr, BDADDR_ANY))
2076                 return -EBADF;
2077 
2078         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2079                 return -EEXIST;
2080 
2081         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2082         if (!entry)
2083                 return -ENOMEM;
2084 
2085         bacpy(&entry->bdaddr, bdaddr);
2086         entry->bdaddr_type = type;
2087 
2088         list_add(&entry->list, list);
2089 
2090         return 0;
2091 }
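
/* Example (illustrative sketch): a typical accept-list round trip using the
 * error conventions above: -EBADF for BDADDR_ANY, -EEXIST for a duplicate
 * entry, -ENOMEM on allocation failure. The helper is hypothetical.
 */
static int example_accept_peer(struct hci_dev *hdev, bdaddr_t *peer)
{
        int err;

        err = hci_bdaddr_list_add(&hdev->le_accept_list, peer,
                                  ADDR_LE_DEV_PUBLIC);
        if (err == -EEXIST)     /* already present: treat as success */
                err = 0;

        return err;
}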
2092 
2093 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2094                                         u8 type, u8 *peer_irk, u8 *local_irk)
2095 {
2096         struct bdaddr_list_with_irk *entry;
2097 
2098         if (!bacmp(bdaddr, BDADDR_ANY))
2099                 return -EBADF;
2100 
2101         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2102                 return -EEXIST;
2103 
2104         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2105         if (!entry)
2106                 return -ENOMEM;
2107 
2108         bacpy(&entry->bdaddr, bdaddr);
2109         entry->bdaddr_type = type;
2110 
2111         if (peer_irk)
2112                 memcpy(entry->peer_irk, peer_irk, 16);
2113 
2114         if (local_irk)
2115                 memcpy(entry->local_irk, local_irk, 16);
2116 
2117         list_add(&entry->list, list);
2118 
2119         return 0;
2120 }
2121 
2122 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2123                                    u8 type, u32 flags)
2124 {
2125         struct bdaddr_list_with_flags *entry;
2126 
2127         if (!bacmp(bdaddr, BDADDR_ANY))
2128                 return -EBADF;
2129 
2130         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2131                 return -EEXIST;
2132 
2133         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2134         if (!entry)
2135                 return -ENOMEM;
2136 
2137         bacpy(&entry->bdaddr, bdaddr);
2138         entry->bdaddr_type = type;
2139         entry->flags = flags;
2140 
2141         list_add(&entry->list, list);
2142 
2143         return 0;
2144 }
2145 
2146 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2147 {
2148         struct bdaddr_list *entry;
2149 
2150         if (!bacmp(bdaddr, BDADDR_ANY)) {
2151                 hci_bdaddr_list_clear(list);
2152                 return 0;
2153         }
2154 
2155         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2156         if (!entry)
2157                 return -ENOENT;
2158 
2159         list_del(&entry->list);
2160         kfree(entry);
2161 
2162         return 0;
2163 }
2164 
2165 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2166                                                         u8 type)
2167 {
2168         struct bdaddr_list_with_irk *entry;
2169 
2170         if (!bacmp(bdaddr, BDADDR_ANY)) {
2171                 hci_bdaddr_list_clear(list);
2172                 return 0;
2173         }
2174 
2175         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2176         if (!entry)
2177                 return -ENOENT;
2178 
2179         list_del(&entry->list);
2180         kfree(entry);
2181 
2182         return 0;
2183 }
2184 
2185 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2186                                    u8 type)
2187 {
2188         struct bdaddr_list_with_flags *entry;
2189 
2190         if (!bacmp(bdaddr, BDADDR_ANY)) {
2191                 hci_bdaddr_list_clear(list);
2192                 return 0;
2193         }
2194 
2195         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2196         if (!entry)
2197                 return -ENOENT;
2198 
2199         list_del(&entry->list);
2200         kfree(entry);
2201 
2202         return 0;
2203 }
2204 
2205 /* This function requires the caller holds hdev->lock */
2206 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2207                                                bdaddr_t *addr, u8 addr_type)
2208 {
2209         struct hci_conn_params *params;
2210 
2211         list_for_each_entry(params, &hdev->le_conn_params, list) {
2212                 if (bacmp(&params->addr, addr) == 0 &&
2213                     params->addr_type == addr_type) {
2214                         return params;
2215                 }
2216         }
2217 
2218         return NULL;
2219 }
2220 
2221 /* This function requires the caller holds hdev->lock or rcu_read_lock */
2222 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2223                                                   bdaddr_t *addr, u8 addr_type)
2224 {
2225         struct hci_conn_params *param;
2226 
2227         rcu_read_lock();
2228 
2229         list_for_each_entry_rcu(param, list, action) {
2230                 if (bacmp(&param->addr, addr) == 0 &&
2231                     param->addr_type == addr_type) {
2232                         rcu_read_unlock();
2233                         return param;
2234                 }
2235         }
2236 
2237         rcu_read_unlock();
2238 
2239         return NULL;
2240 }
2241 
2242 /* This function requires the caller holds hdev->lock */
2243 void hci_pend_le_list_del_init(struct hci_conn_params *param)
2244 {
2245         if (list_empty(&param->action))
2246                 return;
2247 
2248         list_del_rcu(&param->action);
2249         synchronize_rcu();
2250         INIT_LIST_HEAD(&param->action);
2251 }
2252 
2253 /* This function requires the caller holds hdev->lock */
2254 void hci_pend_le_list_add(struct hci_conn_params *param,
2255                           struct list_head *list)
2256 {
2257         list_add_rcu(&param->action, list);
2258 }
2259 
2260 /* This function requires the caller holds hdev->lock */
2261 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2262                                             bdaddr_t *addr, u8 addr_type)
2263 {
2264         struct hci_conn_params *params;
2265 
2266         params = hci_conn_params_lookup(hdev, addr, addr_type);
2267         if (params)
2268                 return params;
2269 
2270         params = kzalloc(sizeof(*params), GFP_KERNEL);
2271         if (!params) {
2272                 bt_dev_err(hdev, "out of memory");
2273                 return NULL;
2274         }
2275 
2276         bacpy(&params->addr, addr);
2277         params->addr_type = addr_type;
2278 
2279         list_add(&params->list, &hdev->le_conn_params);
2280         INIT_LIST_HEAD(&params->action);
2281 
2282         params->conn_min_interval = hdev->le_conn_min_interval;
2283         params->conn_max_interval = hdev->le_conn_max_interval;
2284         params->conn_latency = hdev->le_conn_latency;
2285         params->supervision_timeout = hdev->le_supv_timeout;
2286         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2287 
2288         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2289 
2290         return params;
2291 }
2292 
2293 void hci_conn_params_free(struct hci_conn_params *params)
2294 {
2295         hci_pend_le_list_del_init(params);
2296 
2297         if (params->conn) {
2298                 hci_conn_drop(params->conn);
2299                 hci_conn_put(params->conn);
2300         }
2301 
2302         list_del(&params->list);
2303         kfree(params);
2304 }
2305 
2306 /* This function requires the caller holds hdev->lock */
2307 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2308 {
2309         struct hci_conn_params *params;
2310 
2311         params = hci_conn_params_lookup(hdev, addr, addr_type);
2312         if (!params)
2313                 return;
2314 
2315         hci_conn_params_free(params);
2316 
2317         hci_update_passive_scan(hdev);
2318 
2319         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2320 }
2321 
2322 /* This function requires the caller holds hdev->lock */
2323 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2324 {
2325         struct hci_conn_params *params, *tmp;
2326 
2327         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2328                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2329                         continue;
2330 
2331                 /* If trying to establish a one-time connection to a disabled
2332                  * device, leave the params but mark them as explicit only.
2333                  */
2334                 if (params->explicit_connect) {
2335                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2336                         continue;
2337                 }
2338 
2339                 hci_conn_params_free(params);
2340         }
2341 
2342         BT_DBG("All disabled LE connection parameters were removed");
2343 }
2344 
2345 /* This function requires the caller holds hdev->lock */
2346 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2347 {
2348         struct hci_conn_params *params, *tmp;
2349 
2350         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2351                 hci_conn_params_free(params);
2352 
2353         BT_DBG("All LE connection parameters were removed");
2354 }
2355 
2356 /* Copy the Identity Address of the controller.
2357  *
2358  * If the controller has a public BD_ADDR, then by default use that one.
2359  * If this is an LE-only controller without a public address, default to
2360  * the static random address.
2361  *
2362  * For debugging purposes it is possible to force controllers with a
2363  * public address to use the static random address instead.
2364  *
2365  * In case BR/EDR has been disabled on a dual-mode controller and
2366  * userspace has configured a static address, then that address
2367  * becomes the identity address instead of the public BR/EDR address.
2368  */
2369 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2370                                u8 *bdaddr_type)
2371 {
2372         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2373             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2374             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2375              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2376                 bacpy(bdaddr, &hdev->static_addr);
2377                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2378         } else {
2379                 bacpy(bdaddr, &hdev->bdaddr);
2380                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2381         }
2382 }
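
/* Example (illustrative sketch): per the rules above, a dual-mode controller
 * with BR/EDR enabled reports its public address, while an LE-only
 * controller without a public address reports the static random address.
 * The helper is hypothetical.
 */
static void example_log_identity(struct hci_dev *hdev)
{
        bdaddr_t addr;
        u8 addr_type;

        hci_copy_identity_address(hdev, &addr, &addr_type);
        bt_dev_dbg(hdev, "identity %pMR (type %u)", &addr, addr_type);
}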
2383 
2384 static void hci_clear_wake_reason(struct hci_dev *hdev)
2385 {
2386         hci_dev_lock(hdev);
2387 
2388         hdev->wake_reason = 0;
2389         bacpy(&hdev->wake_addr, BDADDR_ANY);
2390         hdev->wake_addr_type = 0;
2391 
2392         hci_dev_unlock(hdev);
2393 }
2394 
2395 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2396                                 void *data)
2397 {
2398         struct hci_dev *hdev =
2399                 container_of(nb, struct hci_dev, suspend_notifier);
2400         int ret = 0;
2401 
2402         /* Userspace has full control of this device. Do nothing. */
2403         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2404                 return NOTIFY_DONE;
2405 
2406         /* To avoid a potential race with hci_unregister_dev. */
2407         hci_dev_hold(hdev);
2408 
2409         switch (action) {
2410         case PM_HIBERNATION_PREPARE:
2411         case PM_SUSPEND_PREPARE:
2412                 ret = hci_suspend_dev(hdev);
2413                 break;
2414         case PM_POST_HIBERNATION:
2415         case PM_POST_SUSPEND:
2416                 ret = hci_resume_dev(hdev);
2417                 break;
2418         }
2419 
2420         if (ret)
2421                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2422                            action, ret);
2423 
2424         hci_dev_put(hdev);
2425         return NOTIFY_DONE;
2426 }
2427 
2428 /* Alloc HCI device */
2429 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2430 {
2431         struct hci_dev *hdev;
2432         unsigned int alloc_size;
2433 
2434         alloc_size = sizeof(*hdev);
2435         if (sizeof_priv) {
2436                 /* FIXME: may need ALIGN-ment? */
2437                 alloc_size += sizeof_priv;
2438         }
2439 
2440         hdev = kzalloc(alloc_size, GFP_KERNEL);
2441         if (!hdev)
2442                 return NULL;
2443 
2444         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2445         hdev->esco_type = (ESCO_HV1);
2446         hdev->link_mode = (HCI_LM_ACCEPT);
2447         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2448         hdev->io_capability = 0x03;     /* No Input No Output */
2449         hdev->manufacturer = 0xffff;    /* Default to internal use */
2450         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2451         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2452         hdev->adv_instance_cnt = 0;
2453         hdev->cur_adv_instance = 0x00;
2454         hdev->adv_instance_timeout = 0;
2455 
2456         hdev->advmon_allowlist_duration = 300;
2457         hdev->advmon_no_filter_duration = 500;
2458         hdev->enable_advmon_interleave_scan = 0x00;     /* Disabled by default */
2459 
2460         hdev->sniff_max_interval = 800;
2461         hdev->sniff_min_interval = 80;
2462 
2463         hdev->le_adv_channel_map = 0x07;
2464         hdev->le_adv_min_interval = 0x0800;
2465         hdev->le_adv_max_interval = 0x0800;
2466         hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST;
2467         hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST;
2468         hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1;
2469         hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1;
2470         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2471         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2472         hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST;
2473         hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST;
2474         hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN;
2475         hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN;
2476         hdev->le_conn_min_interval = 0x0018;
2477         hdev->le_conn_max_interval = 0x0028;
2478         hdev->le_conn_latency = 0x0000;
2479         hdev->le_supv_timeout = 0x002a;
2480         hdev->le_def_tx_len = 0x001b;
2481         hdev->le_def_tx_time = 0x0148;
2482         hdev->le_max_tx_len = 0x001b;
2483         hdev->le_max_tx_time = 0x0148;
2484         hdev->le_max_rx_len = 0x001b;
2485         hdev->le_max_rx_time = 0x0148;
2486         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2487         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2488         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2489         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2490         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2491         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2492         hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT;
2493         hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2494         hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2495 
2496         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2497         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2498         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2499         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2500         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2501         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2502 
2503         /* default 1.28 sec page scan (interval 0x0800 * 0.625 ms) */
2504         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2505         hdev->def_page_scan_int = 0x0800;
2506         hdev->def_page_scan_window = 0x0012;
2507 
2508         mutex_init(&hdev->lock);
2509         mutex_init(&hdev->req_lock);
2510 
2511         ida_init(&hdev->unset_handle_ida);
2512 
2513         INIT_LIST_HEAD(&hdev->mesh_pending);
2514         INIT_LIST_HEAD(&hdev->mgmt_pending);
2515         INIT_LIST_HEAD(&hdev->reject_list);
2516         INIT_LIST_HEAD(&hdev->accept_list);
2517         INIT_LIST_HEAD(&hdev->uuids);
2518         INIT_LIST_HEAD(&hdev->link_keys);
2519         INIT_LIST_HEAD(&hdev->long_term_keys);
2520         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2521         INIT_LIST_HEAD(&hdev->remote_oob_data);
2522         INIT_LIST_HEAD(&hdev->le_accept_list);
2523         INIT_LIST_HEAD(&hdev->le_resolv_list);
2524         INIT_LIST_HEAD(&hdev->le_conn_params);
2525         INIT_LIST_HEAD(&hdev->pend_le_conns);
2526         INIT_LIST_HEAD(&hdev->pend_le_reports);
2527         INIT_LIST_HEAD(&hdev->conn_hash.list);
2528         INIT_LIST_HEAD(&hdev->adv_instances);
2529         INIT_LIST_HEAD(&hdev->blocked_keys);
2530         INIT_LIST_HEAD(&hdev->monitored_devices);
2531 
2532         INIT_LIST_HEAD(&hdev->local_codecs);
2533         INIT_WORK(&hdev->rx_work, hci_rx_work);
2534         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2535         INIT_WORK(&hdev->tx_work, hci_tx_work);
2536         INIT_WORK(&hdev->power_on, hci_power_on);
2537         INIT_WORK(&hdev->error_reset, hci_error_reset);
2538 
2539         hci_cmd_sync_init(hdev);
2540 
2541         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2542 
2543         skb_queue_head_init(&hdev->rx_q);
2544         skb_queue_head_init(&hdev->cmd_q);
2545         skb_queue_head_init(&hdev->raw_q);
2546 
2547         init_waitqueue_head(&hdev->req_wait_q);
2548 
2549         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2550         INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2551 
2552         hci_devcd_setup(hdev);
2553 
2554         hci_init_sysfs(hdev);
2555         discovery_init(hdev);
2556 
2557         return hdev;
2558 }
2559 EXPORT_SYMBOL(hci_alloc_dev_priv);
2560 
2561 /* Free HCI device */
2562 void hci_free_dev(struct hci_dev *hdev)
2563 {
2564         /* will be freed via the device release callback */
2565         put_device(&hdev->dev);
2566 }
2567 EXPORT_SYMBOL(hci_free_dev);
2568 
2569 /* Register HCI device */
2570 int hci_register_dev(struct hci_dev *hdev)
2571 {
2572         int id, error;
2573 
2574         if (!hdev->open || !hdev->close || !hdev->send)
2575                 return -EINVAL;
2576 
2577         id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2578         if (id < 0)
2579                 return id;
2580 
2581         error = dev_set_name(&hdev->dev, "hci%u", id);
2582         if (error)
2583                 return error;
2584 
2585         hdev->name = dev_name(&hdev->dev);
2586         hdev->id = id;
2587 
2588         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2589 
2590         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2591         if (!hdev->workqueue) {
2592                 error = -ENOMEM;
2593                 goto err;
2594         }
2595 
2596         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2597                                                       hdev->name);
2598         if (!hdev->req_workqueue) {
2599                 destroy_workqueue(hdev->workqueue);
2600                 error = -ENOMEM;
2601                 goto err;
2602         }
2603 
2604         if (!IS_ERR_OR_NULL(bt_debugfs))
2605                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2606 
2607         error = device_add(&hdev->dev);
2608         if (error < 0)
2609                 goto err_wqueue;
2610 
2611         hci_leds_init(hdev);
2612 
2613         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2614                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2615                                     hdev);
2616         if (hdev->rfkill) {
2617                 if (rfkill_register(hdev->rfkill) < 0) {
2618                         rfkill_destroy(hdev->rfkill);
2619                         hdev->rfkill = NULL;
2620                 }
2621         }
2622 
2623         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2624                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2625 
2626         hci_dev_set_flag(hdev, HCI_SETUP);
2627         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2628 
2629         /* Assume BR/EDR support until proven otherwise (such as
2630          * through reading supported features during init).
2631          */
2632         hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2633 
2634         write_lock(&hci_dev_list_lock);
2635         list_add(&hdev->list, &hci_dev_list);
2636         write_unlock(&hci_dev_list_lock);
2637 
2638         /* Devices that are marked for raw-only usage are unconfigured
2639          * and should not be included in normal operation.
2640          */
2641         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2642                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2643 
2644         /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2645          * callback.
2646          */
2647         if (hdev->wakeup)
2648                 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2649 
2650         hci_sock_dev_event(hdev, HCI_DEV_REG);
2651         hci_dev_hold(hdev);
2652 
2653         error = hci_register_suspend_notifier(hdev);
2654         if (error)
2655                 BT_WARN("register suspend notifier failed: %d", error);
2656 
2657         queue_work(hdev->req_workqueue, &hdev->power_on);
2658 
2659         idr_init(&hdev->adv_monitors_idr);
2660         msft_register(hdev);
2661 
2662         return id;
2663 
2664 err_wqueue:
2665         debugfs_remove_recursive(hdev->debugfs);
2666         destroy_workqueue(hdev->workqueue);
2667         destroy_workqueue(hdev->req_workqueue);
2668 err:
2669         ida_free(&hci_index_ida, hdev->id);
2670 
2671         return error;
2672 }
2673 EXPORT_SYMBOL(hci_register_dev);
2674 
2675 /* Unregister HCI device */
2676 void hci_unregister_dev(struct hci_dev *hdev)
2677 {
2678         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2679 
2680         mutex_lock(&hdev->unregister_lock);
2681         hci_dev_set_flag(hdev, HCI_UNREGISTER);
2682         mutex_unlock(&hdev->unregister_lock);
2683 
2684         write_lock(&hci_dev_list_lock);
2685         list_del(&hdev->list);
2686         write_unlock(&hci_dev_list_lock);
2687 
2688         cancel_work_sync(&hdev->rx_work);
2689         cancel_work_sync(&hdev->cmd_work);
2690         cancel_work_sync(&hdev->tx_work);
2691         cancel_work_sync(&hdev->power_on);
2692         cancel_work_sync(&hdev->error_reset);
2693 
2694         hci_cmd_sync_clear(hdev);
2695 
2696         hci_unregister_suspend_notifier(hdev);
2697 
2698         hci_dev_do_close(hdev);
2699 
2700         if (!test_bit(HCI_INIT, &hdev->flags) &&
2701             !hci_dev_test_flag(hdev, HCI_SETUP) &&
2702             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2703                 hci_dev_lock(hdev);
2704                 mgmt_index_removed(hdev);
2705                 hci_dev_unlock(hdev);
2706         }
2707 
2708         /* mgmt_index_removed should take care of emptying the
2709          * pending list */
2710         BUG_ON(!list_empty(&hdev->mgmt_pending));
2711 
2712         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2713 
2714         if (hdev->rfkill) {
2715                 rfkill_unregister(hdev->rfkill);
2716                 rfkill_destroy(hdev->rfkill);
2717         }
2718 
2719         device_del(&hdev->dev);
2720         /* Actual cleanup is deferred until hci_release_dev(). */
2721         hci_dev_put(hdev);
2722 }
2723 EXPORT_SYMBOL(hci_unregister_dev);
2724 
2725 /* Release HCI device */
2726 void hci_release_dev(struct hci_dev *hdev)
2727 {
2728         debugfs_remove_recursive(hdev->debugfs);
2729         kfree_const(hdev->hw_info);
2730         kfree_const(hdev->fw_info);
2731 
2732         destroy_workqueue(hdev->workqueue);
2733         destroy_workqueue(hdev->req_workqueue);
2734 
2735         hci_dev_lock(hdev);
2736         hci_bdaddr_list_clear(&hdev->reject_list);
2737         hci_bdaddr_list_clear(&hdev->accept_list);
2738         hci_uuids_clear(hdev);
2739         hci_link_keys_clear(hdev);
2740         hci_smp_ltks_clear(hdev);
2741         hci_smp_irks_clear(hdev);
2742         hci_remote_oob_data_clear(hdev);
2743         hci_adv_instances_clear(hdev);
2744         hci_adv_monitors_clear(hdev);
2745         hci_bdaddr_list_clear(&hdev->le_accept_list);
2746         hci_bdaddr_list_clear(&hdev->le_resolv_list);
2747         hci_conn_params_clear_all(hdev);
2748         hci_discovery_filter_clear(hdev);
2749         hci_blocked_keys_clear(hdev);
2750         hci_codec_list_clear(&hdev->local_codecs);
2751         msft_release(hdev);
2752         hci_dev_unlock(hdev);
2753 
2754         ida_destroy(&hdev->unset_handle_ida);
2755         ida_free(&hci_index_ida, hdev->id);
2756         kfree_skb(hdev->sent_cmd);
2757         kfree_skb(hdev->req_skb);
2758         kfree_skb(hdev->recv_event);
2759         kfree(hdev);
2760 }
2761 EXPORT_SYMBOL(hci_release_dev);
2762 
2763 int hci_register_suspend_notifier(struct hci_dev *hdev)
2764 {
2765         int ret = 0;
2766 
2767         if (!hdev->suspend_notifier.notifier_call &&
2768             !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2769                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2770                 ret = register_pm_notifier(&hdev->suspend_notifier);
2771         }
2772 
2773         return ret;
2774 }
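
/* Example (illustrative sketch): a driver that implements its own power
 * management can opt out of the PM notifier by setting the quirk before
 * hci_register_dev(), which is what triggers the registration above.
 */
static void example_opt_out_of_pm(struct hci_dev *hdev)
{
        set_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks);
}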
2775 
2776 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2777 {
2778         int ret = 0;
2779 
2780         if (hdev->suspend_notifier.notifier_call) {
2781                 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2782                 if (!ret)
2783                         hdev->suspend_notifier.notifier_call = NULL;
2784         }
2785 
2786         return ret;
2787 }
2788 
2789 /* Cancel ongoing command synchronously:
2790  *
2791  * - Cancel command timer
2792  * - Reset command counter
2793  * - Cancel command request
2794  */
2795 static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2796 {
2797         bt_dev_dbg(hdev, "err 0x%2.2x", err);
2798 
2799         cancel_delayed_work_sync(&hdev->cmd_timer);
2800         cancel_delayed_work_sync(&hdev->ncmd_timer);
2801         atomic_set(&hdev->cmd_cnt, 1);
2802 
2803         hci_cmd_sync_cancel_sync(hdev, err);
2804 }
2805 
2806 /* Suspend HCI device */
2807 int hci_suspend_dev(struct hci_dev *hdev)
2808 {
2809         int ret;
2810 
2811         bt_dev_dbg(hdev, "");
2812 
2813         /* Suspend should only act when the device is powered. */
2814         if (!hdev_is_powered(hdev) ||
2815             hci_dev_test_flag(hdev, HCI_UNREGISTER))
2816                 return 0;
2817 
2818         /* If powering down don't attempt to suspend */
2819         if (mgmt_powering_down(hdev))
2820                 return 0;
2821 
2822         /* Cancel potentially blocking sync operation before suspend */
2823         hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2824 
2825         hci_req_sync_lock(hdev);
2826         ret = hci_suspend_sync(hdev);
2827         hci_req_sync_unlock(hdev);
2828 
2829         hci_clear_wake_reason(hdev);
2830         mgmt_suspending(hdev, hdev->suspend_state);
2831 
2832         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2833         return ret;
2834 }
2835 EXPORT_SYMBOL(hci_suspend_dev);
2836 
2837 /* Resume HCI device */
2838 int hci_resume_dev(struct hci_dev *hdev)
2839 {
2840         int ret;
2841 
2842         bt_dev_dbg(hdev, "");
2843 
2844         /* Resume should only act when the device is powered. */
2845         if (!hdev_is_powered(hdev) ||
2846             hci_dev_test_flag(hdev, HCI_UNREGISTER))
2847                 return 0;
2848 
2849         /* If powering down don't attempt to resume */
2850         if (mgmt_powering_down(hdev))
2851                 return 0;
2852 
2853         hci_req_sync_lock(hdev);
2854         ret = hci_resume_sync(hdev);
2855         hci_req_sync_unlock(hdev);
2856 
2857         mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2858                       hdev->wake_addr_type);
2859 
2860         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2861         return ret;
2862 }
2863 EXPORT_SYMBOL(hci_resume_dev);
2864 
2865 /* Reset HCI device */
2866 int hci_reset_dev(struct hci_dev *hdev)
2867 {
2868         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2869         struct sk_buff *skb;
2870 
2871         skb = bt_skb_alloc(sizeof(hw_err), GFP_ATOMIC);
2872         if (!skb)
2873                 return -ENOMEM;
2874 
2875         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2876         skb_put_data(skb, hw_err, sizeof(hw_err));
2877 
2878         bt_dev_err(hdev, "Injecting HCI hardware error event");
2879 
2880         /* Send Hardware Error to upper stack */
2881         return hci_recv_frame(hdev, skb);
2882 }
2883 EXPORT_SYMBOL(hci_reset_dev);
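
/* Worked layout (for reference): the injected frame above is a complete
 * HCI event packet:
 *
 *   hw_err[0] = 0x10  HCI_EV_HARDWARE_ERROR (event code)
 *   hw_err[1] = 0x01  parameter total length
 *   hw_err[2] = 0x00  hardware code
 *
 * The event processing path reacts to it by scheduling hdev->error_reset,
 * which performs the actual device reset.
 */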
2884 
2885 static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
2886 {
2887         if (hdev->classify_pkt_type)
2888                 return hdev->classify_pkt_type(hdev, skb);
2889 
2890         return hci_skb_pkt_type(skb);
2891 }
2892 
2893 /* Receive frame from HCI drivers */
2894 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2895 {
2896         u8 dev_pkt_type;
2897 
2898         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2899                       !test_bit(HCI_INIT, &hdev->flags))) {
2900                 kfree_skb(skb);
2901                 return -ENXIO;
2902         }
2903 
2904         /* Check if the driver agrees with the packet type classification */
2905         dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb);
2906         if (hci_skb_pkt_type(skb) != dev_pkt_type) {
2907                 hci_skb_pkt_type(skb) = dev_pkt_type;
2908         }
2909 
2910         switch (hci_skb_pkt_type(skb)) {
2911         case HCI_EVENT_PKT:
2912                 break;
2913         case HCI_ACLDATA_PKT:
2914                 /* Detect if ISO packet has been sent as ACL */
2915                 if (hci_conn_num(hdev, ISO_LINK)) {
2916                         __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2917                         __u8 type;
2918 
2919                         type = hci_conn_lookup_type(hdev, hci_handle(handle));
2920                         if (type == ISO_LINK)
2921                                 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2922                 }
2923                 break;
2924         case HCI_SCODATA_PKT:
2925                 break;
2926         case HCI_ISODATA_PKT:
2927                 break;
2928         default:
2929                 kfree_skb(skb);
2930                 return -EINVAL;
2931         }
2932 
2933         /* Incoming skb */
2934         bt_cb(skb)->incoming = 1;
2935 
2936         /* Time stamp */
2937         __net_timestamp(skb);
2938 
2939         skb_queue_tail(&hdev->rx_q, skb);
2940         queue_work(hdev->workqueue, &hdev->rx_work);
2941 
2942         return 0;
2943 }
2944 EXPORT_SYMBOL(hci_recv_frame);
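
/* Example (illustrative sketch): how a transport driver typically hands a
 * received event to the core. Buffer handling is simplified and "buf"/"len"
 * are hypothetical driver data.
 */
static int example_driver_rx(struct hci_dev *hdev, const void *buf,
                             size_t len)
{
        struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

        if (!skb)
                return -ENOMEM;

        hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
        skb_put_data(skb, buf, len);

        /* hci_recv_frame() consumes the skb on success and on failure */
        return hci_recv_frame(hdev, skb);
}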
2945 
2946 /* Receive diagnostic message from HCI drivers */
2947 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2948 {
2949         /* Mark as diagnostic packet */
2950         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2951 
2952         /* Time stamp */
2953         __net_timestamp(skb);
2954 
2955         skb_queue_tail(&hdev->rx_q, skb);
2956         queue_work(hdev->workqueue, &hdev->rx_work);
2957 
2958         return 0;
2959 }
2960 EXPORT_SYMBOL(hci_recv_diag);
2961 
2962 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2963 {
2964         va_list vargs;
2965 
2966         va_start(vargs, fmt);
2967         kfree_const(hdev->hw_info);
2968         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2969         va_end(vargs);
2970 }
2971 EXPORT_SYMBOL(hci_set_hw_info);
2972 
2973 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2974 {
2975         va_list vargs;
2976 
2977         va_start(vargs, fmt);
2978         kfree_const(hdev->fw_info);
2979         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2980         va_end(vargs);
2981 }
2982 EXPORT_SYMBOL(hci_set_fw_info);
2983 
2984 /* ---- Interface to upper protocols ---- */
2985 
2986 int hci_register_cb(struct hci_cb *cb)
2987 {
2988         BT_DBG("%p name %s", cb, cb->name);
2989 
2990         mutex_lock(&hci_cb_list_lock);
2991         list_add_tail(&cb->list, &hci_cb_list);
2992         mutex_unlock(&hci_cb_list_lock);
2993 
2994         return 0;
2995 }
2996 EXPORT_SYMBOL(hci_register_cb);
2997 
2998 int hci_unregister_cb(struct hci_cb *cb)
2999 {
3000         BT_DBG("%p name %s", cb, cb->name);
3001 
3002         mutex_lock(&hci_cb_list_lock);
3003         list_del(&cb->list);
3004         mutex_unlock(&hci_cb_list_lock);
3005 
3006         return 0;
3007 }
3008 EXPORT_SYMBOL(hci_unregister_cb);
3009 
3010 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3011 {
3012         int err;
3013 
3014         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3015                skb->len);
3016 
3017         /* Time stamp */
3018         __net_timestamp(skb);
3019 
3020         /* Send copy to monitor */
3021         hci_send_to_monitor(hdev, skb);
3022 
3023         if (atomic_read(&hdev->promisc)) {
3024                 /* Send copy to the sockets */
3025                 hci_send_to_sock(hdev, skb);
3026         }
3027 
3028         /* Get rid of the skb owner prior to sending to the driver. */
3029         skb_orphan(skb);
3030 
3031         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3032                 kfree_skb(skb);
3033                 return -EINVAL;
3034         }
3035 
3036         err = hdev->send(hdev, skb);
3037         if (err < 0) {
3038                 bt_dev_err(hdev, "sending frame failed (%d)", err);
3039                 kfree_skb(skb);
3040                 return err;
3041         }
3042 
3043         return 0;
3044 }
3045 
3046 /* Send HCI command */
3047 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3048                  const void *param)
3049 {
3050         struct sk_buff *skb;
3051 
3052         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3053 
3054         skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
3055         if (!skb) {
3056                 bt_dev_err(hdev, "no memory for command");
3057                 return -ENOMEM;
3058         }
3059 
3060         /* Stand-alone HCI commands must be flagged as
3061          * single-command requests.
3062          */
3063         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3064 
3065         skb_queue_tail(&hdev->cmd_q, skb);
3066         queue_work(hdev->workqueue, &hdev->cmd_work);
3067 
3068         return 0;
3069 }
3070 
3071 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3072                    const void *param)
3073 {
3074         struct sk_buff *skb;
3075 
3076         if (hci_opcode_ogf(opcode) != 0x3f) {
3077                 /* A controller receiving a command shall respond with either
3078                  * a Command Status Event or a Command Complete Event.
3079                  * Therefore, all standard HCI commands must be sent via the
3080                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3081                  * Some vendors do not comply with this rule for vendor-specific
3082                  * commands and do not return any event. We want to support
3083                  * unresponded commands for such cases only.
3084                  */
3085                 bt_dev_err(hdev, "unresponded command not supported");
3086                 return -EINVAL;
3087         }
3088 
3089         skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
3090         if (!skb) {
3091                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3092                            opcode);
3093                 return -ENOMEM;
3094         }
3095 
3096         hci_send_frame(hdev, skb);
3097 
3098         return 0;
3099 }
3100 EXPORT_SYMBOL(__hci_cmd_send);
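
/* Example (illustrative sketch): sending a fire-and-forget vendor command.
 * The OGF must be 0x3f; the OCF and payload below are hypothetical, not a
 * real vendor interface.
 */
static int example_send_vendor_cmd(struct hci_dev *hdev)
{
        u8 param[] = { 0x01 };

        return __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
                              sizeof(param), param);
}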
3101 
3102 /* Get data from the previously sent command */
3103 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3104 {
3105         struct hci_command_hdr *hdr;
3106 
3107         if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3108                 return NULL;
3109 
3110         hdr = (void *)skb->data;
3111 
3112         if (hdr->opcode != cpu_to_le16(opcode))
3113                 return NULL;
3114 
3115         return skb->data + HCI_COMMAND_HDR_SIZE;
3116 }
3117 
3118 /* Get data from the previously sent command */
3119 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3120 {
3121         void *data;
3122 
3123         /* Check if opcode matches last sent command */
3124         data = hci_cmd_data(hdev->sent_cmd, opcode);
3125         if (!data)
3126                 /* Check if opcode matches last request */
3127                 data = hci_cmd_data(hdev->req_skb, opcode);
3128 
3129         return data;
3130 }
3131 
3132 /* Get data from last received event */
3133 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3134 {
3135         struct hci_event_hdr *hdr;
3136         int offset;
3137 
3138         if (!hdev->recv_event)
3139                 return NULL;
3140 
3141         hdr = (void *)hdev->recv_event->data;
3142         offset = sizeof(*hdr);
3143 
3144         if (hdr->evt != event) {
3145                 /* In case of an LE meta event, check whether the subevent matches */
3146                 if (hdr->evt == HCI_EV_LE_META) {
3147                         struct hci_ev_le_meta *ev;
3148 
3149                         ev = (void *)hdev->recv_event->data + offset;
3150                         offset += sizeof(*ev);
3151                         if (ev->subevent == event)
3152                                 goto found;
3153                 }
3154                 return NULL;
3155         }
3156 
3157 found:
3158         bt_dev_dbg(hdev, "event 0x%2.2x", event);
3159 
3160         return hdev->recv_event->data + offset;
3161 }
3162 
3163 /* Send ACL data */
3164 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3165 {
3166         struct hci_acl_hdr *hdr;
3167         int len = skb->len;
3168 
3169         skb_push(skb, HCI_ACL_HDR_SIZE);
3170         skb_reset_transport_header(skb);
3171         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3172         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3173         hdr->dlen   = cpu_to_le16(len);
3174 }
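
/* Worked layout (for reference): hci_handle_pack() folds the 12-bit
 * connection handle and the 4-bit packet boundary/broadcast flags into one
 * le16, e.g. handle 0x002a with ACL_START (0x02) becomes 0x202a on the
 * wire (little-endian).
 */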
3175 
3176 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3177                           struct sk_buff *skb, __u16 flags)
3178 {
3179         struct hci_conn *conn = chan->conn;
3180         struct hci_dev *hdev = conn->hdev;
3181         struct sk_buff *list;
3182 
3183         skb->len = skb_headlen(skb);
3184         skb->data_len = 0;
3185 
3186         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3187 
3188         hci_add_acl_hdr(skb, conn->handle, flags);
3189 
3190         list = skb_shinfo(skb)->frag_list;
3191         if (!list) {
3192                 /* Non fragmented */
3193                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3194 
3195                 skb_queue_tail(queue, skb);
3196         } else {
3197                 /* Fragmented */
3198                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3199 
3200                 skb_shinfo(skb)->frag_list = NULL;
3201 
3202                 /* Queue all fragments atomically. We need to use
3203                  * spin_lock_bh here because, for 6LoWPAN links, this
3204                  * function can be called from softirq context, where
3205                  * taking a normal spin lock could cause deadlocks.
3206                  */
3207                 spin_lock_bh(&queue->lock);
3208 
3209                 __skb_queue_tail(queue, skb);
3210 
3211                 flags &= ~ACL_START;
3212                 flags |= ACL_CONT;
3213                 do {
3214                         skb = list; list = list->next;
3215 
3216                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3217                         hci_add_acl_hdr(skb, conn->handle, flags);
3218 
3219                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3220 
3221                         __skb_queue_tail(queue, skb);
3222                 } while (list);
3223 
3224                 spin_unlock_bh(&queue->lock);
3225         }
3226 }
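
/* The handle/flags word built by hci_add_acl_hdr() packs a 12-bit
 * connection handle with 4 bits of packet boundary/broadcast flags;
 * the helpers in hci.h are defined along these lines:
 *
 *   #define hci_handle_pack(h, f) ((__u16)((h & 0x0fff) | (f << 12)))
 *   #define hci_handle(h)         (h & 0x0fff)
 *   #define hci_flags(h)          (h >> 12)
 *
 * Worked example: hci_handle_pack(0x002a, ACL_START) == 0x202a, while
 * the continuation fragments queued above carry ACL_CONT instead.
 */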
3227 
3228 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3229 {
3230         struct hci_dev *hdev = chan->conn->hdev;
3231 
3232         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3233 
3234         hci_queue_acl(chan, &chan->data_q, skb, flags);
3235 
3236         queue_work(hdev->workqueue, &hdev->tx_work);
3237 }
3238 
3239 /* Send SCO data */
3240 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3241 {
3242         struct hci_dev *hdev = conn->hdev;
3243         struct hci_sco_hdr hdr;
3244 
3245         BT_DBG("%s len %d", hdev->name, skb->len);
3246 
3247         hdr.handle = cpu_to_le16(conn->handle);
3248         hdr.dlen   = skb->len;
3249 
3250         skb_push(skb, HCI_SCO_HDR_SIZE);
3251         skb_reset_transport_header(skb);
3252         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3253 
3254         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3255 
3256         skb_queue_tail(&conn->data_q, skb);
3257         queue_work(hdev->workqueue, &hdev->tx_work);
3258 }
3259 
3260 /* Send ISO data */
3261 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3262 {
3263         struct hci_iso_hdr *hdr;
3264         int len = skb->len;
3265 
3266         skb_push(skb, HCI_ISO_HDR_SIZE);
3267         skb_reset_transport_header(skb);
3268         hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3269         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3270         hdr->dlen   = cpu_to_le16(len);
3271 }
3272 
3273 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3274                           struct sk_buff *skb)
3275 {
3276         struct hci_dev *hdev = conn->hdev;
3277         struct sk_buff *list;
3278         __u16 flags;
3279 
3280         skb->len = skb_headlen(skb);
3281         skb->data_len = 0;
3282 
3283         hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3284 
3285         list = skb_shinfo(skb)->frag_list;
3286 
3287         flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3288         hci_add_iso_hdr(skb, conn->handle, flags);
3289 
3290         if (!list) {
3291                 /* Non-fragmented */
3292                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3293 
3294                 skb_queue_tail(queue, skb);
3295         } else {
3296                 /* Fragmented */
3297                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3298 
3299                 skb_shinfo(skb)->frag_list = NULL;
3300 
3301                 __skb_queue_tail(queue, skb);
3302 
3303                 do {
3304                         skb = list; list = list->next;
3305 
3306                         hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3307                         flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3308                                                    0x00);
3309                         hci_add_iso_hdr(skb, conn->handle, flags);
3310 
3311                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3312 
3313                         __skb_queue_tail(queue, skb);
3314                 } while (list);
3315         }
3316 }
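
/* Worked example of the flag sequence produced above for an SDU whose
 * frag_list holds two extra fragments:
 *
 *   fragment 0: ISO_START
 *   fragment 1: ISO_CONT
 *   fragment 2: ISO_END   (list->next is NULL on the final pass)
 *
 * An unfragmented SDU is sent as a single ISO_SINGLE packet instead.
 */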
3317 
3318 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3319 {
3320         struct hci_dev *hdev = conn->hdev;
3321 
3322         BT_DBG("%s len %d", hdev->name, skb->len);
3323 
3324         hci_queue_iso(conn, &conn->data_q, skb);
3325 
3326         queue_work(hdev->workqueue, &hdev->tx_work);
3327 }
3328 
3329 /* ---- HCI TX task (outgoing data) ---- */
3330 
3331 /* HCI Connection scheduler */
3332 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3333 {
3334         struct hci_dev *hdev;
3335         int cnt, q;
3336 
3337         if (!conn) {
3338                 *quote = 0;
3339                 return;
3340         }
3341 
3342         hdev = conn->hdev;
3343 
3344         switch (conn->type) {
3345         case ACL_LINK:
3346                 cnt = hdev->acl_cnt;
3347                 break;
3348         case SCO_LINK:
3349         case ESCO_LINK:
3350                 cnt = hdev->sco_cnt;
3351                 break;
3352         case LE_LINK:
3353                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3354                 break;
3355         case ISO_LINK:
3356                 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3357                         hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3358                 break;
3359         default:
3360                 cnt = 0;
3361                 bt_dev_err(hdev, "unknown link type %d", conn->type);
3362         }
3363 
3364         q = cnt / num;
3365         *quote = q ? q : 1;
3366 }
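
/* Worked example of the quota computed above: with cnt == 5 free
 * controller buffers (say hdev->acl_cnt) shared by num == 2 busy
 * connections, q = 5 / 2 = 2 packets per connection this round; the
 * "q ? q : 1" fallback guarantees at least one packet, so a
 * connection is never starved outright.
 */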
3367 
3368 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3369                                      int *quote)
3370 {
3371         struct hci_conn_hash *h = &hdev->conn_hash;
3372         struct hci_conn *conn = NULL, *c;
3373         unsigned int num = 0, min = ~0;
3374 
3375         /* We don't have to lock the device here: connections are always
3376          * added and removed with the TX task disabled. */
3377 
3378         rcu_read_lock();
3379 
3380         list_for_each_entry_rcu(c, &h->list, list) {
3381                 if (c->type != type || skb_queue_empty(&c->data_q))
3382                         continue;
3383 
3384                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3385                         continue;
3386 
3387                 num++;
3388 
3389                 if (c->sent < min) {
3390                         min  = c->sent;
3391                         conn = c;
3392                 }
3393 
3394                 if (hci_conn_num(hdev, type) == num)
3395                         break;
3396         }
3397 
3398         rcu_read_unlock();
3399 
3400         hci_quote_sent(conn, num, quote);
3401 
3402         BT_DBG("conn %p quote %d", conn, *quote);
3403         return conn;
3404 }
3405 
3406 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3407 {
3408         struct hci_conn_hash *h = &hdev->conn_hash;
3409         struct hci_conn *c;
3410 
3411         bt_dev_err(hdev, "link tx timeout");
3412 
3413         rcu_read_lock();
3414 
3415         /* Kill stalled connections */
3416         list_for_each_entry_rcu(c, &h->list, list) {
3417                 if (c->type == type && c->sent) {
3418                         bt_dev_err(hdev, "killing stalled connection %pMR",
3419                                    &c->dst);
3420                         /* hci_disconnect might sleep, so, we have to release
3421                          * the RCU read lock before calling it.
3422                          */
3423                         rcu_read_unlock();
3424                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3425                         rcu_read_lock();
3426                 }
3427         }
3428 
3429         rcu_read_unlock();
3430 }
3431 
3432 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3433                                       int *quote)
3434 {
3435         struct hci_conn_hash *h = &hdev->conn_hash;
3436         struct hci_chan *chan = NULL;
3437         unsigned int num = 0, min = ~0, cur_prio = 0;
3438         struct hci_conn *conn;
3439         int conn_num = 0;
3440 
3441         BT_DBG("%s", hdev->name);
3442 
3443         rcu_read_lock();
3444 
3445         list_for_each_entry_rcu(conn, &h->list, list) {
3446                 struct hci_chan *tmp;
3447 
3448                 if (conn->type != type)
3449                         continue;
3450 
3451                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3452                         continue;
3453 
3454                 conn_num++;
3455 
3456                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3457                         struct sk_buff *skb;
3458 
3459                         if (skb_queue_empty(&tmp->data_q))
3460                                 continue;
3461 
3462                         skb = skb_peek(&tmp->data_q);
3463                         if (skb->priority < cur_prio)
3464                                 continue;
3465 
3466                         if (skb->priority > cur_prio) {
3467                                 num = 0;
3468                                 min = ~0;
3469                                 cur_prio = skb->priority;
3470                         }
3471 
3472                         num++;
3473 
3474                         if (conn->sent < min) {
3475                                 min  = conn->sent;
3476                                 chan = tmp;
3477                         }
3478                 }
3479 
3480                 if (hci_conn_num(hdev, type) == conn_num)
3481                         break;
3482         }
3483 
3484         rcu_read_unlock();
3485 
3486         if (!chan)
3487                 return NULL;
3488 
3489         hci_quote_sent(chan->conn, num, quote);
3490 
3491         BT_DBG("chan %p quote %d", chan, *quote);
3492         return chan;
3493 }
3494 
3495 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3496 {
3497         struct hci_conn_hash *h = &hdev->conn_hash;
3498         struct hci_conn *conn;
3499         int num = 0;
3500 
3501         BT_DBG("%s", hdev->name);
3502 
3503         rcu_read_lock();
3504 
3505         list_for_each_entry_rcu(conn, &h->list, list) {
3506                 struct hci_chan *chan;
3507 
3508                 if (conn->type != type)
3509                         continue;
3510 
3511                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3512                         continue;
3513 
3514                 num++;
3515 
3516                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3517                         struct sk_buff *skb;
3518 
3519                         if (chan->sent) {
3520                                 chan->sent = 0;
3521                                 continue;
3522                         }
3523 
3524                         if (skb_queue_empty(&chan->data_q))
3525                                 continue;
3526 
3527                         skb = skb_peek(&chan->data_q);
3528                         if (skb->priority >= HCI_PRIO_MAX - 1)
3529                                 continue;
3530 
3531                         skb->priority = HCI_PRIO_MAX - 1;
3532 
3533                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3534                                skb->priority);
3535                 }
3536 
3537                 if (hci_conn_num(hdev, type) == num)
3538                         break;
3539         }
3540 
3541         rcu_read_unlock();
3542 
3543 }
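
/* Illustration of the promotion above, assuming HCI_PRIO_MAX == 7 as
 * in hci_core.h (priorities 0..6): a channel that transmitted this
 * round only has chan->sent cleared, while a skipped channel whose
 * queued head sits below priority 6 is bumped to 6, so it wins the
 * next hci_chan_sent() pass instead of starving behind
 * higher-priority traffic.
 */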
3544 
3545 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3546 {
3547         unsigned long last_tx;
3548 
3549         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3550                 return;
3551 
3552         switch (type) {
3553         case LE_LINK:
3554                 last_tx = hdev->le_last_tx;
3555                 break;
3556         default:
3557                 last_tx = hdev->acl_last_tx;
3558                 break;
3559         }
3560 
3561         /* tx timeout must be longer than maximum link supervision timeout
3562          * (40.9 seconds)
3563          */
3564         if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3565                 hci_link_tx_to(hdev, type);
3566 }
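
/* Worked numbers behind the comment above: the largest possible link
 * supervision timeout is 0xffff slots * 0.625 ms ~= 40.96 s, and
 * HCI_ACL_TX_TIMEOUT (hci_core.h) is 45 seconds, so the host only
 * declares a link stalled after the controller itself would have
 * dropped it.
 */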
3567 
3568 /* Schedule SCO */
3569 static void hci_sched_sco(struct hci_dev *hdev)
3570 {
3571         struct hci_conn *conn;
3572         struct sk_buff *skb;
3573         int quote;
3574 
3575         BT_DBG("%s", hdev->name);
3576 
3577         if (!hci_conn_num(hdev, SCO_LINK))
3578                 return;
3579 
3580         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3581                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3582                         BT_DBG("skb %p len %d", skb, skb->len);
3583                         hci_send_frame(hdev, skb);
3584 
3585                         conn->sent++;
3586                         if (conn->sent == ~0)
3587                                 conn->sent = 0;
3588                 }
3589         }
3590 }
3591 
3592 static void hci_sched_esco(struct hci_dev *hdev)
3593 {
3594         struct hci_conn *conn;
3595         struct sk_buff *skb;
3596         int quote;
3597 
3598         BT_DBG("%s", hdev->name);
3599 
3600         if (!hci_conn_num(hdev, ESCO_LINK))
3601                 return;
3602 
3603         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3604                                                      &quote))) {
3605                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3606                         BT_DBG("skb %p len %d", skb, skb->len);
3607                         hci_send_frame(hdev, skb);
3608 
3609                         conn->sent++;
3610                         if (conn->sent == ~0)
3611                                 conn->sent = 0;
3612                 }
3613         }
3614 }
3615 
3616 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3617 {
3618         unsigned int cnt = hdev->acl_cnt;
3619         struct hci_chan *chan;
3620         struct sk_buff *skb;
3621         int quote;
3622 
3623         __check_timeout(hdev, cnt, ACL_LINK);
3624 
3625         while (hdev->acl_cnt &&
3626                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3627                 u32 priority = (skb_peek(&chan->data_q))->priority;
3628                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3629                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3630                                skb->len, skb->priority);
3631 
3632                         /* Stop if priority has changed */
3633                         if (skb->priority < priority)
3634                                 break;
3635 
3636                         skb = skb_dequeue(&chan->data_q);
3637 
3638                         hci_conn_enter_active_mode(chan->conn,
3639                                                    bt_cb(skb)->force_active);
3640 
3641                         hci_send_frame(hdev, skb);
3642                         hdev->acl_last_tx = jiffies;
3643 
3644                         hdev->acl_cnt--;
3645                         chan->sent++;
3646                         chan->conn->sent++;
3647 
3648                         /* Send pending SCO packets right away */
3649                         hci_sched_sco(hdev);
3650                         hci_sched_esco(hdev);
3651                 }
3652         }
3653 
3654         if (cnt != hdev->acl_cnt)
3655                 hci_prio_recalculate(hdev, ACL_LINK);
3656 }
3657 
3658 static void hci_sched_acl(struct hci_dev *hdev)
3659 {
3660         BT_DBG("%s", hdev->name);
3661 
3662         /* No ACL links on the BR/EDR controller; nothing to schedule */
3663         if (!hci_conn_num(hdev, ACL_LINK))
3664                 return;
3665 
3666         hci_sched_acl_pkt(hdev);
3667 }
3668 
3669 static void hci_sched_le(struct hci_dev *hdev)
3670 {
3671         struct hci_chan *chan;
3672         struct sk_buff *skb;
3673         int quote, *cnt, tmp;
3674 
3675         BT_DBG("%s", hdev->name);
3676 
3677         if (!hci_conn_num(hdev, LE_LINK))
3678                 return;
3679 
3680         cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3681 
3682         __check_timeout(hdev, *cnt, LE_LINK);
3683 
3684         tmp = *cnt;
3685         while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3686                 u32 priority = (skb_peek(&chan->data_q))->priority;
3687                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3688                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3689                                skb->len, skb->priority);
3690 
3691                         /* Stop if priority has changed */
3692                         if (skb->priority < priority)
3693                                 break;
3694 
3695                         skb = skb_dequeue(&chan->data_q);
3696 
3697                         hci_send_frame(hdev, skb);
3698                         hdev->le_last_tx = jiffies;
3699 
3700                         (*cnt)--;
3701                         chan->sent++;
3702                         chan->conn->sent++;
3703 
3704                         /* Send pending SCO packets right away */
3705                         hci_sched_sco(hdev);
3706                         hci_sched_esco(hdev);
3707                 }
3708         }
3709 
3710         if (*cnt != tmp)
3711                 hci_prio_recalculate(hdev, LE_LINK);
3712 }
3713 
3714 /* Schedule CIS */
3715 static void hci_sched_iso(struct hci_dev *hdev)
3716 {
3717         struct hci_conn *conn;
3718         struct sk_buff *skb;
3719         int quote, *cnt;
3720 
3721         BT_DBG("%s", hdev->name);
3722 
3723         if (!hci_conn_num(hdev, ISO_LINK))
3724                 return;
3725 
3726         cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3727                 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3728         while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3729                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3730                         BT_DBG("skb %p len %d", skb, skb->len);
3731                         hci_send_frame(hdev, skb);
3732 
3733                         conn->sent++;
3734                         if (conn->sent == ~0)
3735                                 conn->sent = 0;
3736                         (*cnt)--;
3737                 }
3738         }
3739 }
3740 
3741 static void hci_tx_work(struct work_struct *work)
3742 {
3743         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3744         struct sk_buff *skb;
3745 
3746         BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3747                hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3748 
3749         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3750                 /* Schedule the queues and send pending frames to the HCI driver */
3751                 hci_sched_sco(hdev);
3752                 hci_sched_esco(hdev);
3753                 hci_sched_iso(hdev);
3754                 hci_sched_acl(hdev);
3755                 hci_sched_le(hdev);
3756         }
3757 
3758         /* Send next queued raw (unknown type) packet */
3759         while ((skb = skb_dequeue(&hdev->raw_q)))
3760                 hci_send_frame(hdev, skb);
3761 }
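
/* A note on the ordering above: the SCO and eSCO queues are drained
 * first, and again from inside the ACL and LE loops ("Send pending
 * SCO packets right away"), presumably because synchronous audio
 * frames cannot absorb the latency of waiting behind a full ACL
 * quota.
 */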
3762 
3763 /* ----- HCI RX task (incoming data processing) ----- */
3764 
3765 /* ACL data packet */
3766 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3767 {
3768         struct hci_acl_hdr *hdr = (void *) skb->data;
3769         struct hci_conn *conn;
3770         __u16 handle, flags;
3771 
3772         skb_pull(skb, HCI_ACL_HDR_SIZE);
3773 
3774         handle = __le16_to_cpu(hdr->handle);
3775         flags  = hci_flags(handle);
3776         handle = hci_handle(handle);
3777 
3778         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3779                handle, flags);
3780 
3781         hdev->stat.acl_rx++;
3782 
3783         hci_dev_lock(hdev);
3784         conn = hci_conn_hash_lookup_handle(hdev, handle);
3785         if (conn && hci_dev_test_flag(hdev, HCI_MGMT))
3786                 mgmt_device_connected(hdev, conn, NULL, 0);
3787         hci_dev_unlock(hdev);
3788 
3789         if (conn) {
3790                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3791 
3792                 /* Send to upper protocol */
3793                 l2cap_recv_acldata(conn, skb, flags);
3794                 return;
3795         } else {
3796                 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3797                            handle);
3798         }
3799 
3800         kfree_skb(skb);
3801 }
3802 
3803 /* SCO data packet */
3804 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3805 {
3806         struct hci_sco_hdr *hdr = (void *) skb->data;
3807         struct hci_conn *conn;
3808         __u16 handle, flags;
3809 
3810         skb_pull(skb, HCI_SCO_HDR_SIZE);
3811 
3812         handle = __le16_to_cpu(hdr->handle);
3813         flags  = hci_flags(handle);
3814         handle = hci_handle(handle);
3815 
3816         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3817                handle, flags);
3818 
3819         hdev->stat.sco_rx++;
3820 
3821         hci_dev_lock(hdev);
3822         conn = hci_conn_hash_lookup_handle(hdev, handle);
3823         hci_dev_unlock(hdev);
3824 
3825         if (conn) {
3826                 /* Send to upper protocol */
3827                 hci_skb_pkt_status(skb) = flags & 0x03;
3828                 sco_recv_scodata(conn, skb);
3829                 return;
3830         } else {
3831                 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3832                                        handle);
3833         }
3834 
3835         kfree_skb(skb);
3836 }
3837 
3838 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3839 {
3840         struct hci_iso_hdr *hdr;
3841         struct hci_conn *conn;
3842         __u16 handle, flags;
3843 
3844         hdr = skb_pull_data(skb, sizeof(*hdr));
3845         if (!hdr) {
3846                 bt_dev_err(hdev, "ISO packet too small");
3847                 goto drop;
3848         }
3849 
3850         handle = __le16_to_cpu(hdr->handle);
3851         flags  = hci_flags(handle);
3852         handle = hci_handle(handle);
3853 
3854         bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3855                    handle, flags);
3856 
3857         hci_dev_lock(hdev);
3858         conn = hci_conn_hash_lookup_handle(hdev, handle);
3859         hci_dev_unlock(hdev);
3860 
3861         if (!conn) {
3862                 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3863                            handle);
3864                 goto drop;
3865         }
3866 
3867         /* Send to upper protocol */
3868         iso_recv(conn, skb, flags);
3869         return;
3870 
3871 drop:
3872         kfree_skb(skb);
3873 }
3874 
3875 static bool hci_req_is_complete(struct hci_dev *hdev)
3876 {
3877         struct sk_buff *skb;
3878 
3879         skb = skb_peek(&hdev->cmd_q);
3880         if (!skb)
3881                 return true;
3882 
3883         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3884 }
3885 
3886 static void hci_resend_last(struct hci_dev *hdev)
3887 {
3888         struct hci_command_hdr *sent;
3889         struct sk_buff *skb;
3890         u16 opcode;
3891 
3892         if (!hdev->sent_cmd)
3893                 return;
3894 
3895         sent = (void *) hdev->sent_cmd->data;
3896         opcode = __le16_to_cpu(sent->opcode);
3897         if (opcode == HCI_OP_RESET)
3898                 return;
3899 
3900         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3901         if (!skb)
3902                 return;
3903 
3904         skb_queue_head(&hdev->cmd_q, skb);
3905         queue_work(hdev->workqueue, &hdev->cmd_work);
3906 }
3907 
3908 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3909                           hci_req_complete_t *req_complete,
3910                           hci_req_complete_skb_t *req_complete_skb)
3911 {
3912         struct sk_buff *skb;
3913         unsigned long flags;
3914 
3915         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3916 
3917         /* If the completed command doesn't match the last one that was
3918          * sent, we need to do special handling of it.
3919          */
3920         if (!hci_sent_cmd_data(hdev, opcode)) {
3921                 /* Some CSR-based controllers generate a spontaneous
3922                  * reset complete event during init and any pending
3923                  * command will never be completed. In such a case we
3924                  * need to resend whatever was the last sent
3925                  * command.
3926                  */
3927                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3928                         hci_resend_last(hdev);
3929 
3930                 return;
3931         }
3932 
3933         /* If we reach this point this event matches the last command sent */
3934         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3935 
3936         /* If the command succeeded and there are still more commands in
3937          * this request, the request is not yet complete.
3938          */
3939         if (!status && !hci_req_is_complete(hdev))
3940                 return;
3941 
3942         skb = hdev->req_skb;
3943 
3944         /* If this was the last command in a request, the complete
3945          * callback would be found in hdev->req_skb instead of the
3946          * command queue (hdev->cmd_q).
3947          */
3948         if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
3949                 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3950                 return;
3951         }
3952 
3953         if (skb && bt_cb(skb)->hci.req_complete) {
3954                 *req_complete = bt_cb(skb)->hci.req_complete;
3955                 return;
3956         }
3957 
3958         /* Remove all pending commands belonging to this request */
3959         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3960         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3961                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3962                         __skb_queue_head(&hdev->cmd_q, skb);
3963                         break;
3964                 }
3965 
3966                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3967                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3968                 else
3969                         *req_complete = bt_cb(skb)->hci.req_complete;
3970                 dev_kfree_skb_irq(skb);
3971         }
3972         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3973 }
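
/* Sketch of the cmd_q layout the loop above relies on: each request's
 * first command is tagged HCI_REQ_START, so with two queued requests
 *
 *   head -> [START A1][A2][A3][START B1][B2]
 *
 * a failure while A1 is in flight frees A2 and A3 (harvesting their
 * completion callback, if any) and stops at B1, which is put back at
 * the head so request B proceeds untouched.
 */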
3974 
3975 static void hci_rx_work(struct work_struct *work)
3976 {
3977         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3978         struct sk_buff *skb;
3979 
3980         BT_DBG("%s", hdev->name);
3981 
3982         /* The kcov_remote functions are used to collect packet-parsing
3983          * coverage from this background thread and to associate that
3984          * coverage with the syscall thread that originally injected the
3985          * packet. This helps with fuzzing the kernel.
3986          */
3987         for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
3988                 kcov_remote_start_common(skb_get_kcov_handle(skb));
3989 
3990                 /* Send copy to monitor */
3991                 hci_send_to_monitor(hdev, skb);
3992 
3993                 if (atomic_read(&hdev->promisc)) {
3994                         /* Send copy to the sockets */
3995                         hci_send_to_sock(hdev, skb);
3996                 }
3997 
3998                 /* If the device has been opened in HCI_USER_CHANNEL,
3999                  * userspace has exclusive access to the device.
4000                  * While the device is in HCI_INIT, we still need to
4001                  * pass packets to the driver so that it can
4002                  * complete its setup().
4003                  */
4004                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4005                     !test_bit(HCI_INIT, &hdev->flags)) {
4006                         kfree_skb(skb);
4007                         continue;
4008                 }
4009 
4010                 if (test_bit(HCI_INIT, &hdev->flags)) {
4011                         /* Don't process data packets in this state. */
4012                         switch (hci_skb_pkt_type(skb)) {
4013                         case HCI_ACLDATA_PKT:
4014                         case HCI_SCODATA_PKT:
4015                         case HCI_ISODATA_PKT:
4016                                 kfree_skb(skb);
4017                                 continue;
4018                         }
4019                 }
4020 
4021                 /* Process frame */
4022                 switch (hci_skb_pkt_type(skb)) {
4023                 case HCI_EVENT_PKT:
4024                         BT_DBG("%s Event packet", hdev->name);
4025                         hci_event_packet(hdev, skb);
4026                         break;
4027 
4028                 case HCI_ACLDATA_PKT:
4029                         BT_DBG("%s ACL data packet", hdev->name);
4030                         hci_acldata_packet(hdev, skb);
4031                         break;
4032 
4033                 case HCI_SCODATA_PKT:
4034                         BT_DBG("%s SCO data packet", hdev->name);
4035                         hci_scodata_packet(hdev, skb);
4036                         break;
4037 
4038                 case HCI_ISODATA_PKT:
4039                         BT_DBG("%s ISO data packet", hdev->name);
4040                         hci_isodata_packet(hdev, skb);
4041                         break;
4042 
4043                 default:
4044                         kfree_skb(skb);
4045                         break;
4046                 }
4047         }
4048 }
4049 
4050 static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
4051 {
4052         int err;
4053 
4054         bt_dev_dbg(hdev, "skb %p", skb);
4055 
4056         kfree_skb(hdev->sent_cmd);
4057 
4058         hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4059         if (!hdev->sent_cmd) {
4060                 skb_queue_head(&hdev->cmd_q, skb);
4061                 queue_work(hdev->workqueue, &hdev->cmd_work);
4062                 return;
4063         }
4064 
4065         err = hci_send_frame(hdev, skb);
4066         if (err < 0) {
4067                 hci_cmd_sync_cancel_sync(hdev, -err);
4068                 return;
4069         }
4070 
4071         if (hdev->req_status == HCI_REQ_PEND &&
4072             !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
4073                 kfree_skb(hdev->req_skb);
4074                 hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4075         }
4076 
4077         atomic_dec(&hdev->cmd_cnt);
4078 }
4079 
4080 static void hci_cmd_work(struct work_struct *work)
4081 {
4082         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4083         struct sk_buff *skb;
4084 
4085         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4086                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4087 
4088         /* Send queued commands */
4089         if (atomic_read(&hdev->cmd_cnt)) {
4090                 skb = skb_dequeue(&hdev->cmd_q);
4091                 if (!skb)
4092                         return;
4093 
4094                 hci_send_cmd_sync(hdev, skb);
4095 
4096                 rcu_read_lock();
4097                 if (test_bit(HCI_RESET, &hdev->flags) ||
4098                     hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4099                         cancel_delayed_work(&hdev->cmd_timer);
4100                 else
4101                         queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4102                                            HCI_CMD_TIMEOUT);
4103                 rcu_read_unlock();
4104         }
4105 }
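
/* Flow-control sketch for the above: hdev->cmd_cnt mirrors the
 * controller's Num_HCI_Command_Packets credit.  hci_send_cmd_sync()
 * decrements it once per command sent, and the event path refills it
 * from Command Complete/Status events and re-queues this work item;
 * while the credit is zero, commands simply wait in cmd_q.
 */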
4106 
