/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

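/* Complete a pending synchronous request: record the result and wake
 * up the waiter sleeping in __hci_request(). Called from the event
 * handling code once the request's final command has been answered. */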
void hci_req_complete(struct hci_dev *hdev, int result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

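/* Synchronous request machinery: the req callback queues one or more
 * commands and the caller then sleeps until hci_req_complete() or
 * hci_req_cancel() flips req_status, or the timeout expires. Callers
 * serialize through hdev->req_lock (see hci_request() below). */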
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

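/* Request callback that issues the driver-supplied, mandatory and
 * optional controller initialization commands. The replies are handled
 * asynchronously by the event code, which completes the waiter in
 * __hci_request() once the sequence has been answered. */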
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                tasklet_schedule(&hdev->cmd_task);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = cpu_to_le16(0xffff);
                cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Page timeout ~20 secs */
        param = cpu_to_le16(0x8000);
        hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */
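/* Drop all entries from the inquiry cache. Runs under the device
 * lock, so it must not sleep. */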
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
                /* Entry not in the cache. Add new one. */
                if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
                        return;
                e->next     = cache->list;
                cache->list = e;
        }

        memcpy(&e->data, data, sizeof(*data));
        e->timestamp = jiffies;
        cache->timestamp = jiffies;
}

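/* Copy up to num cached entries into buf as struct inquiry_info
 * records and return how many were copied. Also runs under the
 * device lock and therefore must not sleep. */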
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

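/* HCIINQUIRY ioctl handler: flush the cache if it is stale (or if the
 * caller asked for a flush), run a fresh inquiry when needed, then
 * copy the cached results back to user space. */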
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(ir.dev_id)))
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                        inquiry_cache_empty(hdev) ||
                                        ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);
        if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
                goto done;

        /* For an unlimited number of responses use a buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and
         * copy it to user space afterwards.
         */
        if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

/* ---- HCI ioctl helpers ---- */

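/* Bring the device up: honour rfkill and quirks, call the driver's
 * open(), and (unless the device is raw) run the HCI init sequence
 * before declaring the interface HCI_UP. */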
int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non BR/EDR controllers as raw devices for now */
        if (hdev->dev_type != HCI_BREDR)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);

                //__hci_request(hdev, hci_reset_req, 0, HZ);
                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

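/* Tear the device down: cancel any pending request, kill the tasklets,
 * flush caches and queues, reset the controller and call the driver's
 * close(). Shared by hci_dev_close(), rfkill and unregistration. */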
static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(dr.dev_id)))
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        if (!(dl = kzalloc(size, GFP_KERNEL)))
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;
                hdev = list_entry(p, struct hci_dev, list);
                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;
                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(di.dev_id)))
                return -ENODEV;

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id = 0;

        BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
                                                hdev->bus, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        for (i = 0; i < 3; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hdev->workqueue = create_singlethread_workqueue(hdev->name);
        if (!hdev->workqueue)
                goto nomem;

        hci_register_sysfs(hdev);

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        hci_notify(hdev, HCI_DEV_REG);

        return id;

nomem:
        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < 3; i++)
                kfree_skb(hdev->reassembly[i]);

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        hci_unregister_sysfs(hdev);

        destroy_workqueue(hdev->workqueue);

        __hci_dev_put(hdev);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                                && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        /* Queue frame for rx task */
        skb_queue_tail(&hdev->rx_q, skb);
        tasklet_schedule(&hdev->rx_task);

        return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

/* Receive packet type fragment */
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])

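/* Reassemble a stream of partial HCI packets into complete frames.
 * Packet types 2..4 (ACL, SCO, event) map to reassembly slots 0..2,
 * hence the (type - 2) in __reassembly() above. The expected frame
 * length comes from the packet header and is tracked in skb->cb. */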
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
                return -EILSEQ;

        while (count) {
                struct sk_buff *skb = __reassembly(hdev, type);
                struct { int expect; } *scb;
                int len = 0;

                if (!skb) {
                        /* Start of the frame */

                        switch (type) {
                        case HCI_EVENT_PKT:
                                if (count >= HCI_EVENT_HDR_SIZE) {
                                        struct hci_event_hdr *h = data;
                                        len = HCI_EVENT_HDR_SIZE + h->plen;
                                } else
                                        return -EILSEQ;
                                break;

                        case HCI_ACLDATA_PKT:
                                if (count >= HCI_ACL_HDR_SIZE) {
                                        struct hci_acl_hdr *h = data;
                                        len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
                                } else
                                        return -EILSEQ;
                                break;

                        case HCI_SCODATA_PKT:
                                if (count >= HCI_SCO_HDR_SIZE) {
                                        struct hci_sco_hdr *h = data;
                                        len = HCI_SCO_HDR_SIZE + h->dlen;
                                } else
                                        return -EILSEQ;
                                break;
                        }

                        skb = bt_skb_alloc(len, GFP_ATOMIC);
                        if (!skb) {
                                BT_ERR("%s no memory for packet", hdev->name);
                                return -ENOMEM;
                        }

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = type;

                        __reassembly(hdev, type) = skb;

                        scb = (void *) skb->cb;
                        scb->expect = len;
                } else {
                        /* Continuation */

                        scb = (void *) skb->cb;
                        len = scb->expect;
                }

                len = min(len, count);

                memcpy(skb_put(skb, len), data, len);

                scb->expect -= len;

                if (scb->expect == 0) {
                        /* Complete frame */

                        __reassembly(hdev, type) = NULL;

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);
                }

                count -= len; data += len;
        }

        return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

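/* Hand one frame to the driver. If any socket is in promiscuous mode,
 * a timestamped copy goes to the HCI socket layer first; the skb is
 * then orphaned so socket accounting does not outlive the send. */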
static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                __net_timestamp(skb);

                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;

        skb_queue_tail(&hdev->cmd_q, skb);
        tasklet_schedule(&hdev->cmd_task);

        return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

        if (!(list = skb_shinfo(skb)->frag_list)) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
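/* Pick the connection of the given type with the least data in
 * flight and grant it a fair share of the controller's free buffers:
 * quote = free buffer slots / number of active connections (min 1). */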
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
                int q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}

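/* ACL tx timeout handler: the controller stopped acknowledging
 * completed packets, so forcibly disconnect every ACL link that still
 * has unacked data (reason 0x13, remote user terminated connection). */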
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        BT_ERR("%s ACL tx timeout", hdev->name);

        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == ACL_LINK && c->sent) {
                        BT_ERR("%s killing stalled ACL connection %s",
                                hdev->name, batostr(&c->dst));
                        hci_acl_disconn(c, 0x13);
                }
        }
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
                        hci_acl_tx_to(hdev);
        }

        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);

                        hci_conn_enter_active_mode(conn);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        conn->sent++;
                }
        }
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_tx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        read_lock(&hci_task_lock);

        BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        hci_sched_esco(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);

        read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                hci_conn_enter_active_mode(conn);

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
                        hp->recv_acldata(conn, skb, flags);
                        return;
                }
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
                        hp->recv_scodata(conn, skb);
                        return;
                }
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

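/* RX tasklet: drain rx_q, mirror frames to promiscuous sockets, then
 * dispatch events to the event handler and data packets to the bound
 * upper protocol, dropping data while in raw or init state. */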
static void hci_rx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        read_lock(&hci_task_lock);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }

        read_unlock(&hci_task_lock);
}

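/* Command tasklet: cmd_cnt tracks how many commands the controller
 * will currently accept. Send the next queued command only when a
 * slot is free, keep a clone in sent_cmd for reply matching, and
 * recover with a forced slot if no reply arrives within a second. */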
static void hci_cmd_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
                BT_ERR("%s command tx timeout", hdev->name);
                atomic_set(&hdev->cmd_cnt, 1);
        }

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
                kfree_skb(hdev->sent_cmd);

                if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        hdev->cmd_last_tx = jiffies;
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        tasklet_schedule(&hdev->cmd_task);
                }
        }
}