net/bluetooth/hci_sock.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

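/* Number of sockets currently bound to the monitor channel; lets the
 * TX/RX hot paths skip building monitor copies when nobody is listening.
 */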
static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

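/* Test bit nr in a bitmap made of __u32 words: nr >> 5 selects the word,
 * nr & 31 the bit within it. Used below for the event and OCF masks.
 */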
static inline int hci_test_bit(int nr, void *addr)
{
        return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
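/*
 * Caps what a socket without CAP_NET_RAW may do: type_mask and
 * event_mask bound the receive filter that can be installed via
 * HCI_FILTER (bit 4 of type_mask corresponds to HCI_EVENT_PKT), and
 * ocf_mask[] holds one four-word bitmap of permitted OCFs per OGF,
 * consulted before commands are sent (see hci_sock_setsockopt() and
 * hci_sock_sendmsg()).
 */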
static struct hci_sec_filter hci_sec_filter = {
        /* Packet types */
        0x10,
        /* Events */
        { 0x1000d9fe, 0x0000b00c },
        /* Commands */
        {
                { 0x0 },
                /* OGF_LINK_CTL */
                { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
                /* OGF_LINK_POLICY */
                { 0x00005200, 0x00000000, 0x00000000, 0x00 },
                /* OGF_HOST_CTL */
                { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
                /* OGF_INFO_PARAM */
                { 0x000002be, 0x00000000, 0x00000000, 0x00 },
                /* OGF_STATUS_PARAM */
                { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
        }
};

static struct bt_sock_list hci_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

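/* Return true if the socket's receive filter rejects this packet, i.e.
 * it must not be queued on this socket.
 */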
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
        struct hci_filter *flt;
        int flt_type, flt_event;

        /* Apply filter */
        flt = &hci_pi(sk)->filter;

        if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
                flt_type = 0;
        else
                flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

        if (!test_bit(flt_type, &flt->type_mask))
                return true;

        /* Extra filter for event packets only */
        if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
                return false;

        flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

        if (!hci_test_bit(flt_event, &flt->event_mask))
                return true;

        /* Check filter only when opcode is set */
        if (!flt->opcode)
                return false;

        if (flt_event == HCI_EV_CMD_COMPLETE &&
            flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
                return true;

        if (flt_event == HCI_EV_CMD_STATUS &&
            flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
                return true;

        return false;
}

/* Send frame to RAW and USER channel sockets */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct sock *sk;
        struct sk_buff *skb_copy = NULL;

        BT_DBG("hdev %p len %d", hdev, skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
                        continue;

                /* Don't send frame to the socket it came from */
                if (skb->sk == sk)
                        continue;

                if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
                        if (is_filtered_packet(sk, skb))
                                continue;
                } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                        if (!bt_cb(skb)->incoming)
                                continue;
                        if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
                            bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
                            bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
                                continue;
                } else {
                        /* Don't send frame to other channel types */
                        continue;
                }

                if (!skb_copy) {
                        /* Create a private copy with headroom */
                        skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
                        if (!skb_copy)
                                continue;

                        /* Put type byte before the data */
                        memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
                }

                nskb = skb_clone(skb_copy, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);

        kfree_skb(skb_copy);
}

/* Send frame to control socket */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
        struct sock *sk;

        BT_DBG("len %d", skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                /* Skip the original socket */
                if (sk == skip_sk)
                        continue;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
                        continue;

                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct sock *sk;
        struct sk_buff *skb_copy = NULL;
        __le16 opcode;

        if (!atomic_read(&monitor_promisc))
                return;

        BT_DBG("hdev %p len %d", hdev, skb->len);

        switch (bt_cb(skb)->pkt_type) {
        case HCI_COMMAND_PKT:
                opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
                break;
        case HCI_EVENT_PKT:
                opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
                break;
        case HCI_ACLDATA_PKT:
                if (bt_cb(skb)->incoming)
                        opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
                else
                        opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
                break;
        case HCI_SCODATA_PKT:
                if (bt_cb(skb)->incoming)
                        opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
                else
                        opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
                break;
        default:
                return;
        }

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
                        continue;

                if (!skb_copy) {
                        struct hci_mon_hdr *hdr;

                        /* Create a private copy with headroom */
                        skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
                                               GFP_ATOMIC);
                        if (!skb_copy)
                                continue;

                        /* Put header before the data */
                        hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
                        hdr->opcode = opcode;
                        hdr->index = cpu_to_le16(hdev->id);
                        hdr->len = cpu_to_le16(skb->len);
                }

                nskb = skb_clone(skb_copy, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);

        kfree_skb(skb_copy);
}

static void send_monitor_event(struct sk_buff *skb)
{
        struct sock *sk;

        BT_DBG("len %d", skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
                        continue;

                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);
}

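/* Build the HCI_MON_NEW_INDEX / HCI_MON_DEL_INDEX packet announcing a
 * controller being registered or unregistered on the monitor channel.
 */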
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
        struct hci_mon_hdr *hdr;
        struct hci_mon_new_index *ni;
        struct sk_buff *skb;
        __le16 opcode;

        switch (event) {
        case HCI_DEV_REG:
                skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
                ni->type = hdev->dev_type;
                ni->bus = hdev->bus;
                bacpy(&ni->bdaddr, &hdev->bdaddr);
                memcpy(ni->name, hdev->name, 8);

                opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
                break;

        case HCI_DEV_UNREG:
                skb = bt_skb_alloc(0, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
                break;

        default:
                return NULL;
        }

        __net_timestamp(skb);

        hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
        hdr->opcode = opcode;
        hdr->index = cpu_to_le16(hdev->id);
        hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

        return skb;
}

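/* Replay a NEW_INDEX event for every registered controller so a freshly
 * bound monitor socket starts with a complete view of the present devices.
 */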
static void send_monitor_replay(struct sock *sk)
{
        struct hci_dev *hdev;

        read_lock(&hci_dev_list_lock);

        list_for_each_entry(hdev, &hci_dev_list, list) {
                struct sk_buff *skb;

                skb = create_monitor_event(hdev, HCI_DEV_REG);
                if (!skb)
                        continue;

                if (sock_queue_rcv_skb(sk, skb))
                        kfree_skb(skb);
        }

        read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
        struct hci_event_hdr *hdr;
        struct hci_ev_stack_internal *ev;
        struct sk_buff *skb;

        skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
        if (!skb)
                return;

        hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
        hdr->evt  = HCI_EV_STACK_INTERNAL;
        hdr->plen = sizeof(*ev) + dlen;

        ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
        ev->type = type;
        memcpy(ev->data, data, dlen);

        bt_cb(skb)->incoming = 1;
        __net_timestamp(skb);

        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        hci_send_to_sock(hdev, skb);
        kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
        struct hci_ev_si_device ev;

        BT_DBG("hdev %s event %d", hdev->name, event);

        /* Send event to monitor */
        if (atomic_read(&monitor_promisc)) {
                struct sk_buff *skb;

                skb = create_monitor_event(hdev, event);
                if (skb) {
                        send_monitor_event(skb);
                        kfree_skb(skb);
                }
        }

        /* Send event to sockets */
        ev.event  = event;
        ev.dev_id = hdev->id;
        hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

        if (event == HCI_DEV_UNREG) {
                struct sock *sk;

                /* Detach sockets from device */
                read_lock(&hci_sk_list.lock);
                sk_for_each(sk, &hci_sk_list.head) {
                        bh_lock_sock_nested(sk);
                        if (hci_pi(sk)->hdev == hdev) {
                                hci_pi(sk)->hdev = NULL;
                                sk->sk_err = EPIPE;
                                sk->sk_state = BT_OPEN;
                                sk->sk_state_change(sk);

                                hci_dev_put(hdev);
                        }
                        bh_unlock_sock(sk);
                }
                read_unlock(&hci_sk_list.lock);
        }
}

static int hci_sock_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!sk)
                return 0;

        hdev = hci_pi(sk)->hdev;

        if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
                atomic_dec(&monitor_promisc);

        bt_sock_unlink(&hci_sk_list, sk);

        if (hdev) {
                if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                        mgmt_index_added(hdev);
                        clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
                        hci_dev_close(hdev->id);
                }

                atomic_dec(&hdev->promisc);
                hci_dev_put(hdev);
        }

        sock_orphan(sk);

        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_write_queue);

        sock_put(sk);
        return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
        bdaddr_t bdaddr;
        int err;

        if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
                return -EFAULT;

        hci_dev_lock(hdev);

        err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);

        hci_dev_unlock(hdev);

        return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
        bdaddr_t bdaddr;
        int err;

        if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
                return -EFAULT;

        hci_dev_lock(hdev);

        err = hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR);

        hci_dev_unlock(hdev);

        return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
                                unsigned long arg)
{
        struct hci_dev *hdev = hci_pi(sk)->hdev;

        if (!hdev)
                return -EBADFD;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                return -EBUSY;

        if (hdev->dev_type != HCI_BREDR)
                return -EOPNOTSUPP;

        switch (cmd) {
        case HCISETRAW:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        return -EPERM;

                if (arg)
                        set_bit(HCI_RAW, &hdev->flags);
                else
                        clear_bit(HCI_RAW, &hdev->flags);

                return 0;

        case HCIGETCONNINFO:
                return hci_get_conn_info(hdev, (void __user *) arg);

        case HCIGETAUTHINFO:
                return hci_get_auth_info(hdev, (void __user *) arg);

        case HCIBLOCKADDR:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_sock_blacklist_add(hdev, (void __user *) arg);

        case HCIUNBLOCKADDR:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_sock_blacklist_del(hdev, (void __user *) arg);
        }

        return -ENOIOCTLCMD;
}

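/* The commands handled directly in the switch below do not need the bound
 * device and run without the socket lock held; everything else is passed
 * on to hci_sock_bound_ioctl() with the lock reacquired.
 */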
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
                          unsigned long arg)
{
        void __user *argp = (void __user *) arg;
        struct sock *sk = sock->sk;
        int err;

        BT_DBG("cmd %x arg %lx", cmd, arg);

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        release_sock(sk);

        switch (cmd) {
        case HCIGETDEVLIST:
                return hci_get_dev_list(argp);

        case HCIGETDEVINFO:
                return hci_get_dev_info(argp);

        case HCIGETCONNLIST:
                return hci_get_conn_list(argp);

        case HCIDEVUP:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_open(arg);

        case HCIDEVDOWN:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_close(arg);

        case HCIDEVRESET:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_reset(arg);

        case HCIDEVRESTAT:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_reset_stat(arg);

        case HCISETSCAN:
        case HCISETAUTH:
        case HCISETENCRYPT:
        case HCISETPTYPE:
        case HCISETLINKPOL:
        case HCISETLINKMODE:
        case HCISETACLMTU:
        case HCISETSCOMTU:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_cmd(cmd, argp);

        case HCIINQUIRY:
                return hci_inquiry(argp);
        }

        lock_sock(sk);

        err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
        release_sock(sk);
        return err;
}

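/*
 * Illustrative userspace sketch (not compiled here) of binding a raw HCI
 * socket to hci0; it assumes the usual userspace layout of struct
 * sockaddr_hci (hci_family, hci_dev, hci_channel), matching the fields
 * used below:
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	struct sockaddr_hci a = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,		// hci0
 *		.hci_channel = HCI_CHANNEL_RAW,
 *	};
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 */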
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
                         int addr_len)
{
        struct sockaddr_hci haddr;
        struct sock *sk = sock->sk;
        struct hci_dev *hdev = NULL;
        int len, err = 0;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!addr)
                return -EINVAL;

        memset(&haddr, 0, sizeof(haddr));
        len = min_t(unsigned int, sizeof(haddr), addr_len);
        memcpy(&haddr, addr, len);

        if (haddr.hci_family != AF_BLUETOOTH)
                return -EINVAL;

        lock_sock(sk);

        if (sk->sk_state == BT_BOUND) {
                err = -EALREADY;
                goto done;
        }

        switch (haddr.hci_channel) {
        case HCI_CHANNEL_RAW:
                if (hci_pi(sk)->hdev) {
                        err = -EALREADY;
                        goto done;
                }

                if (haddr.hci_dev != HCI_DEV_NONE) {
                        hdev = hci_dev_get(haddr.hci_dev);
                        if (!hdev) {
                                err = -ENODEV;
                                goto done;
                        }

                        atomic_inc(&hdev->promisc);
                }

                hci_pi(sk)->hdev = hdev;
                break;

        case HCI_CHANNEL_USER:
                if (hci_pi(sk)->hdev) {
                        err = -EALREADY;
                        goto done;
                }

                if (haddr.hci_dev == HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_ADMIN)) {
                        err = -EPERM;
                        goto done;
                }

                hdev = hci_dev_get(haddr.hci_dev);
                if (!hdev) {
                        err = -ENODEV;
                        goto done;
                }

                if (test_bit(HCI_UP, &hdev->flags) ||
                    test_bit(HCI_INIT, &hdev->flags) ||
                    test_bit(HCI_SETUP, &hdev->dev_flags)) {
                        err = -EBUSY;
                        hci_dev_put(hdev);
                        goto done;
                }

                if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        err = -EUSERS;
                        hci_dev_put(hdev);
                        goto done;
                }

                mgmt_index_removed(hdev);

                err = hci_dev_open(hdev->id);
                if (err) {
                        clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
                        hci_dev_put(hdev);
                        goto done;
                }

                atomic_inc(&hdev->promisc);

                hci_pi(sk)->hdev = hdev;
                break;

        case HCI_CHANNEL_CONTROL:
                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_ADMIN)) {
                        err = -EPERM;
                        goto done;
                }

                break;

        case HCI_CHANNEL_MONITOR:
                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto done;
                }

                send_monitor_replay(sk);

                atomic_inc(&monitor_promisc);
                break;

        default:
                err = -EINVAL;
                goto done;
        }

        hci_pi(sk)->channel = haddr.hci_channel;
        sk->sk_state = BT_BOUND;

done:
        release_sock(sk);
        return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
                            int *addr_len, int peer)
{
        struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;
        int err = 0;

        BT_DBG("sock %p sk %p", sock, sk);

        if (peer)
                return -EOPNOTSUPP;

        lock_sock(sk);

        hdev = hci_pi(sk)->hdev;
        if (!hdev) {
                err = -EBADFD;
                goto done;
        }

        *addr_len = sizeof(*haddr);
        haddr->hci_family = AF_BLUETOOTH;
        haddr->hci_dev    = hdev->id;
        haddr->hci_channel = hci_pi(sk)->channel;

done:
        release_sock(sk);
        return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
                          struct sk_buff *skb)
{
        __u32 mask = hci_pi(sk)->cmsg_mask;

        if (mask & HCI_CMSG_DIR) {
                int incoming = bt_cb(skb)->incoming;
                put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
                         &incoming);
        }

        if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
                struct compat_timeval ctv;
#endif
                struct timeval tv;
                void *data;
                int len;

                skb_get_timestamp(skb, &tv);

                data = &tv;
                len = sizeof(tv);
#ifdef CONFIG_COMPAT
                if (!COMPAT_USE_64BIT_TIME &&
                    (msg->msg_flags & MSG_CMSG_COMPAT)) {
                        ctv.tv_sec = tv.tv_sec;
                        ctv.tv_usec = tv.tv_usec;
                        data = &ctv;
                        len = sizeof(ctv);
                }
#endif

                put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
        }
}

static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                            struct msghdr *msg, size_t len, int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        int copied, err;

        BT_DBG("sock %p, sk %p", sock, sk);

        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;

        if (sk->sk_state == BT_CLOSED)
                return 0;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                return err;

        msg->msg_namelen = 0;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        skb_reset_transport_header(skb);
        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

        switch (hci_pi(sk)->channel) {
        case HCI_CHANNEL_RAW:
                hci_sock_cmsg(sk, msg, skb);
                break;
        case HCI_CHANNEL_USER:
        case HCI_CHANNEL_CONTROL:
        case HCI_CHANNEL_MONITOR:
                sock_recv_timestamp(msg, sk, skb);
                break;
        }

        skb_free_datagram(sk, skb);

        return err ? : copied;
}

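/* Frames written on the RAW and USER channels start with a single packet
 * type byte (HCI_COMMAND_PKT, HCI_ACLDATA_PKT, ...), followed by the HCI
 * packet itself; the type byte is stripped into bt_cb(skb)->pkt_type below.
 */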
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                            struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;
        struct sk_buff *skb;
        int err;

        BT_DBG("sock %p sk %p", sock, sk);

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
                return -EINVAL;

        if (len < 4 || len > HCI_MAX_FRAME_SIZE)
                return -EINVAL;

        lock_sock(sk);

        switch (hci_pi(sk)->channel) {
        case HCI_CHANNEL_RAW:
        case HCI_CHANNEL_USER:
                break;
        case HCI_CHANNEL_CONTROL:
                err = mgmt_control(sk, msg, len);
                goto done;
        case HCI_CHANNEL_MONITOR:
                err = -EOPNOTSUPP;
                goto done;
        default:
                err = -EINVAL;
                goto done;
        }

        hdev = hci_pi(sk)->hdev;
        if (!hdev) {
                err = -EBADFD;
                goto done;
        }

        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = -ENETDOWN;
                goto done;
        }

        skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb)
                goto done;

        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                err = -EFAULT;
                goto drop;
        }

        bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
        skb_pull(skb, 1);

        if (hci_pi(sk)->channel == HCI_CHANNEL_RAW &&
            bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
                u16 opcode = get_unaligned_le16(skb->data);
                u16 ogf = hci_opcode_ogf(opcode);
                u16 ocf = hci_opcode_ocf(opcode);

                if (((ogf > HCI_SFLT_MAX_OGF) ||
                     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
                                   &hci_sec_filter.ocf_mask[ogf])) &&
                    !capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto drop;
                }

                if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
                        skb_queue_tail(&hdev->raw_q, skb);
                        queue_work(hdev->workqueue, &hdev->tx_work);
                } else {
                        /* Stand-alone HCI commands must be flagged as
                         * single-command requests.
                         */
                        bt_cb(skb)->req.start = true;

                        skb_queue_tail(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        } else {
                if (!capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto drop;
                }

                if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
                    bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
                    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
                    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
                        err = -EINVAL;
                        goto drop;
                }

                skb_queue_tail(&hdev->raw_q, skb);
                queue_work(hdev->workqueue, &hdev->tx_work);
        }

        err = len;

done:
        release_sock(sk);
        return err;

drop:
        kfree_skb(skb);
        goto done;
}

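/*
 * Illustrative userspace sketch (not compiled here) of installing a
 * receive filter on a RAW channel socket; hci_filter_clear() and the
 * other helpers are the usual BlueZ library macros and are an assumption
 * of this example, not part of this file:
 *
 *	struct hci_filter flt;
 *	hci_filter_clear(&flt);
 *	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
 *	hci_filter_set_event(EVT_CMD_COMPLETE, &flt);
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 */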
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, unsigned int len)
{
        struct hci_ufilter uf = { .opcode = 0 };
        struct sock *sk = sock->sk;
        int err = 0, opt = 0;

        BT_DBG("sk %p, opt %d", sk, optname);

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        switch (optname) {
        case HCI_DATA_DIR:
                if (get_user(opt, (int __user *)optval)) {
                        err = -EFAULT;
                        break;
                }

                if (opt)
                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
                else
                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
                break;

        case HCI_TIME_STAMP:
                if (get_user(opt, (int __user *)optval)) {
                        err = -EFAULT;
                        break;
                }

                if (opt)
                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
                else
                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
                break;

        case HCI_FILTER:
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        uf.type_mask = f->type_mask;
                        uf.opcode    = f->opcode;
                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
                }

                len = min_t(unsigned int, len, sizeof(uf));
                if (copy_from_user(&uf, optval, len)) {
                        err = -EFAULT;
                        break;
                }

                if (!capable(CAP_NET_RAW)) {
                        uf.type_mask &= hci_sec_filter.type_mask;
                        uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
                        uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
                }

                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        f->type_mask = uf.type_mask;
                        f->opcode    = uf.opcode;
                        *((u32 *) f->event_mask + 0) = uf.event_mask[0];
                        *((u32 *) f->event_mask + 1) = uf.event_mask[1];
                }
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

done:
        release_sock(sk);
        return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, int __user *optlen)
{
        struct hci_ufilter uf;
        struct sock *sk = sock->sk;
        int len, opt, err = 0;

        BT_DBG("sk %p, opt %d", sk, optname);

        if (get_user(len, optlen))
                return -EFAULT;

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        switch (optname) {
        case HCI_DATA_DIR:
                if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
                        opt = 1;
                else
                        opt = 0;

                if (put_user(opt, optval))
                        err = -EFAULT;
                break;

        case HCI_TIME_STAMP:
                if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
                        opt = 1;
                else
                        opt = 0;

                if (put_user(opt, optval))
                        err = -EFAULT;
                break;

        case HCI_FILTER:
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        memset(&uf, 0, sizeof(uf));
                        uf.type_mask = f->type_mask;
                        uf.opcode    = f->opcode;
                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
                }

                len = min_t(unsigned int, len, sizeof(uf));
                if (copy_to_user(optval, &uf, len))
                        err = -EFAULT;
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

done:
        release_sock(sk);
        return err;
}

static const struct proto_ops hci_sock_ops = {
        .family         = PF_BLUETOOTH,
        .owner          = THIS_MODULE,
        .release        = hci_sock_release,
        .bind           = hci_sock_bind,
        .getname        = hci_sock_getname,
        .sendmsg        = hci_sock_sendmsg,
        .recvmsg        = hci_sock_recvmsg,
        .ioctl          = hci_sock_ioctl,
        .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = hci_sock_setsockopt,
        .getsockopt     = hci_sock_getsockopt,
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .mmap           = sock_no_mmap
};

static struct proto hci_sk_proto = {
        .name           = "HCI",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
                           int kern)
{
        struct sock *sk;

        BT_DBG("sock %p", sock);

        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;

        sock->ops = &hci_sock_ops;

        sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = protocol;

        sock->state = SS_UNCONNECTED;
        sk->sk_state = BT_OPEN;

        bt_sock_link(&hci_sk_list, sk);
        return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
        .family = PF_BLUETOOTH,
        .owner  = THIS_MODULE,
        .create = hci_sock_create,
};

int __init hci_sock_init(void)
{
        int err;

        err = proto_register(&hci_sk_proto, 0);
        if (err < 0)
                return err;

        err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
        if (err < 0) {
                BT_ERR("HCI socket registration failed");
                goto error;
        }

        err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
        if (err < 0) {
                BT_ERR("Failed to create HCI proc file");
                bt_sock_unregister(BTPROTO_HCI);
                goto error;
        }

        BT_INFO("HCI socket layer initialized");

        return 0;

error:
        proto_unregister(&hci_sk_proto);
        return err;
}

void hci_sock_cleanup(void)
{
        bt_procfs_cleanup(&init_net, "hci");
        bt_sock_unregister(BTPROTO_HCI);
        proto_unregister(&hci_sk_proto);
}