2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41 #include <net/bluetooth/amp.h>
45 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
46 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
48 static LIST_HEAD(chan_list);
49 static DEFINE_RWLOCK(chan_list_lock);
51 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
52 u8 code, u8 ident, u16 dlen, void *data);
53 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
55 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
56 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
61 /* ---- L2CAP channels ---- */
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
68 list_for_each_entry(c, &conn->chan_l, list) {
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
80 list_for_each_entry(c, &conn->chan_l, list) {
87 /* Find channel with given SCID.
88 * Returns locked channel. */
89 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
94 mutex_lock(&conn->chan_lock);
95 c = __l2cap_get_chan_by_scid(conn, cid);
98 mutex_unlock(&conn->chan_lock);
103 /* Find channel with given DCID.
104 * Returns locked channel.
106 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
109 struct l2cap_chan *c;
111 mutex_lock(&conn->chan_lock);
112 c = __l2cap_get_chan_by_dcid(conn, cid);
115 mutex_unlock(&conn->chan_lock);
120 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
123 struct l2cap_chan *c;
125 list_for_each_entry(c, &conn->chan_l, list) {
126 if (c->ident == ident)
132 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
135 struct l2cap_chan *c;
137 mutex_lock(&conn->chan_lock);
138 c = __l2cap_get_chan_by_ident(conn, ident);
141 mutex_unlock(&conn->chan_lock);
146 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
148 struct l2cap_chan *c;
150 list_for_each_entry(c, &chan_list, global_l) {
151 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
157 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
161 write_lock(&chan_list_lock);
163 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
176 for (p = 0x1001; p < 0x1100; p += 2)
177 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
178 chan->psm = cpu_to_le16(p);
179 chan->sport = cpu_to_le16(p);
186 write_unlock(&chan_list_lock);
190 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
192 write_lock(&chan_list_lock);
196 write_unlock(&chan_list_lock);
201 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
203 u16 cid = L2CAP_CID_DYN_START;
205 for (; cid < L2CAP_CID_DYN_END; cid++) {
206 if (!__l2cap_get_chan_by_scid(conn, cid))
213 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
215 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
216 state_to_string(state));
219 chan->ops->state_change(chan, state);
222 static void l2cap_state_change(struct l2cap_chan *chan, int state)
224 struct sock *sk = chan->sk;
227 __l2cap_state_change(chan, state);
231 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
233 struct sock *sk = chan->sk;
238 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
240 struct sock *sk = chan->sk;
243 __l2cap_chan_set_err(chan, err);
247 static void __set_retrans_timer(struct l2cap_chan *chan)
249 if (!delayed_work_pending(&chan->monitor_timer) &&
250 chan->retrans_timeout) {
251 l2cap_set_timer(chan, &chan->retrans_timer,
252 msecs_to_jiffies(chan->retrans_timeout));
256 static void __set_monitor_timer(struct l2cap_chan *chan)
258 __clear_retrans_timer(chan);
259 if (chan->monitor_timeout) {
260 l2cap_set_timer(chan, &chan->monitor_timer,
261 msecs_to_jiffies(chan->monitor_timeout));
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
278 /* ---- L2CAP sequence number lists ---- */
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
291 size_t alloc_size, i;
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) in to a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
297 alloc_size = roundup_pow_of_two(size);
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
314 kfree(seq_list->list);
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
320 /* Constant-time check for list membership */
321 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
326 u16 mask = seq_list->mask;
328 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR;
331 } else if (seq_list->head == seq) {
332 /* Head can be removed in constant time */
333 seq_list->head = seq_list->list[seq & mask];
334 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
336 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 /* Walk the list to find the sequence number */
342 u16 prev = seq_list->head;
343 while (seq_list->list[prev & mask] != seq) {
344 prev = seq_list->list[prev & mask];
345 if (prev == L2CAP_SEQ_LIST_TAIL)
346 return L2CAP_SEQ_LIST_CLEAR;
349 /* Unlink the number from the list and clear it */
350 seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 if (seq_list->tail == seq)
353 seq_list->tail = prev;
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list, seq_list->head);
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
380 u16 mask = seq_list->mask;
382 /* All appends happen in constant time */
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
390 seq_list->list[seq_list->tail & mask] = seq;
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: close the channel with an
 * error that reflects the state it timed out in, then drop the timer's
 * channel reference.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
426 struct l2cap_chan *l2cap_chan_create(void)
428 struct l2cap_chan *chan;
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
434 mutex_init(&chan->lock);
436 write_lock(&chan_list_lock);
437 list_add(&chan->global_l, &chan_list);
438 write_unlock(&chan_list_lock);
440 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
442 chan->state = BT_OPEN;
444 kref_init(&chan->kref);
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
449 BT_DBG("chan %p", chan);
454 static void l2cap_chan_destroy(struct kref *kref)
456 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
458 BT_DBG("chan %p", chan);
460 write_lock(&chan_list_lock);
461 list_del(&chan->global_l);
462 write_unlock(&chan_list_lock);
467 void l2cap_chan_hold(struct l2cap_chan *c)
469 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
474 void l2cap_chan_put(struct l2cap_chan *c)
476 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
478 kref_put(&c->kref, l2cap_chan_destroy);
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
493 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
495 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
496 __le16_to_cpu(chan->psm), chan->dcid);
498 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
502 switch (chan->chan_type) {
503 case L2CAP_CHAN_CONN_ORIENTED:
504 if (conn->hcon->type == LE_LINK) {
506 chan->omtu = L2CAP_DEFAULT_MTU;
507 if (chan->dcid == L2CAP_CID_ATT)
508 chan->scid = L2CAP_CID_ATT;
510 chan->scid = l2cap_alloc_cid(conn);
512 /* Alloc CID for connection-oriented socket */
513 chan->scid = l2cap_alloc_cid(conn);
514 chan->omtu = L2CAP_DEFAULT_MTU;
518 case L2CAP_CHAN_CONN_LESS:
519 /* Connectionless socket */
520 chan->scid = L2CAP_CID_CONN_LESS;
521 chan->dcid = L2CAP_CID_CONN_LESS;
522 chan->omtu = L2CAP_DEFAULT_MTU;
525 case L2CAP_CHAN_CONN_FIX_A2MP:
526 chan->scid = L2CAP_CID_A2MP;
527 chan->dcid = L2CAP_CID_A2MP;
528 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
529 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
533 /* Raw socket can send/recv signalling messages only */
534 chan->scid = L2CAP_CID_SIGNALING;
535 chan->dcid = L2CAP_CID_SIGNALING;
536 chan->omtu = L2CAP_DEFAULT_MTU;
539 chan->local_id = L2CAP_BESTEFFORT_ID;
540 chan->local_stype = L2CAP_SERV_BESTEFFORT;
541 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
542 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
543 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
544 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
546 l2cap_chan_hold(chan);
548 hci_conn_hold(conn->hcon);
550 list_add(&chan->list, &conn->chan_l);
553 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
555 mutex_lock(&conn->chan_lock);
556 __l2cap_chan_add(conn, chan);
557 mutex_unlock(&conn->chan_lock);
560 void l2cap_chan_del(struct l2cap_chan *chan, int err)
562 struct l2cap_conn *conn = chan->conn;
564 __clear_chan_timer(chan);
566 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
569 struct amp_mgr *mgr = conn->hcon->amp_mgr;
570 /* Delete from channel list */
571 list_del(&chan->list);
573 l2cap_chan_put(chan);
577 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
578 hci_conn_drop(conn->hcon);
580 if (mgr && mgr->bredr_chan == chan)
581 mgr->bredr_chan = NULL;
584 if (chan->hs_hchan) {
585 struct hci_chan *hs_hchan = chan->hs_hchan;
587 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
588 amp_disconnect_logical_link(hs_hchan);
591 chan->ops->teardown(chan, err);
593 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
597 case L2CAP_MODE_BASIC:
600 case L2CAP_MODE_ERTM:
601 __clear_retrans_timer(chan);
602 __clear_monitor_timer(chan);
603 __clear_ack_timer(chan);
605 skb_queue_purge(&chan->srej_q);
607 l2cap_seq_list_free(&chan->srej_list);
608 l2cap_seq_list_free(&chan->retrans_list);
612 case L2CAP_MODE_STREAMING:
613 skb_queue_purge(&chan->tx_q);
620 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
622 struct l2cap_conn *conn = chan->conn;
623 struct sock *sk = chan->sk;
625 BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
628 switch (chan->state) {
630 chan->ops->teardown(chan, 0);
635 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
636 conn->hcon->type == ACL_LINK) {
637 __set_chan_timer(chan, sk->sk_sndtimeo);
638 l2cap_send_disconn_req(chan, reason);
640 l2cap_chan_del(chan, reason);
644 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
645 conn->hcon->type == ACL_LINK) {
646 struct l2cap_conn_rsp rsp;
649 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
650 result = L2CAP_CR_SEC_BLOCK;
652 result = L2CAP_CR_BAD_PSM;
653 l2cap_state_change(chan, BT_DISCONN);
655 rsp.scid = cpu_to_le16(chan->dcid);
656 rsp.dcid = cpu_to_le16(chan->scid);
657 rsp.result = cpu_to_le16(result);
658 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
659 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
663 l2cap_chan_del(chan, reason);
668 l2cap_chan_del(chan, reason);
672 chan->ops->teardown(chan, 0);
677 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
679 if (chan->chan_type == L2CAP_CHAN_RAW) {
680 switch (chan->sec_level) {
681 case BT_SECURITY_HIGH:
682 return HCI_AT_DEDICATED_BONDING_MITM;
683 case BT_SECURITY_MEDIUM:
684 return HCI_AT_DEDICATED_BONDING;
686 return HCI_AT_NO_BONDING;
688 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
689 if (chan->sec_level == BT_SECURITY_LOW)
690 chan->sec_level = BT_SECURITY_SDP;
692 if (chan->sec_level == BT_SECURITY_HIGH)
693 return HCI_AT_NO_BONDING_MITM;
695 return HCI_AT_NO_BONDING;
697 switch (chan->sec_level) {
698 case BT_SECURITY_HIGH:
699 return HCI_AT_GENERAL_BONDING_MITM;
700 case BT_SECURITY_MEDIUM:
701 return HCI_AT_GENERAL_BONDING;
703 return HCI_AT_NO_BONDING;
708 /* Service level security */
709 int l2cap_chan_check_security(struct l2cap_chan *chan)
711 struct l2cap_conn *conn = chan->conn;
714 auth_type = l2cap_get_auth_type(chan);
716 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
719 static u8 l2cap_get_ident(struct l2cap_conn *conn)
723 /* Get next available identificator.
724 * 1 - 128 are used by kernel.
725 * 129 - 199 are reserved.
726 * 200 - 254 are used by utilities like l2ping, etc.
729 spin_lock(&conn->lock);
731 if (++conn->tx_ident > 128)
736 spin_unlock(&conn->lock);
741 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
744 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
747 BT_DBG("code 0x%2.2x", code);
752 if (lmp_no_flush_capable(conn->hcon->hdev))
753 flags = ACL_START_NO_FLUSH;
757 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
758 skb->priority = HCI_PRIO_MAX;
760 hci_send_acl(conn->hchan, skb, flags);
763 static bool __chan_is_moving(struct l2cap_chan *chan)
765 return chan->move_state != L2CAP_MOVE_STABLE &&
766 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
769 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
771 struct hci_conn *hcon = chan->conn->hcon;
774 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
777 if (chan->hs_hcon && !__chan_is_moving(chan)) {
779 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
786 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
787 lmp_no_flush_capable(hcon->hdev))
788 flags = ACL_START_NO_FLUSH;
792 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
793 hci_send_acl(chan->conn->hchan, skb, flags);
796 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
798 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
799 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
801 if (enh & L2CAP_CTRL_FRAME_TYPE) {
804 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
805 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
812 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
813 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
820 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
822 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
823 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
825 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
828 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
829 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
836 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
837 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
844 static inline void __unpack_control(struct l2cap_chan *chan,
847 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
848 __unpack_extended_control(get_unaligned_le32(skb->data),
849 &bt_cb(skb)->control);
850 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
852 __unpack_enhanced_control(get_unaligned_le16(skb->data),
853 &bt_cb(skb)->control);
854 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
858 static u32 __pack_extended_control(struct l2cap_ctrl *control)
862 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
863 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
865 if (control->sframe) {
866 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
867 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
868 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
870 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
871 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
877 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
881 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
882 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
884 if (control->sframe) {
885 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
886 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
887 packed |= L2CAP_CTRL_FRAME_TYPE;
889 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
890 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
896 static inline void __pack_control(struct l2cap_chan *chan,
897 struct l2cap_ctrl *control,
900 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
901 put_unaligned_le32(__pack_extended_control(control),
902 skb->data + L2CAP_HDR_SIZE);
904 put_unaligned_le16(__pack_enhanced_control(control),
905 skb->data + L2CAP_HDR_SIZE);
909 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
911 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
912 return L2CAP_EXT_HDR_SIZE;
914 return L2CAP_ENH_HDR_SIZE;
917 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
921 struct l2cap_hdr *lh;
922 int hlen = __ertm_hdr_size(chan);
924 if (chan->fcs == L2CAP_FCS_CRC16)
925 hlen += L2CAP_FCS_SIZE;
927 skb = bt_skb_alloc(hlen, GFP_KERNEL);
930 return ERR_PTR(-ENOMEM);
932 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
933 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
934 lh->cid = cpu_to_le16(chan->dcid);
936 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
937 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
939 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
941 if (chan->fcs == L2CAP_FCS_CRC16) {
942 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
943 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
946 skb->priority = HCI_PRIO_MAX;
950 static void l2cap_send_sframe(struct l2cap_chan *chan,
951 struct l2cap_ctrl *control)
956 BT_DBG("chan %p, control %p", chan, control);
958 if (!control->sframe)
961 if (__chan_is_moving(chan))
964 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
968 if (control->super == L2CAP_SUPER_RR)
969 clear_bit(CONN_RNR_SENT, &chan->conn_state);
970 else if (control->super == L2CAP_SUPER_RNR)
971 set_bit(CONN_RNR_SENT, &chan->conn_state);
973 if (control->super != L2CAP_SUPER_SREJ) {
974 chan->last_acked_seq = control->reqseq;
975 __clear_ack_timer(chan);
978 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
979 control->final, control->poll, control->super);
981 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
982 control_field = __pack_extended_control(control);
984 control_field = __pack_enhanced_control(control);
986 skb = l2cap_create_sframe_pdu(chan, control_field);
988 l2cap_do_send(chan, skb);
991 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
993 struct l2cap_ctrl control;
995 BT_DBG("chan %p, poll %d", chan, poll);
997 memset(&control, 0, sizeof(control));
1001 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1002 control.super = L2CAP_SUPER_RNR;
1004 control.super = L2CAP_SUPER_RR;
1006 control.reqseq = chan->buffer_seq;
1007 l2cap_send_sframe(chan, &control);
1010 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1012 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1015 static bool __amp_capable(struct l2cap_chan *chan)
1017 struct l2cap_conn *conn = chan->conn;
1020 hci_amp_capable() &&
1021 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
1022 conn->fixed_chan_mask & L2CAP_FC_A2MP)
1028 static bool l2cap_check_efs(struct l2cap_chan *chan)
1030 /* Check EFS parameters */
1034 void l2cap_send_conn_req(struct l2cap_chan *chan)
1036 struct l2cap_conn *conn = chan->conn;
1037 struct l2cap_conn_req req;
1039 req.scid = cpu_to_le16(chan->scid);
1040 req.psm = chan->psm;
1042 chan->ident = l2cap_get_ident(conn);
1044 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1046 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1049 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1051 struct l2cap_create_chan_req req;
1052 req.scid = cpu_to_le16(chan->scid);
1053 req.psm = chan->psm;
1054 req.amp_id = amp_id;
1056 chan->ident = l2cap_get_ident(chan->conn);
1058 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1062 static void l2cap_move_setup(struct l2cap_chan *chan)
1064 struct sk_buff *skb;
1066 BT_DBG("chan %p", chan);
1068 if (chan->mode != L2CAP_MODE_ERTM)
1071 __clear_retrans_timer(chan);
1072 __clear_monitor_timer(chan);
1073 __clear_ack_timer(chan);
1075 chan->retry_count = 0;
1076 skb_queue_walk(&chan->tx_q, skb) {
1077 if (bt_cb(skb)->control.retries)
1078 bt_cb(skb)->control.retries = 1;
1083 chan->expected_tx_seq = chan->buffer_seq;
1085 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1086 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1087 l2cap_seq_list_clear(&chan->retrans_list);
1088 l2cap_seq_list_clear(&chan->srej_list);
1089 skb_queue_purge(&chan->srej_q);
1091 chan->tx_state = L2CAP_TX_STATE_XMIT;
1092 chan->rx_state = L2CAP_RX_STATE_MOVE;
1094 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1097 static void l2cap_move_done(struct l2cap_chan *chan)
1099 u8 move_role = chan->move_role;
1100 BT_DBG("chan %p", chan);
1102 chan->move_state = L2CAP_MOVE_STABLE;
1103 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1105 if (chan->mode != L2CAP_MODE_ERTM)
1108 switch (move_role) {
1109 case L2CAP_MOVE_ROLE_INITIATOR:
1110 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1111 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1113 case L2CAP_MOVE_ROLE_RESPONDER:
1114 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1119 static void l2cap_chan_ready(struct l2cap_chan *chan)
1121 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1122 chan->conf_state = 0;
1123 __clear_chan_timer(chan);
1125 chan->state = BT_CONNECTED;
1127 chan->ops->ready(chan);
1130 static void l2cap_start_connection(struct l2cap_chan *chan)
1132 if (__amp_capable(chan)) {
1133 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1134 a2mp_discover_amp(chan);
1136 l2cap_send_conn_req(chan);
1140 static void l2cap_do_start(struct l2cap_chan *chan)
1142 struct l2cap_conn *conn = chan->conn;
1144 if (conn->hcon->type == LE_LINK) {
1145 l2cap_chan_ready(chan);
1149 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1150 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1153 if (l2cap_chan_check_security(chan) &&
1154 __l2cap_no_conn_pending(chan)) {
1155 l2cap_start_connection(chan);
1158 struct l2cap_info_req req;
1159 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1161 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1162 conn->info_ident = l2cap_get_ident(conn);
1164 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1166 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1171 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1173 u32 local_feat_mask = l2cap_feat_mask;
1175 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1178 case L2CAP_MODE_ERTM:
1179 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1180 case L2CAP_MODE_STREAMING:
1181 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1187 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1189 struct sock *sk = chan->sk;
1190 struct l2cap_conn *conn = chan->conn;
1191 struct l2cap_disconn_req req;
1196 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1197 __clear_retrans_timer(chan);
1198 __clear_monitor_timer(chan);
1199 __clear_ack_timer(chan);
1202 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1203 l2cap_state_change(chan, BT_DISCONN);
1207 req.dcid = cpu_to_le16(chan->dcid);
1208 req.scid = cpu_to_le16(chan->scid);
1209 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1213 __l2cap_state_change(chan, BT_DISCONN);
1214 __l2cap_chan_set_err(chan, err);
1218 /* ---- L2CAP connections ---- */
1219 static void l2cap_conn_start(struct l2cap_conn *conn)
1221 struct l2cap_chan *chan, *tmp;
1223 BT_DBG("conn %p", conn);
1225 mutex_lock(&conn->chan_lock);
1227 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1228 struct sock *sk = chan->sk;
1230 l2cap_chan_lock(chan);
1232 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1233 l2cap_chan_unlock(chan);
1237 if (chan->state == BT_CONNECT) {
1238 if (!l2cap_chan_check_security(chan) ||
1239 !__l2cap_no_conn_pending(chan)) {
1240 l2cap_chan_unlock(chan);
1244 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1245 && test_bit(CONF_STATE2_DEVICE,
1246 &chan->conf_state)) {
1247 l2cap_chan_close(chan, ECONNRESET);
1248 l2cap_chan_unlock(chan);
1252 l2cap_start_connection(chan);
1254 } else if (chan->state == BT_CONNECT2) {
1255 struct l2cap_conn_rsp rsp;
1257 rsp.scid = cpu_to_le16(chan->dcid);
1258 rsp.dcid = cpu_to_le16(chan->scid);
1260 if (l2cap_chan_check_security(chan)) {
1262 if (test_bit(BT_SK_DEFER_SETUP,
1263 &bt_sk(sk)->flags)) {
1264 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1265 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1266 chan->ops->defer(chan);
1269 __l2cap_state_change(chan, BT_CONFIG);
1270 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1271 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1275 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1276 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1279 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1282 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1283 rsp.result != L2CAP_CR_SUCCESS) {
1284 l2cap_chan_unlock(chan);
1288 set_bit(CONF_REQ_SENT, &chan->conf_state);
1289 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1290 l2cap_build_conf_req(chan, buf), buf);
1291 chan->num_conf_req++;
1294 l2cap_chan_unlock(chan);
1297 mutex_unlock(&conn->chan_lock);
1300 /* Find socket with cid and source/destination bdaddr.
1301 * Returns closest match, locked.
1303 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1307 struct l2cap_chan *c, *c1 = NULL;
1309 read_lock(&chan_list_lock);
1311 list_for_each_entry(c, &chan_list, global_l) {
1312 struct sock *sk = c->sk;
1314 if (state && c->state != state)
1317 if (c->scid == cid) {
1318 int src_match, dst_match;
1319 int src_any, dst_any;
1322 src_match = !bacmp(&bt_sk(sk)->src, src);
1323 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1324 if (src_match && dst_match) {
1325 read_unlock(&chan_list_lock);
1330 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1331 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1332 if ((src_match && dst_any) || (src_any && dst_match) ||
1333 (src_any && dst_any))
1338 read_unlock(&chan_list_lock);
1343 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1345 struct sock *parent;
1346 struct l2cap_chan *chan, *pchan;
1350 /* Check if we have socket listening on cid */
1351 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1352 conn->src, conn->dst);
1356 /* Client ATT sockets should override the server one */
1357 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1364 chan = pchan->ops->new_connection(pchan);
1368 chan->dcid = L2CAP_CID_ATT;
1370 bacpy(&bt_sk(chan->sk)->src, conn->src);
1371 bacpy(&bt_sk(chan->sk)->dst, conn->dst);
1373 __l2cap_chan_add(conn, chan);
1376 release_sock(parent);
1379 static void l2cap_conn_ready(struct l2cap_conn *conn)
1381 struct l2cap_chan *chan;
1382 struct hci_conn *hcon = conn->hcon;
1384 BT_DBG("conn %p", conn);
1386 /* For outgoing pairing which doesn't necessarily have an
1387 * associated socket (e.g. mgmt_pair_device).
1389 if (hcon->out && hcon->type == LE_LINK)
1390 smp_conn_security(hcon, hcon->pending_sec_level);
1392 mutex_lock(&conn->chan_lock);
1394 if (hcon->type == LE_LINK)
1395 l2cap_le_conn_ready(conn);
1397 list_for_each_entry(chan, &conn->chan_l, list) {
1399 l2cap_chan_lock(chan);
1401 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1402 l2cap_chan_unlock(chan);
1406 if (hcon->type == LE_LINK) {
1407 if (smp_conn_security(hcon, chan->sec_level))
1408 l2cap_chan_ready(chan);
1410 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1411 struct sock *sk = chan->sk;
1412 __clear_chan_timer(chan);
1414 __l2cap_state_change(chan, BT_CONNECTED);
1415 sk->sk_state_change(sk);
1418 } else if (chan->state == BT_CONNECT) {
1419 l2cap_do_start(chan);
1422 l2cap_chan_unlock(chan);
1425 mutex_unlock(&conn->chan_lock);
1428 /* Notify sockets that we cannot guaranty reliability anymore */
1429 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1431 struct l2cap_chan *chan;
1433 BT_DBG("conn %p", conn);
1435 mutex_lock(&conn->chan_lock);
1437 list_for_each_entry(chan, &conn->chan_l, list) {
1438 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1439 l2cap_chan_set_err(chan, err);
1442 mutex_unlock(&conn->chan_lock);
1445 static void l2cap_info_timeout(struct work_struct *work)
1447 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1450 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1451 conn->info_ident = 0;
1453 l2cap_conn_start(conn);
1458 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1459 * callback is called during registration. The ->remove callback is called
1460 * during unregistration.
An l2cap_user object is unregistered either explicitly, or implicitly when the
1462 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1463 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1464 * External modules must own a reference to the l2cap_conn object if they intend
1465 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1466 * any time if they don't.
/* Register an external l2cap_user on this connection. The ->probe()
 * callback runs before the user is linked into conn->users; returns 0
 * on success, negative errno otherwise.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
	struct hci_dev *hdev = conn->hcon->hdev;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too.
	 */

	/* Non-NULL next/prev means this user is already on some list */
	if (user->list.next || user->list.prev) {

	/* conn->hchan is NULL after l2cap_conn_del() was called */

	ret = user->probe(conn, user);

	list_add(&user->list, &conn->users);

	hci_dev_unlock(hdev);

EXPORT_SYMBOL(l2cap_register_user);
/* Unregister a previously registered l2cap_user and invoke its
 * ->remove() callback. A user that is not currently registered is
 * recognised by NULL list pointers and left alone.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
	struct hci_dev *hdev = conn->hcon->hdev;

	/* Not on any list => was never registered (or already removed) */
	if (!user->list.next || !user->list.prev)

	list_del(&user->list);
	/* Poison the pointers so double-unregister is detectable */
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);

	hci_dev_unlock(hdev);

EXPORT_SYMBOL(l2cap_unregister_user);
1526 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1528 struct l2cap_user *user;
1530 while (!list_empty(&conn->users)) {
1531 user = list_first_entry(&conn->users, struct l2cap_user, list);
1532 list_del(&user->list);
1533 user->list.next = NULL;
1534 user->list.prev = NULL;
1535 user->remove(conn, user);
/* Tear the whole L2CAP connection down: close every channel with @err
 * as the reason, detach users, drop the HCI channel and release the
 * connection reference taken at creation time.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled incoming frame */
	kfree_skb(conn->rx_skb);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels. Hold an extra ref across ops->close() so the
	 * channel cannot disappear while its lock is released.
	 */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		/* close() is called without the channel lock held */
		chan->ops->close(chan);
		l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* Only cancel the info timer if an info request was ever sent */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* Pending SMP work must be torn down together with its timer */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);

	hcon->l2cap_data = NULL;

	/* Release the reference owned by the hci_conn */
	l2cap_conn_put(conn);
1585 static void security_timeout(struct work_struct *work)
1587 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1588 security_timer.work);
1590 BT_DBG("conn %p", conn);
1592 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1593 smp_chan_destroy(conn);
1594 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Allocate and initialise the l2cap_conn object for @hcon, creating the
 * backing hci_chan and wiring up per-link-type timers. Returns the
 * connection (existing or new); allocation failures bail out early.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	hchan = hci_chan_create(hcon);

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
		/* allocation failed: undo the hci_chan we just created */
		hci_chan_del(hchan);

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;

	/* conn holds a reference on its hci_conn until l2cap_conn_free() */
	hci_conn_get(conn->hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* MTU depends on link type: LE uses le_mtu when the controller
	 * advertises one, otherwise the ACL MTU applies.
	 */
	switch (hcon->type) {
	if (hcon->hdev->le_mtu) {
		conn->mtu = hcon->hdev->le_mtu;

		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	/* LE links time out pairing; BR/EDR links time out info requests */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);

		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* kref release callback for l2cap_conn: drops the hci_conn reference
 * taken in l2cap_conn_add(). NOTE(review): the conn object itself is
 * presumably freed right after — confirm a kfree(conn) follows here.
 */
static void l2cap_conn_free(struct kref *ref)
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
1665 void l2cap_conn_get(struct l2cap_conn *conn)
1667 kref_get(&conn->ref);
1669 EXPORT_SYMBOL(l2cap_conn_get);
1671 void l2cap_conn_put(struct l2cap_conn *conn)
1673 kref_put(&conn->ref, l2cap_conn_free);
1675 EXPORT_SYMBOL(l2cap_conn_put);
1677 /* ---- Socket interface ---- */
1679 /* Find socket with psm and source / destination bdaddr.
1680 * Returns closest match.
/* Look up a global channel by PSM and address pair. An exact src/dst
 * match wins immediately; otherwise the closest wildcard (BDADDR_ANY)
 * match found is remembered in c1 and returned after the scan.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		/* state == 0 means "any state" */
		if (state && c->state != state)

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact address match ends the search at once */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))

	read_unlock(&chan_list_lock);
/* Initiate an outgoing connection on @chan to @dst (PSM- or CID-based,
 * BR/EDR or LE depending on dst_type). Validates PSM/mode, creates the
 * HCI link, attaches the channel to the l2cap_conn and kicks off the
 * L2CAP-level connect. Returns 0 or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
	       dst_type, __le16_to_cpu(psm));

	/* Pick the local adapter that routes to dst */
	hdev = hci_get_route(dst, src);
		return -EHOSTUNREACH;

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {

	/* Connection-oriented channels need either a PSM or a fixed CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {

	/* Only BASIC/ERTM/STREAMING modes are connectable here */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:

	switch (chan->state) {
		/* Already connecting */
		/* Already connected */

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, dst);

	auth_type = l2cap_get_auth_type(chan);

	/* LE vs BR/EDR is decided by the destination address type */
	if (bdaddr_type_is_le(dst_type))
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

		err = PTR_ERR(hcon);

	conn = l2cap_conn_add(hcon);
		/* no l2cap_conn: release the link reference we took */
		hci_conn_drop(hcon);

	/* Requested fixed CID already in use on this connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* chan must be unlocked while taking conn->chan_lock inside
	 * l2cap_chan_add() to preserve lock ordering.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	/* Link may already be up (e.g. reused ACL): fast-path the start */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);

			l2cap_do_start(chan);

	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
/* Block (interruptibly) until every outstanding ERTM I-frame on the
 * socket's channel has been acked, a signal arrives, or the socket
 * errors out. Returns 0 on success or a negative errno.
 */
int __l2cap_wait_ack(struct sock *sk)
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	/* Loop while unacked frames remain and the connection is alive */
	while (chan->unacked_frames > 0 && chan->conn) {

		/* Interrupted: translate remaining timeout into errno */
		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);

		timeo = schedule_timeout(timeo);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
/* Delayed-work handler for the ERTM monitor timer: feed a MONITOR_TO
 * event into the TX state machine. The work item owns a channel ref
 * which is dropped on every exit path.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

		/* early-exit path: release lock and ref without raising
		 * the monitor event
		 */
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
/* Delayed-work handler for the ERTM retransmission timer: feed a
 * RETRANS_TO event into the TX state machine. Mirrors
 * l2cap_monitor_timeout(), including the ref drop on each exit path.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

		/* early-exit path: release lock and ref without raising
		 * the retransmission event
		 */
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
/* Streaming mode transmit: append @skbs to the TX queue and flush the
 * whole queue immediately — streaming mode never waits for acks, so
 * every frame is stamped with the next txseq and sent right away.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Don't transmit while an AMP channel move is in progress */
	if (__chan_is_moving(chan))

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming frames never request retransmission */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		/* Append the FCS after the control field is final */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
/* ERTM transmit: send queued I-frames while the remote TX window has
 * room and the TX state machine is in XMIT. Each frame is cloned so
 * the original stays queued for potential retransmission. Returns the
 * number of frames sent (via 'sent', assigned outside this view).
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)

	/* Remote signalled busy (RNR): hold transmission */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))

	/* No transmission while an AMP channel move is in progress */
	if (__chan_is_moving(chan))

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggyback the F-bit if one is owed to the peer */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))

		/* Every I-frame also acknowledges received frames */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));

		/* Clone after data has been modified. Data is assumed to be
		 * read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;

		/* Advance tx_send_head; NULL once the queue tail is sent */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;

			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list,
 * refreshing each frame's reqseq/F-bit and FCS before resending.
 * Exceeding max_tx retries aborts the channel with ECONNRESET.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;

	BT_DBG("chan %p", chan);

	/* Remote busy: retransmissions must wait */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))

	if (__chan_is_moving(chan))

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
			/* frame no longer queued: nothing to resend */
			BT_DBG("Error: Can't retransmit seq %d, frame missing",

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx == 0 means unlimited retries */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);

		/* Refresh the ack we piggyback on the resent frame */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy before patching the header.
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);

			tx_skb = skb_clone(skb, GFP_KERNEL);

			/* allocation failed: give up on this resend batch */
			l2cap_seq_list_clear(&chan->retrans_list);

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);

			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);

		/* Recompute FCS over the patched frame */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
2119 static void l2cap_retransmit(struct l2cap_chan *chan,
2120 struct l2cap_ctrl *control)
2122 BT_DBG("chan %p, control %p", chan, control);
2124 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2125 l2cap_ertm_resend(chan);
/* REJ handling: rebuild the retransmission list with every unacked
 * frame from control->reqseq up to tx_send_head, then resend them all.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Start from a clean list — it is rebuilt below */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))

	if (chan->unacked_frames) {
		/* Find the first frame at/after reqseq that is still
		 * awaiting its first transmission boundary
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)

		/* Queue everything from there up to tx_send_head */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);

		l2cap_ertm_resend(chan);
/* Decide how to acknowledge received I-frames: send RNR when locally
 * busy, piggyback acks on pending I-frames when possible, send an RR
 * once the ack backlog crosses 3/4 of the window, or arm the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));

	/* Locally busy in normal receive state: tell the peer via RNR */
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);

		/* Prefer piggybacking the ack on outgoing I-frames */
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		/* threshold is now ack_win * 3; compared against 4x units */

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);

			/* Below threshold: defer the ack via the timer */
			__set_ack_timer(chan);
/* Copy @len bytes of user iovec data into @skb, spilling anything that
 * does not fit into @count into a chain of continuation fragments
 * (frag_list), each bounded by the connection MTU. Returns 0 or a
 * negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;

	/* First chunk goes straight into the head skb */
	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
			return PTR_ERR(tmp);

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))

		(*frag)->priority = skb->priority;

		/* Account fragment bytes on the head skb */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the
 * 2-byte PSM, followed by the payload copied from the user iovec.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu priority %u", chan, len, priority);

	/* Head skb holds at most one MTU worth including the header */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * payload copied from the user iovec. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (filled in at transmit time), an optional SDU-length field for
 * SAR start frames, the payload, and room for an optional FCS.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

		return ERR_PTR(-ENOTCONN);

	/* Header size depends on extended vs enhanced control field */
	hlen = __ertm_hdr_size(chan);

		/* SAR start frames carry the total SDU length */
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));

		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
/* Split an outgoing SDU into I-frame PDUs (SAR), sized from the HCI
 * MTU and the remote MPS, and append them to @seg_queue. Returns 0 or
 * a negative errno (the queue is purged on failure).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
	struct sk_buff *skb;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU */
		sar = L2CAP_SAR_UNSEGMENTED;

		sar = L2CAP_SAR_START;
		/* Start frame loses room to the SDU-length field */
		pdu_len -= L2CAP_SDULEN_SIZE;

		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

			/* Continuation frames regain the SDU-length room */
			pdu_len += L2CAP_SDULEN_SIZE;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;

			sar = L2CAP_SAR_CONTINUE;
/* Top-level transmit entry point: dispatch @msg on @chan according to
 * channel type (connectionless) and mode (basic / ERTM / streaming).
 * Returns bytes sent or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
	struct sk_buff *skb;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);

		/* ERTM hands frames to the TX state machine; streaming
		 * sends them immediately
		 */
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);

			l2cap_streaming_send(chan, &seg_queue);

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);

		/* NOTE(review): message says "state" but prints chan->mode */
		BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ S-frame for every sequence number between the expected
 * txseq and the one actually received that is not already buffered in
 * srej_q, recording each on srej_list for later matching.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
	struct l2cap_ctrl control;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Already-buffered frames need no SREJ */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);

	chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested missing frame (the
 * tail of srej_list); no-op when the list is empty.
 */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)

	memset(&control, 0, sizeof(control));
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
/* Re-send SREJ frames for every outstanding entry on srej_list up to
 * (but not including) @txseq. Entries are popped, resent, and pushed
 * back, so the list keeps its contents after one full rotation.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
	struct l2cap_ctrl control;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* Stop at the received frame or when the list empties */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Re-append so the entry stays pending */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: free every TX-queued frame with
 * a sequence number before @reqseq and stop the retransmission timer
 * once nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
	struct sk_buff *acked_skb;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or ack repeats what we already know */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
2628 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2630 BT_DBG("chan %p", chan);
2632 chan->expected_tx_seq = chan->buffer_seq;
2633 l2cap_seq_list_clear(&chan->srej_list);
2634 skb_queue_purge(&chan->srej_q);
2635 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine, XMIT state: handle data requests, local-busy
 * transitions, incoming ack/F-bit events, polls and retransmission
 * timeouts. Poll-type events move the machine to WAIT_F.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,

	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);

	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);

		/* Announce busy status to the peer (RNR path) */
		l2cap_send_ack(chan);

	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer we were busy: poll with RR to
			 * resume, and wait for the F-bit reply.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;

	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);

	case L2CAP_EV_EXPLICIT_POLL:
		/* Poll the peer and await the F-bit in WAIT_F */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;

	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer fired: poll the peer */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;

	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
/* ERTM TX state machine, WAIT_F state: a poll is outstanding, so new
 * data is queued but not sent; receiving the F-bit returns the machine
 * to XMIT, and monitor timeouts re-poll up to max_tx times before the
 * channel is disconnected.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,

	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);

	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);

		l2cap_send_ack(chan);

	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* Busy was announced via RNR: poll with RR to resume */
			struct l2cap_ctrl local_control;
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;

	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			/* Poll answered: stop monitoring, restart the
			 * retransmission timer if frames are pending,
			 * and go back to normal transmission.
			 */
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			/* NOTE(review): format string looks malformed —
			 * "0x2.2%x" was probably meant to be "0x%2.2x"
			 */
			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);

	case L2CAP_EV_EXPLICIT_POLL:
		/* A poll is already outstanding — ignore */

	case L2CAP_EV_MONITOR_TO:
		/* max_tx == 0 means retry forever */
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;

			l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch a TX state-machine event to the handler for the channel's
 * current state (XMIT or WAIT_F). @control and @skbs may be NULL
 * depending on the event.
 */
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	switch (chan->tx_state) {
	case L2CAP_TX_STATE_XMIT:
		l2cap_tx_state_xmit(chan, control, skbs, event);

	case L2CAP_TX_STATE_WAIT_F:
		l2cap_tx_state_wait_f(chan, control, skbs, event);
2807 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2808 struct l2cap_ctrl *control)
2810 BT_DBG("chan %p, control %p", chan, control);
2811 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2814 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2815 struct l2cap_ctrl *control)
2817 BT_DBG("chan %p, control %p", chan, control);
2818 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		/* Only raw (sniffer-style) channels get copies */
		if (chan->chan_type != L2CAP_CHAN_RAW)

		/* Don't send frame to the socket it came from */

		nskb = skb_clone(skb, GFP_KERNEL);

		/* recv() refusing the clone means we must free it */
		if (chan->ops->recv(chan, nskb))

	mutex_unlock(&conn->chan_lock);
2850 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header on the signalling CID
 * (LE or BR/EDR), command header, then @dlen bytes of @data — spilling
 * into MTU-sized continuation fragments when needed. Returns the skb
 * or NULL on failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* MTU too small to even carry the headers */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);

		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->len = cpu_to_le16(dlen);

		/* First chunk of payload goes into the head skb */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);

		memcpy(skb_put(*frag, count), data, count);

		frag = &(*frag)->next;
/* Parse one configuration option at *ptr, returning its type/length
 * via out-parameters and its value via *val (small values decoded
 * inline; larger values returned as a pointer cast to unsigned long).
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
	struct l2cap_conf_opt *opt = *ptr;

	len = L2CAP_CONF_OPT_SIZE + opt->len;

		/* 1-byte value */
		*val = *((u8 *) opt->val);

		/* 2-byte little-endian value */
		*val = get_unaligned_le16(opt->val);

		/* 4-byte little-endian value */
		*val = get_unaligned_le32(opt->val);

		/* larger payloads: hand back a pointer to the raw bytes */
		*val = (unsigned long) opt->val;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/val) at *ptr, encoding the
 * value inline for 1/2/4-byte options or copying @len bytes from the
 * pointer cast into @val otherwise; *ptr is advanced past the option.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

		/* 1-byte value */
		*((u8 *) opt->val) = val;

		/* 2-byte little-endian value */
		put_unaligned_le16(val, opt->val);

		/* 4-byte little-endian value */
		put_unaligned_le32(val, opt->val);

		/* larger payloads: val carries a source pointer */
		memcpy(opt->val, (void *) val, len);

	*ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification option built from the
 * channel's local QoS parameters; ERTM channels advertise their full
 * local spec, streaming channels a best-effort one.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		/* Spec-default access latency and flush timeout */
		efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);

	case L2CAP_MODE_STREAMING:
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
/* Delayed-work handler for the ERTM acknowledgement timer: if frames
 * have been received since the last ack, send an RR or RNR S-frame.
 * Drops the channel reference taken when the timer was armed.
 * NOTE(review): excerpt elides the container_of member name, the
 * frames_to_ack declaration and the conditional guarding the send.
 */
3012 static void l2cap_ack_timeout(struct work_struct *work)
3014 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3018 BT_DBG("chan %p", chan);
3020 l2cap_chan_lock(chan);
/* Number of frames received but not yet acknowledged. */
3022 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3023 chan->last_acked_seq);
3026 l2cap_send_rr_or_rnr(chan, 0);
3028 l2cap_chan_unlock(chan);
/* Balance the hold taken when the ack timer was scheduled. */
3029 l2cap_chan_put(chan);
/* Reset a channel's ERTM/streaming transmit and receive state before
 * the channel becomes ready.  Sequence counters, the tx queue and AMP
 * move state are reset for all modes; ERTM additionally gets its
 * retransmit/monitor/ack timers, SREJ queue and sequence lists.
 * Returns 0 on success or a negative errno from sequence-list setup.
 * NOTE(review): excerpt elides lines (err declaration, early return for
 * non-ERTM modes, error checks after each l2cap_seq_list_init and the
 * final return).
 */
3032 int l2cap_ertm_init(struct l2cap_chan *chan)
3036 chan->next_tx_seq = 0;
3037 chan->expected_tx_seq = 0;
3038 chan->expected_ack_seq = 0;
3039 chan->unacked_frames = 0;
3040 chan->buffer_seq = 0;
3041 chan->frames_sent = 0;
3042 chan->last_acked_seq = 0;
3044 chan->sdu_last_frag = NULL;
3047 skb_queue_head_init(&chan->tx_q);
/* Start on the BR/EDR controller; AMP moves are negotiated later. */
3049 chan->local_amp_id = 0;
3051 chan->move_state = L2CAP_MOVE_STABLE;
3052 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3054 if (chan->mode != L2CAP_MODE_ERTM)
/* ERTM-only setup from here on. */
3057 chan->rx_state = L2CAP_RX_STATE_RECV;
3058 chan->tx_state = L2CAP_TX_STATE_XMIT;
3060 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3061 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3062 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3064 skb_queue_head_init(&chan->srej_q);
3066 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3070 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* On retrans_list failure, undo the srej_list allocation. */
3072 l2cap_seq_list_free(&chan->srej_list);
/* Pick the channel mode to use: keep the requested ERTM/streaming mode
 * when the remote's feature mask supports it, otherwise fall back to
 * basic mode.  NOTE(review): excerpt elides the switch header and the
 * "return mode" line inside the supported case.
 */
3077 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3080 case L2CAP_MODE_STREAMING:
3081 case L2CAP_MODE_ERTM:
3082 if (l2cap_mode_supported(mode, remote_feat_mask))
3086 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only when high-speed (AMP) support is
 * enabled and the remote advertised L2CAP_FEAT_EXT_WINDOW.
 */
3090 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
3092 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Spec is usable only when high-speed (AMP) support is
 * enabled and the remote advertised L2CAP_FEAT_EXT_FLOW.
 */
3095 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
3097 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in the RFC option's retransmission and monitor timeouts.  For a
 * channel on an AMP controller the timeouts are derived from the AMP
 * best-effort flush timeout (clamped to 16 bits); otherwise the spec
 * defaults are used.  NOTE(review): excerpt elides lines (the clamp
 * assignment under the 0xffff check and the else branch introducing the
 * default path).
 */
3100 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3101 struct l2cap_conf_rfc *rfc)
/* AMP path: local_amp_id != 0 means a high-speed controller is in use. */
3103 if (chan->local_amp_id && chan->hs_hcon) {
3104 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3106 /* Class 1 devices have must have ERTM timeouts
3107 * exceeding the Link Supervision Timeout. The
3108 * default Link Supervision Timeout for AMP
3109 * controllers is 10 seconds.
3111 * Class 1 devices use 0xffffffff for their
3112 * best-effort flush timeout, so the clamping logic
3113 * will result in a timeout that meets the above
3114 * requirement. ERTM timeouts are 16-bit values, so
3115 * the maximum timeout is 65.535 seconds.
3118 /* Convert timeout to milliseconds and round */
3119 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3121 /* This is the recommended formula for class 2 devices
3122 * that start ERTM timers when packets are sent to the
3125 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field (presumably ertm_to = 0xffff here;
 * the assignment is elided from this excerpt). */
3127 if (ertm_to > 0xffff)
3130 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3131 rfc->monitor_timeout = rfc->retrans_timeout;
/* BR/EDR path: use the spec default timeouts. */
3133 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3134 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Choose the transmit window: if the requested window exceeds the
 * default and the remote supports extended window sizes, enable the
 * extended control field; otherwise clamp tx_win to the default.
 * ack_win always starts equal to tx_win.  NOTE(review): the else
 * keyword between the two branches is elided in this excerpt.
 */
3138 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3140 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3141 __l2cap_ews_supported(chan)) {
3142 /* use extended control field */
3143 set_bit(FLAG_EXT_CTRL, &chan->flags);
3144 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
/* Non-extended: window limited to the classic default. */
3146 chan->tx_win = min_t(u16, chan->tx_win,
3147 L2CAP_DEFAULT_TX_WINDOW);
3148 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3150 chan->ack_win = chan->tx_win;
/* Build an outgoing Configuration Request for 'chan' into 'data'
 * (MTU, RFC, and optionally EFS/EWS/FCS options depending on mode and
 * negotiated features).  Returns the request length (the "ptr - data"
 * return is elided from this excerpt).  NOTE(review): many lines are
 * elided (declarations, breaks, "goto done" paths); comments describe
 * only the visible code.
 */
3153 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3155 struct l2cap_conf_req *req = data;
3156 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3157 void *ptr = req->data;
3160 BT_DBG("chan %p", chan);
/* Only select mode on the first request of a negotiation. */
3162 if (chan->num_conf_req || chan->num_conf_rsp)
3165 switch (chan->mode) {
3166 case L2CAP_MODE_STREAMING:
3167 case L2CAP_MODE_ERTM:
3168 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3171 if (__l2cap_efs_supported(chan))
3172 set_bit(FLAG_EFS_ENABLE, &chan->flags)
3176 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* MTU option only needs sending when it differs from the default. */
3181 if (chan->imtu != L2CAP_DEFAULT_MTU)
3182 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3184 switch (chan->mode) {
3185 case L2CAP_MODE_BASIC:
/* If the remote supports neither ERTM nor streaming, the RFC
 * option can be omitted entirely. */
3186 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3187 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3190 rfc.mode = L2CAP_MODE_BASIC;
3192 rfc.max_transmit = 0;
3193 rfc.retrans_timeout = 0;
3194 rfc.monitor_timeout = 0;
3195 rfc.max_pdu_size = 0;
3197 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3198 (unsigned long) &rfc);
3201 case L2CAP_MODE_ERTM:
3202 rfc.mode = L2CAP_MODE_ERTM;
3203 rfc.max_transmit = chan->max_tx;
3205 __l2cap_set_ertm_timeouts(chan, &rfc);
/* MPS must fit in the ACL MTU after L2CAP headers/FCS. */
3207 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3208 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3210 rfc.max_pdu_size = cpu_to_le16(size);
3212 l2cap_txwin_setup(chan);
3214 rfc.txwin_size = min_t(u16, chan->tx_win,
3215 L2CAP_DEFAULT_TX_WINDOW);
3217 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3218 (unsigned long) &rfc);
3220 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3221 l2cap_add_opt_efs(&ptr, chan);
/* Extended window size is carried in a separate EWS option. */
3223 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3224 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3227 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3228 if (chan->fcs == L2CAP_FCS_NONE ||
3229 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3230 chan->fcs = L2CAP_FCS_NONE;
3231 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3236 case L2CAP_MODE_STREAMING:
3237 l2cap_txwin_setup(chan);
3238 rfc.mode = L2CAP_MODE_STREAMING;
/* Streaming mode has no retransmission: zero the ERTM fields. */
3240 rfc.max_transmit = 0;
3241 rfc.retrans_timeout = 0;
3242 rfc.monitor_timeout = 0;
3244 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3245 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3247 rfc.max_pdu_size = cpu_to_le16(size);
3249 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3250 (unsigned long) &rfc);
3252 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3253 l2cap_add_opt_efs(&ptr, chan);
3255 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3256 if (chan->fcs == L2CAP_FCS_NONE ||
3257 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3258 chan->fcs = L2CAP_FCS_NONE;
3259 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3265 req->dcid = cpu_to_le16(chan->dcid);
3266 req->flags = __constant_cpu_to_le16(0);
/* Parse the accumulated Configuration Request stored in chan->conf_req
 * and build the Configuration Response into 'data'.  Walks every option
 * (MTU, flush timeout, QoS, RFC, FCS, EFS, EWS), resolves the channel
 * mode against the peer's RFC option, then emits the response options
 * and result.  Returns the response length (the final return is elided
 * from this excerpt) or -ECONNREFUSED when negotiation cannot proceed.
 * NOTE(review): many lines are elided (switch headers, breaks, hint
 * checks, goto labels); comments describe only the visible code.
 */
3271 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3273 struct l2cap_conf_rsp *rsp = data;
3274 void *ptr = rsp->data;
3275 void *req = chan->conf_req;
3276 int len = chan->conf_len;
3277 int type, hint, olen;
3279 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3280 struct l2cap_conf_efs efs;
3282 u16 mtu = L2CAP_DEFAULT_MTU;
3283 u16 result = L2CAP_CONF_SUCCESS;
3286 BT_DBG("chan %p", chan);
/* First pass: consume every option in the request. */
3288 while (len >= L2CAP_CONF_OPT_SIZE) {
3289 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint bit set => unknown options may be silently ignored. */
3291 hint = type & L2CAP_CONF_HINT;
3292 type &= L2CAP_CONF_MASK;
3295 case L2CAP_CONF_MTU:
3299 case L2CAP_CONF_FLUSH_TO:
3300 chan->flush_to = val;
3303 case L2CAP_CONF_QOS:
3306 case L2CAP_CONF_RFC:
3307 if (olen == sizeof(rfc))
3308 memcpy(&rfc, (void *) val, olen);
3311 case L2CAP_CONF_FCS:
3312 if (val == L2CAP_FCS_NONE)
3313 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3316 case L2CAP_CONF_EFS:
3318 if (olen == sizeof(efs))
3319 memcpy(&efs, (void *) val, olen);
3322 case L2CAP_CONF_EWS:
/* NOTE(review): the guard before this refusal (presumably
 * !enable_hs or similar) is elided from this excerpt. */
3324 return -ECONNREFUSED;
3326 set_bit(FLAG_EXT_CTRL, &chan->flags);
3327 set_bit(CONF_EWS_RECV, &chan->conf_state);
3328 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3329 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back with CONF_UNKNOWN. */
3336 result = L2CAP_CONF_UNKNOWN;
3337 *((u8 *) ptr++) = type;
/* Mode is only (re)selected on the first exchange. */
3342 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3345 switch (chan->mode) {
3346 case L2CAP_MODE_STREAMING:
3347 case L2CAP_MODE_ERTM:
3348 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3349 chan->mode = l2cap_select_mode(rfc.mode,
3350 chan->conn->feat_mask);
3355 if (__l2cap_efs_supported(chan))
3356 set_bit(FLAG_EFS_ENABLE, &chan->flags);
/* Peer sent EFS but we cannot support it: refuse. */
3358 return -ECONNREFUSED;
3361 if (chan->mode != rfc.mode)
3362 return -ECONNREFUSED;
/* Mode mismatch: answer UNACCEPT with our preferred mode, but
 * give up after the second disagreement. */
3368 if (chan->mode != rfc.mode) {
3369 result = L2CAP_CONF_UNACCEPT;
3370 rfc.mode = chan->mode;
3372 if (chan->num_conf_rsp == 1)
3373 return -ECONNREFUSED;
3375 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3376 (unsigned long) &rfc);
3379 if (result == L2CAP_CONF_SUCCESS) {
3380 /* Configure output options and let the other side know
3381 * which ones we don't like. */
3383 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3384 result = L2CAP_CONF_UNACCEPT;
3387 set_bit(CONF_MTU_DONE, &chan->conf_state);
3389 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service-type mismatch (both sides carrying traffic but
 * disagreeing on type) is only tolerated once. */
3392 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3393 efs.stype != L2CAP_SERV_NOTRAFIC &&
3394 efs.stype != chan->local_stype) {
3396 result = L2CAP_CONF_UNACCEPT;
3398 if (chan->num_conf_req >= 1)
3399 return -ECONNREFUSED;
3401 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3403 (unsigned long) &efs);
3405 /* Send PENDING Conf Rsp */
3406 result = L2CAP_CONF_PENDING;
3407 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3412 case L2CAP_MODE_BASIC:
3413 chan->fcs = L2CAP_FCS_NONE;
3414 set_bit(CONF_MODE_DONE, &chan->conf_state);
3417 case L2CAP_MODE_ERTM:
/* If no EWS option arrived, the RFC option carries the window. */
3418 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3419 chan->remote_tx_win = rfc.txwin_size;
3421 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3423 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's MPS to what fits in our ACL MTU. */
3425 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3426 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3427 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3428 rfc.max_pdu_size = cpu_to_le16(size);
3429 chan->remote_mps = size;
3431 __l2cap_set_ertm_timeouts(chan, &rfc);
3433 set_bit(CONF_MODE_DONE, &chan->conf_state);
3435 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3436 sizeof(rfc), (unsigned long) &rfc);
/* Record the peer's EFS parameters and echo the option back. */
3438 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3439 chan->remote_id = efs.id;
3440 chan->remote_stype = efs.stype;
3441 chan->remote_msdu = le16_to_cpu(efs.msdu);
3442 chan->remote_flush_to =
3443 le32_to_cpu(efs.flush_to);
3444 chan->remote_acc_lat =
3445 le32_to_cpu(efs.acc_lat);
3446 chan->remote_sdu_itime =
3447 le32_to_cpu(efs.sdu_itime);
3448 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3450 (unsigned long) &efs);
3454 case L2CAP_MODE_STREAMING:
3455 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3456 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3457 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3458 rfc.max_pdu_size = cpu_to_le16(size);
3459 chan->remote_mps = size;
3461 set_bit(CONF_MODE_DONE, &chan->conf_state);
3463 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3464 (unsigned long) &rfc);
/* Unsupported mode requested: reject, echoing our mode. */
3469 result = L2CAP_CONF_UNACCEPT;
3471 memset(&rfc, 0, sizeof(rfc));
3472 rfc.mode = chan->mode;
3475 if (result == L2CAP_CONF_SUCCESS)
3476 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3478 rsp->scid = cpu_to_le16(chan->dcid);
3479 rsp->result = cpu_to_le16(result);
3480 rsp->flags = __constant_cpu_to_le16(0);
/* Parse a Configuration Response from the peer and build the follow-up
 * Configuration Request into 'data', adjusting local channel parameters
 * (MTU, flush timeout, RFC, EWS, EFS, FCS) to what the peer accepted.
 * '*result' may be updated (e.g. to UNACCEPT).  Returns the new request
 * length (final return elided from this excerpt) or -ECONNREFUSED.
 * NOTE(review): switch headers, breaks and several declarations are
 * elided; comments describe only the visible code.
 */
3485 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3486 void *data, u16 *result)
3488 struct l2cap_conf_req *req = data;
3489 void *ptr = req->data;
3492 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3493 struct l2cap_conf_efs efs;
3495 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3497 while (len >= L2CAP_CONF_OPT_SIZE) {
3498 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3501 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: counter with the
 * minimum and mark the response unacceptable. */
3502 if (val < L2CAP_DEFAULT_MIN_MTU) {
3503 *result = L2CAP_CONF_UNACCEPT;
3504 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3507 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3510 case L2CAP_CONF_FLUSH_TO:
3511 chan->flush_to = val;
3512 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3516 case L2CAP_CONF_RFC:
3517 if (olen == sizeof(rfc))
3518 memcpy(&rfc, (void *)val, olen);
/* A state-2 device may not change its mode mid-negotiation. */
3520 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3521 rfc.mode != chan->mode)
3522 return -ECONNREFUSED;
3526 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3527 sizeof(rfc), (unsigned long) &rfc);
3530 case L2CAP_CONF_EWS:
3531 chan->ack_win = min_t(u16, val, chan->ack_win);
3532 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3536 case L2CAP_CONF_EFS:
3537 if (olen == sizeof(efs))
3538 memcpy(&efs, (void *)val, olen);
/* Refuse irreconcilable EFS service types. */
3540 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3541 efs.stype != L2CAP_SERV_NOTRAFIC &&
3542 efs.stype != chan->local_stype)
3543 return -ECONNREFUSED;
3545 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3546 (unsigned long) &efs);
3549 case L2CAP_CONF_FCS:
3550 if (*result == L2CAP_CONF_PENDING)
3551 if (val == L2CAP_FCS_NONE)
3552 set_bit(CONF_RECV_NO_FCS,
/* Basic mode cannot be renegotiated to another mode. */
3558 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3559 return -ECONNREFUSED;
3561 chan->mode = rfc.mode;
3563 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3565 case L2CAP_MODE_ERTM:
/* Adopt the timeouts/MPS the peer confirmed. */
3566 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3567 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3568 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3569 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3570 chan->ack_win = min_t(u16, chan->ack_win,
3573 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3574 chan->local_msdu = le16_to_cpu(efs.msdu);
3575 chan->local_sdu_itime =
3576 le32_to_cpu(efs.sdu_itime);
3577 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3578 chan->local_flush_to =
3579 le32_to_cpu(efs.flush_to);
3583 case L2CAP_MODE_STREAMING:
3584 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3588 req->dcid = cpu_to_le16(chan->dcid);
3589 req->flags = __constant_cpu_to_le16(0);
/* Build a minimal Configuration Response (no options) with the given
 * result and flags into 'data'.  Returns the response length (the
 * "ptr - data" return is elided from this excerpt).
 */
3594 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3595 u16 result, u16 flags)
3597 struct l2cap_conf_rsp *rsp = data;
3598 void *ptr = rsp->data;
3600 BT_DBG("chan %p", chan);
/* The response's scid field carries our dcid (the peer's source CID). */
3602 rsp->scid = cpu_to_le16(chan->dcid);
3603 rsp->result = cpu_to_le16(result);
3604 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connect (or Create Channel) Response for a channel
 * that was held in BT_CONNECT2, then kick off configuration by sending
 * our first Configuration Request (unless one was already sent).
 * NOTE(review): excerpt elides lines (the buf declaration, the
 * condition selecting L2CAP_CREATE_CHAN_RSP — presumably an AMP/hs
 * check — and the early return after test_and_set_bit).
 */
3609 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3611 struct l2cap_conn_rsp rsp;
3612 struct l2cap_conn *conn = chan->conn;
3616 rsp.scid = cpu_to_le16(chan->dcid);
3617 rsp.dcid = cpu_to_le16(chan->scid);
3618 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3619 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3622 rsp_code = L2CAP_CREATE_CHAN_RSP;
3624 rsp_code = L2CAP_CONN_RSP;
3626 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3628 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only send the config request once per channel. */
3630 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3633 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3634 l2cap_build_conf_req(chan, buf), buf);
3635 chan->num_conf_req++;
/* Extract the RFC (and EWS) options from a successful Configuration
 * Response and apply them to the channel, falling back to sane defaults
 * when a misbehaving remote omitted them.  Only meaningful for ERTM and
 * streaming modes.  NOTE(review): excerpt elides lines (declarations,
 * the EWS value read into txwin_ext, breaks and the mode switch
 * header).
 */
3638 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3642 /* Use sane default values in case a misbehaving remote device
3643 * did not send an RFC or extended window size option.
3645 u16 txwin_ext = chan->ack_win;
3646 struct l2cap_conf_rfc rfc = {
3648 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3649 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3650 .max_pdu_size = cpu_to_le16(chan->imtu),
3651 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3654 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Nothing to do for basic mode channels. */
3656 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3659 while (len >= L2CAP_CONF_OPT_SIZE) {
3660 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3663 case L2CAP_CONF_RFC:
3664 if (olen == sizeof(rfc))
3665 memcpy(&rfc, (void *)val, olen);
3667 case L2CAP_CONF_EWS:
3674 case L2CAP_MODE_ERTM:
3675 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3676 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3677 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* With extended control the window comes from EWS, else from RFC. */
3678 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3679 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3681 chan->ack_win = min_t(u16, chan->ack_win,
3684 case L2CAP_MODE_STREAMING:
3685 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it answers our outstanding
 * Information Request (matching ident), treat feature discovery as
 * done and start the pending channels.  NOTE(review): the early
 * returns' values and the final return are elided from this excerpt.
 */
3689 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3690 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3693 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Reject malformed (too short) packets. */
3695 if (cmd_len < sizeof(*rej))
3698 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3701 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3702 cmd->ident == conn->info_ident) {
3703 cancel_delayed_work(&conn->info_timer);
3705 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3706 conn->info_ident = 0;
3708 l2cap_conn_start(conn);
/* Handle an incoming Connection Request (or the BR/EDR part of a
 * Create Channel Request when amp_id != 0): look up a listening
 * channel for the PSM, run security checks, create the new channel,
 * send the response, optionally start feature discovery, and send the
 * first Configuration Request on immediate success.  Returns the new
 * channel (or NULL — the error paths and several labels/locks are
 * elided from this excerpt).  NOTE(review): many lines are elided
 * (goto labels such as 'sendresp'/'response', lock/unlock pairs,
 * sk/parent assignments); comments describe only the visible code.
 */
3714 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3715 struct l2cap_cmd_hdr *cmd,
3716 u8 *data, u8 rsp_code, u8 amp_id)
3718 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3719 struct l2cap_conn_rsp rsp;
3720 struct l2cap_chan *chan = NULL, *pchan;
3721 struct sock *parent, *sk = NULL;
3722 int result, status = L2CAP_CS_NO_INFO;
3724 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3725 __le16 psm = req->psm;
3727 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3729 /* Check if we have socket listening on psm */
3730 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3732 result = L2CAP_CR_BAD_PSM;
3738 mutex_lock(&conn->chan_lock);
3741 /* Check if the ACL is secure enough (if not SDP) */
3742 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3743 !hci_conn_check_link_mode(conn->hcon)) {
3744 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3745 result = L2CAP_CR_SEC_BLOCK;
3749 result = L2CAP_CR_NO_MEM;
3751 /* Check if we already have channel with that dcid */
3752 if (__l2cap_get_chan_by_dcid(conn, scid))
3755 chan = pchan->ops->new_connection(pchan);
3761 /* For certain devices (ex: HID mouse), support for authentication,
3762 * pairing and bonding is optional. For such devices, inorder to avoid
3763 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3764 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3766 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3768 bacpy(&bt_sk(sk)->src, conn->src);
3769 bacpy(&bt_sk(sk)->dst, conn->dst);
/* Record which controller (BR/EDR or AMP) the channel starts on. */
3772 chan->local_amp_id = amp_id;
3774 __l2cap_chan_add(conn, chan);
3778 __set_chan_timer(chan, sk->sk_sndtimeo);
3780 chan->ident = cmd->ident;
/* Respond immediately only once remote features are known. */
3782 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3783 if (l2cap_chan_check_security(chan)) {
3784 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3785 __l2cap_state_change(chan, BT_CONNECT2);
3786 result = L2CAP_CR_PEND;
3787 status = L2CAP_CS_AUTHOR_PEND;
3788 chan->ops->defer(chan);
3790 /* Force pending result for AMP controllers.
3791 * The connection will succeed after the
3792 * physical link is up.
3795 __l2cap_state_change(chan, BT_CONNECT2);
3796 result = L2CAP_CR_PEND;
3798 __l2cap_state_change(chan, BT_CONFIG);
3799 result = L2CAP_CR_SUCCESS;
3801 status = L2CAP_CS_NO_INFO;
/* Security procedure still in progress: answer pending. */
3804 __l2cap_state_change(chan, BT_CONNECT2);
3805 result = L2CAP_CR_PEND;
3806 status = L2CAP_CS_AUTHEN_PEND;
/* Remote features unknown yet: answer pending, query below. */
3809 __l2cap_state_change(chan, BT_CONNECT2);
3810 result = L2CAP_CR_PEND;
3811 status = L2CAP_CS_NO_INFO;
3815 release_sock(parent);
3816 mutex_unlock(&conn->chan_lock);
3819 rsp.scid = cpu_to_le16(scid);
3820 rsp.dcid = cpu_to_le16(dcid);
3821 rsp.result = cpu_to_le16(result);
3822 rsp.status = cpu_to_le16(status);
3823 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Kick off feature-mask discovery if it hasn't been done yet. */
3825 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3826 struct l2cap_info_req info;
3827 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3829 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3830 conn->info_ident = l2cap_get_ident(conn);
3832 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3834 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3835 sizeof(info), &info);
/* Immediate success: start configuration right away. */
3838 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3839 result == L2CAP_CR_SUCCESS) {
3841 set_bit(CONF_REQ_SENT, &chan->conf_state);
3842 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3843 l2cap_build_conf_req(chan, buf), buf);
3844 chan->num_conf_req++;
/* Handle an incoming Connection Request on a BR/EDR link: notify the
 * management interface of the connection (once) and delegate channel
 * creation to l2cap_connect().  NOTE(review): the length-check return
 * value, hci_dev_lock and the final return are elided from this
 * excerpt.
 */
3850 static int l2cap_connect_req(struct l2cap_conn *conn,
3851 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3853 struct hci_dev *hdev = conn->hcon->hdev;
3854 struct hci_conn *hcon = conn->hcon;
3856 if (cmd_len < sizeof(struct l2cap_conn_req))
/* Report the device as connected to mgmt exactly once per ACL. */
3860 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3861 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3862 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3863 hcon->dst_type, 0, NULL, 0,
3865 hci_dev_unlock(hdev);
/* amp_id 0 => plain BR/EDR connection response. */
3867 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response (or Create Channel Response): locate the
 * channel by scid (or by ident when still pending), then either move to
 * BT_CONFIG and send our Configuration Request, stay pending, or tear
 * the channel down on refusal.  NOTE(review): excerpt elides lines
 * (error/unlock paths, the req buffer declaration, the L2CAP_CR_PEND
 * case label and the default/refusal case label).
 */
3871 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3872 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3875 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3876 u16 scid, dcid, result, status;
3877 struct l2cap_chan *chan;
3881 if (cmd_len < sizeof(*rsp))
3884 scid = __le16_to_cpu(rsp->scid);
3885 dcid = __le16_to_cpu(rsp->dcid);
3886 result = __le16_to_cpu(rsp->result);
3887 status = __le16_to_cpu(rsp->status);
3889 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3890 dcid, scid, result, status);
3892 mutex_lock(&conn->chan_lock);
/* Prefer lookup by our source CID; fall back to the command ident
 * for channels that have not been assigned a CID yet. */
3895 chan = __l2cap_get_chan_by_scid(conn, scid);
3901 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3910 l2cap_chan_lock(chan);
3913 case L2CAP_CR_SUCCESS:
3914 l2cap_state_change(chan, BT_CONFIG);
3917 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3919 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3922 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3923 l2cap_build_conf_req(chan, req), req);
3924 chan->num_conf_req++;
/* Pending: remember that we're still waiting for the final rsp. */
3928 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result: connection refused, delete the channel. */
3932 l2cap_chan_del(chan, ECONNREFUSED);
3936 l2cap_chan_unlock(chan);
3939 mutex_unlock(&conn->chan_lock);
/* Apply the default FCS policy after configuration completes: FCS is
 * only meaningful in ERTM/streaming mode, and defaults to CRC16 unless
 * the peer explicitly requested no FCS.
 */
3944 static inline void set_default_fcs(struct l2cap_chan *chan)
3946 /* FCS is enabled only in ERTM or streaming mode, if one or both
3949 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3950 chan->fcs = L2CAP_FCS_NONE;
3951 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3952 chan->fcs = L2CAP_FCS_CRC16;
/* Send the final (SUCCESS) Configuration Response that was deferred
 * while an EFS negotiation was pending, clearing the local-pending
 * flag and marking our output configuration done.
 */
3955 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3956 u8 ident, u16 flags)
3958 struct l2cap_conn *conn = chan->conn;
3960 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3963 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3964 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3966 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3967 l2cap_build_conf_rsp(chan, data,
3968 L2CAP_CONF_SUCCESS, flags), data);
/* Handle an incoming Configuration Request: accumulate (possibly
 * fragmented) option data into chan->conf_req, and once complete parse
 * it, send the Configuration Response, and — when both directions are
 * configured — initialize ERTM state and mark the channel ready.
 * NOTE(review): excerpt elides many lines (declarations, unlock/return
 * paths, the conf_len reset, goto labels, the else branch around the
 * EFS/hs handling); comments describe only the visible code.
 */
3971 static inline int l2cap_config_req(struct l2cap_conn *conn,
3972 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3975 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3978 struct l2cap_chan *chan;
3981 if (cmd_len < sizeof(*req))
3984 dcid = __le16_to_cpu(req->dcid);
3985 flags = __le16_to_cpu(req->flags);
3987 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3989 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only valid in BT_CONFIG/BT_CONNECT2; otherwise reject
 * with an invalid-CID command reject. */
3993 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3994 struct l2cap_cmd_rej_cid rej;
3996 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3997 rej.scid = cpu_to_le16(chan->scid);
3998 rej.dcid = cpu_to_le16(chan->dcid);
4000 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
4005 /* Reject if config buffer is too small. */
4006 len = cmd_len - sizeof(*req);
4007 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4008 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4009 l2cap_build_conf_rsp(chan, rsp,
4010 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the accumulated request. */
4015 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4016 chan->conf_len += len;
4018 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4019 /* Incomplete config. Send empty response. */
4020 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4021 l2cap_build_conf_rsp(chan, rsp,
4022 L2CAP_CONF_SUCCESS, flags), rsp);
4026 /* Complete config. */
4027 len = l2cap_parse_conf_req(chan, rsp);
/* Negative return => unrecoverable negotiation failure. */
4029 l2cap_send_disconn_req(chan, ECONNRESET);
4033 chan->ident = cmd->ident;
4034 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4035 chan->num_conf_rsp++;
4037 /* Reset config buffer. */
4040 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4043 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4044 set_default_fcs(chan);
4046 if (chan->mode == L2CAP_MODE_ERTM ||
4047 chan->mode == L2CAP_MODE_STREAMING)
4048 err = l2cap_ertm_init(chan);
4051 l2cap_send_disconn_req(chan, -err);
4053 l2cap_chan_ready(chan);
/* If we haven't sent our own config request yet, do so now. */
4058 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4060 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4061 l2cap_build_conf_req(chan, buf), buf);
4062 chan->num_conf_req++;
4065 /* Got Conf Rsp PENDING from remote side and asume we sent
4066 Conf Rsp PENDING in the code above */
4067 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4068 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4070 /* check compatibility */
4072 /* Send rsp for BR/EDR channel */
4074 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4076 chan->ident = cmd->ident;
4080 l2cap_chan_unlock(chan);
/* Handle an incoming Configuration Response.  SUCCESS applies the
 * negotiated RFC parameters; PENDING may trigger an EFS re-parse and
 * either an immediate response (BR/EDR) or an AMP logical-link setup;
 * UNACCEPT re-parses and retries a bounded number of times; any other
 * result disconnects.  Once both directions are done, ERTM state is
 * initialized and the channel becomes ready.  NOTE(review): excerpt
 * elides lines (buf/req declarations, goto labels/unlock paths, the
 * default case label and several breaks).
 */
4084 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4085 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4088 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4089 u16 scid, flags, result;
4090 struct l2cap_chan *chan;
4091 int len = cmd_len - sizeof(*rsp);
4094 if (cmd_len < sizeof(*rsp))
4097 scid = __le16_to_cpu(rsp->scid);
4098 flags = __le16_to_cpu(rsp->flags);
4099 result = __le16_to_cpu(rsp->result);
4101 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4104 chan = l2cap_get_chan_by_scid(conn, scid);
4109 case L2CAP_CONF_SUCCESS:
4110 l2cap_conf_rfc_get(chan, rsp->data, len);
4111 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4114 case L2CAP_CONF_PENDING:
4115 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4117 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4120 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
/* Parse failure: abort the channel. */
4123 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR channel: answer the pending response immediately;
 * AMP channel with EFS: bring up the logical link first. */
4127 if (!chan->hs_hcon) {
4128 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4131 if (l2cap_check_efs(chan)) {
4132 amp_create_logical_link(chan);
4133 chan->ident = cmd->ident;
4139 case L2CAP_CONF_UNACCEPT:
4140 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Response options wouldn't fit into our request buffer. */
4143 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4144 l2cap_send_disconn_req(chan, ECONNRESET);
4148 /* throw out any old stored conf requests */
4149 result = L2CAP_CONF_SUCCESS;
4150 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4153 l2cap_send_disconn_req(chan, ECONNRESET);
/* Retry configuration with the adjusted parameters. */
4157 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4158 L2CAP_CONF_REQ, len, req);
4159 chan->num_conf_req++;
4160 if (result != L2CAP_CONF_SUCCESS)
/* Any other result code: give up and disconnect. */
4166 l2cap_chan_set_err(chan, ECONNRESET);
4168 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4169 l2cap_send_disconn_req(chan, ECONNRESET);
/* More fragments to come; wait for the final response. */
4173 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4176 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4178 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4179 set_default_fcs(chan);
4181 if (chan->mode == L2CAP_MODE_ERTM ||
4182 chan->mode == L2CAP_MODE_STREAMING)
4183 err = l2cap_ertm_init(chan);
4186 l2cap_send_disconn_req(chan, -err);
4188 l2cap_chan_ready(chan);
4192 l2cap_chan_unlock(chan);
/* Handle an incoming Disconnection Request: find the channel by our
 * dcid, acknowledge with a Disconnection Response, shut the socket
 * down and delete the channel.  The extra hold/put pair keeps the
 * channel alive across the unlocked ops->close() call.  NOTE(review):
 * excerpt elides lines (scid/dcid declarations, the sk assignment and
 * lock/release around the shutdown, return values).
 */
4196 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4197 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4200 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4201 struct l2cap_disconn_rsp rsp;
4203 struct l2cap_chan *chan;
4206 if (cmd_len != sizeof(*req))
4209 scid = __le16_to_cpu(req->scid);
4210 dcid = __le16_to_cpu(req->dcid);
4212 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4214 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid, so look up by dcid-as-our-scid. */
4216 chan = __l2cap_get_chan_by_scid(conn, dcid);
4218 mutex_unlock(&conn->chan_lock);
4222 l2cap_chan_lock(chan);
4226 rsp.dcid = cpu_to_le16(chan->scid);
4227 rsp.scid = cpu_to_le16(chan->dcid);
4228 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4231 sk->sk_shutdown = SHUTDOWN_MASK;
/* Hold the channel so ops->close() can run after chan_del. */
4234 l2cap_chan_hold(chan);
4235 l2cap_chan_del(chan, ECONNRESET);
4237 l2cap_chan_unlock(chan);
4239 chan->ops->close(chan);
4240 l2cap_chan_put(chan);
4242 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Disconnection Response: the peer confirmed our
 * disconnect, so delete the channel (error 0) and close it.  The
 * hold/put pair keeps the channel valid across the unlocked close.
 * NOTE(review): excerpt elides the scid/dcid declarations and the
 * return values.
 */
4247 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4248 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4251 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4253 struct l2cap_chan *chan;
4255 if (cmd_len != sizeof(*rsp))
4258 scid = __le16_to_cpu(rsp->scid);
4259 dcid = __le16_to_cpu(rsp->dcid);
4261 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4263 mutex_lock(&conn->chan_lock);
4265 chan = __l2cap_get_chan_by_scid(conn, scid);
4267 mutex_unlock(&conn->chan_lock);
4271 l2cap_chan_lock(chan);
4273 l2cap_chan_hold(chan);
/* err 0: this is a locally initiated, clean disconnect. */
4274 l2cap_chan_del(chan, 0);
4276 l2cap_chan_unlock(chan);
4278 chan->ops->close(chan);
4279 l2cap_chan_put(chan);
4281 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request: answer FEAT_MASK with our
 * feature mask (ERTM/streaming always; extended flow/window when
 * high-speed is enabled — the enable_hs conditionals are elided from
 * this excerpt), FIXED_CHAN with the fixed-channel bitmap (A2MP bit
 * reflecting high-speed support), and anything else with NOTSUPP.
 * NOTE(review): buf declarations and return values are elided.
 */
4286 static inline int l2cap_information_req(struct l2cap_conn *conn,
4287 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4290 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4293 if (cmd_len != sizeof(*req))
4296 type = __le16_to_cpu(req->type);
4298 BT_DBG("type 0x%4.4x", type);
4300 if (type == L2CAP_IT_FEAT_MASK) {
4302 u32 feat_mask = l2cap_feat_mask;
4303 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4304 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4305 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4307 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended features are only advertised when hs is enabled
 * (the guarding conditional is elided from this excerpt). */
4310 feat_mask |= L2CAP_FEAT_EXT_FLOW
4311 | L2CAP_FEAT_EXT_WINDOW;
4313 put_unaligned_le32(feat_mask, rsp->data);
4314 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4316 } else if (type == L2CAP_IT_FIXED_CHAN) {
4318 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only with hs support. */
4321 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4323 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4325 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4326 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4327 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4328 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
/* Unknown info type: reply not-supported. */
4331 struct l2cap_info_rsp rsp;
4332 rsp.type = cpu_to_le16(type);
4333 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4334 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming Information Response to our feature/fixed-channel
 * query.  On a FEAT_MASK answer, chain a FIXED_CHAN request if the
 * remote supports fixed channels; otherwise (or after the FIXED_CHAN
 * answer, or on error) mark discovery done and start pending channels.
 * NOTE(review): excerpt elides lines (type/result declarations, the
 * switch header for 'type', else branches and return values).
 */
4341 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4342 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4345 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4348 if (cmd_len < sizeof(*rsp))
4351 type = __le16_to_cpu(rsp->type);
4352 result = __le16_to_cpu(rsp->result);
4354 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4356 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4357 if (cmd->ident != conn->info_ident ||
4358 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4361 cancel_delayed_work(&conn->info_timer);
4363 if (result != L2CAP_IR_SUCCESS) {
4364 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4365 conn->info_ident = 0;
4367 l2cap_conn_start(conn);
4373 case L2CAP_IT_FEAT_MASK:
4374 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Remote supports fixed channels: ask which ones. */
4376 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4377 struct l2cap_info_req req;
4378 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4380 conn->info_ident = l2cap_get_ident(conn);
4382 l2cap_send_cmd(conn, conn->info_ident,
4383 L2CAP_INFO_REQ, sizeof(req), &req);
/* No fixed-channel support: discovery complete. */
4385 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4386 conn->info_ident = 0;
4388 l2cap_conn_start(conn);
4392 case L2CAP_IT_FIXED_CHAN:
4393 conn->fixed_chan_mask = rsp->data[0];
4394 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4395 conn->info_ident = 0;
4397 l2cap_conn_start(conn);
/* Handle an incoming L2CAP Create Channel Request (AMP).  An amp_id of
 * HCI_BREDR_ID (0) degenerates to a normal BR/EDR connect; otherwise
 * the requested AMP controller is validated and the new channel is
 * prepared for a high-speed logical link.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;

	/* Request PDU has a fixed size */
	if (cmd_len != sizeof(*req))

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == HCI_BREDR_ID) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		/* Bind the channel to the high-speed link.  FCS is not
		 * used on AMP channels and the connection MTU follows
		 * the AMP controller's block size.
		 */
		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;

	/* Rejection path: requested AMP controller is unusable */
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4478 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4480 struct l2cap_move_chan_req req;
4483 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4485 ident = l2cap_get_ident(chan->conn);
4486 chan->ident = ident;
4488 req.icid = cpu_to_le16(chan->scid);
4489 req.dest_amp_id = dest_amp_id;
4491 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4494 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4497 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4499 struct l2cap_move_chan_rsp rsp;
4501 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4503 rsp.icid = cpu_to_le16(chan->dcid);
4504 rsp.result = cpu_to_le16(result);
4506 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4510 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4512 struct l2cap_move_chan_cfm cfm;
4514 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4516 chan->ident = l2cap_get_ident(chan->conn);
4518 cfm.icid = cpu_to_le16(chan->scid);
4519 cfm.result = cpu_to_le16(result);
4521 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4524 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4527 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4529 struct l2cap_move_chan_cfm cfm;
4531 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4533 cfm.icid = cpu_to_le16(icid);
4534 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4536 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4540 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4543 struct l2cap_move_chan_cfm_rsp rsp;
4545 BT_DBG("icid 0x%4.4x", icid);
4547 rsp.icid = cpu_to_le16(icid);
4548 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Drop the channel's references to its high-speed logical link.  The
 * actual link teardown is not implemented yet (see placeholder below).
 */
static void __release_logical_link(struct l2cap_chan *chan)
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
/* Clean up after a failed logical link setup.  For a channel still
 * being created the whole channel is torn down; for a channel move,
 * the cleanup depends on which side initiated the move and how far the
 * move state machine had progressed.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		/* Abort the move and tell the initiator we cannot do it */
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			l2cap_move_done(chan);

		/* Other amp move states imply that the move
		 * has already aborted
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the AMP logical link is up: attach
 * the hci_chan, answer the pending EFS configuration, and when both
 * config directions are done finish ERTM setup and mark the channel
 * ready (or disconnect on ERTM init failure).
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* Config response was deferred until the logical link came up */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
			l2cap_send_disconn_req(chan, -err);
			l2cap_chan_ready(chan);
/* Advance the channel-move state machine after the new logical link
 * for a move has come up.  The next step depends on whether we still
 * wait for the peer's response, are locally busy, or can send the
 * confirm / response immediately.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Defer until local busy clears */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
/* Call with chan locked */
/* Logical link confirmation callback.  On failure, clean up via
 * l2cap_logical_fail() and drop the link references; on success,
 * dispatch to the create-completion or move-completion path depending
 * on whether the channel is already connected.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

		l2cap_logical_fail(chan);
		__release_logical_link(chan);

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id)
			l2cap_logical_finish_create(chan, hchan);

		l2cap_logical_finish_move(chan, hchan);
/* Begin moving a channel between BR/EDR and AMP as the initiator.
 * From BR/EDR a physical link must be prepared first (and the channel
 * policy must prefer AMP); from an AMP the Move Channel Request can be
 * sent immediately, targeting BR/EDR (amp id 0).
 */
void l2cap_move_start(struct l2cap_chan *chan)
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == HCI_BREDR_ID) {
		/* Only move off BR/EDR if policy prefers AMP */
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */

		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
/* Finish channel creation over AMP once the physical link attempt has
 * concluded.  Outgoing channels either proceed with a Create Channel
 * Request on the AMP or fall back to a plain BR/EDR connect; incoming
 * channels get their (positive or negative) response sent and, on
 * success, start configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is never used on AMP channels */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);

			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

			/* Send negative response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,

		if (result == L2CAP_CR_SUCCESS) {
			/* Channel accepted: move to configuration phase */
			__l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
/* As move initiator, after the physical link is ready: set up the
 * channel move, record the target controller and send the Move
 * Channel Request to the peer.
 */
static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
	l2cap_move_setup(chan);
	chan->move_id = local_amp_id;
	chan->move_state = L2CAP_MOVE_WAIT_RSP;

	l2cap_send_move_chan_req(chan, remote_amp_id);
/* As move responder, answer the pending Move Channel Request once the
 * physical link outcome is known.  If the logical link is already up
 * the move can succeed immediately; otherwise wait for it, or refuse
 * the move when no link is available.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan->state == BT_CONNECTED) {
		/* Logical link is ready to go */
		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;
		chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);

		/* Wait for logical link to be ready */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;

	/* Logical link not available */
	l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort a channel move.  A responder still owes the peer a (negative)
 * Move Channel Response; then the move state is reset and normal ERTM
 * transmission resumes.
 */
static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
		/* -EINVAL maps to "bad controller id", anything else
		 * to a generic refusal.
		 */
		if (result == -EINVAL)
			rsp_result = L2CAP_MR_BAD_ID;
			rsp_result = L2CAP_MR_NOT_ALLOWED;

		l2cap_send_move_chan_rsp(chan, rsp_result);

	chan->move_role = L2CAP_MOVE_ROLE_NONE;
	chan->move_state = L2CAP_MOVE_STABLE;

	/* Restart data transmission */
	l2cap_ertm_send(chan);
/* Invoke with locked chan */
/* Physical link confirmation for AMP.  Routes the result either to
 * channel creation (channel not yet connected), to move cancellation
 * on failure, or to the initiator/responder move path by role.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel already going away: nothing to do */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);

		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);

			l2cap_do_move_cancel(chan, result);
/* Handle an incoming L2CAP Move Channel Request.  Validates that the
 * channel exists and is movable, checks the destination controller,
 * detects move collisions, and then either accepts the move (possibly
 * pending on local busy / physical link setup) or rejects it.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	/* Request PDU has a fixed size */
	if (cmd_len != sizeof(*req))

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	chan = l2cap_get_chan_by_dcid(conn, icid);
		/* No such channel: refuse directly on the connection */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,

	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels without a BR/EDR-only
	 * policy may be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;

	if (req->dest_amp_id) {
		/* Destination is an AMP: it must exist, be an AMP
		 * controller and be up.
		 */
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			result = L2CAP_MR_BAD_ID;
			goto send_move_response;

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(conn->src, conn->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;

	if (!req->dest_amp_id) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;

		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;

	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);
/* Process a successful or pending Move Channel Response as initiator
 * and advance the move state machine.  Depending on the current state
 * this may arm a longer timer (pending), wait for the logical link,
 * or send the Move Channel Confirm right away.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
		/* Channel vanished: confirm as unconfirmed by icid */
		l2cap_send_move_chan_cfm_icid(conn, icid);

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		/* Peer asked for more time: use the extended timeout */
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			/* Logical link is up or moving to BR/EDR,
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);

	case L2CAP_MOVE_WAIT_RSP:
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
			/* Both logical link and move success
			 * are required to confirm
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;

		/* Placeholder - get hci_chan for logical link */
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		if (hchan->state != BT_CONNECTED)

		/* Logical link is already ready to go */
		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
			/* Now only need move success
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);

		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response.  On collision this side
 * switches to the responder role; otherwise the move is rolled back.
 * In both cases an "unconfirmed" confirm is sent to close the
 * signalling exchange.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			/* Lost the collision: let the peer drive the move */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;

	/* Cleanup - cancel move */
	chan->move_id = chan->local_amp_id;
	l2cap_move_done(chan);

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
/* Handle an incoming L2CAP Move Channel Response and dispatch to the
 * continue path (success / pending) or the failure path.
 */
static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
				  struct l2cap_cmd_hdr *cmd,
				  u16 cmd_len, void *data)
	struct l2cap_move_chan_rsp *rsp = data;

	/* Response PDU has a fixed size */
	if (cmd_len != sizeof(*rsp))

	icid = le16_to_cpu(rsp->icid);
	result = le16_to_cpu(rsp->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
		l2cap_move_continue(conn, icid, result);

		l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming L2CAP Move Channel Confirm.  Commits or rolls
 * back the controller switch on the channel and always answers with a
 * Confirm Response, even when the icid is unknown.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;

	/* Confirm PDU has a fixed size */
	if (cmd_len != sizeof(*cfm))

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit the move; a move back to BR/EDR frees
			 * the high-speed logical link.
			 */
			chan->local_amp_id = chan->move_id;
			if (!chan->local_amp_id)
				__release_logical_link(chan);

			/* Unconfirmed: stay on the current controller */
			chan->move_id = chan->local_amp_id;

		l2cap_move_done(chan);

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);
/* Handle an incoming L2CAP Move Channel Confirm Response: the final
 * PDU of a move.  Commits the controller switch, releases the logical
 * link when the channel moved back to BR/EDR, and finishes the move.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;

	/* Response PDU has a fixed size */
	if (cmd_len != sizeof(*rsp))

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Moved back to BR/EDR: the AMP link is no longer needed */
		if (!chan->local_amp_id && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);

	l2cap_chan_unlock(chan);
5144 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5149 if (min > max || min < 6 || max > 3200)
5152 if (to_multiplier < 10 || to_multiplier > 3200)
5155 if (max >= to_multiplier * 8)
5158 max_latency = (to_multiplier * 8 / max) - 1;
5159 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request.  Only valid when
 * we are master; the parameters are validated, a response is sent, and
 * accepted values are pushed down to the controller.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;

	/* Only the master may apply a parameter update */
	if (!(hcon->link_mode & HCI_LM_MASTER))

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,

		/* Accepted: program the new parameters into the link */
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch a single BR/EDR signalling command to its handler.  Echo
 * requests are answered inline; unknown opcodes are logged and (via
 * the caller) rejected.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);

	/* Connect and Create Channel responses share a handler */
	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_connect_create_rsp(conn, cmd, cmd_len, data);

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, cmd_len, data);

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data);

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);

	case L2CAP_ECHO_RSP:

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, cmd_len, data);

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);

		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch a single LE signalling command.  Only the connection
 * parameter update exchange (and command reject) are handled here;
 * anything else is reported as unknown.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u8 *data)
	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		return l2cap_conn_param_update_req(conn, cmd, data);

	case L2CAP_CONN_PARAM_UPDATE_RSP:

		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the LE signalling channel payload: walk the buffer command by
 * command, validate each header, dispatch via l2cap_le_sig_cmd() and
 * send a Command Reject when a handler fails.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
	u8 *data = skb->data;
	struct l2cap_cmd_hdr cmd;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,

		/* A claimed length beyond the buffer or a zero ident
		 * means a malformed PDU; stop parsing.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");

		err = l2cap_le_sig_cmd(conn, &cmd, data);
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Parse the BR/EDR signalling channel payload: iterate over the
 * concatenated commands, validate each header, dispatch via
 * l2cap_bredr_sig_cmd() and reject commands whose handler fails.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
	u8 *data = skb->data;
	struct l2cap_cmd_hdr cmd;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,

		/* Malformed PDU: claimed length overruns the buffer or
		 * the identifier is the reserved value zero.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify (and strip) the trailing CRC16 FCS of a received frame when
 * the channel uses FCS.  The CRC covers the L2CAP header as well, so
 * the check runs from hdr_size bytes before the payload.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
	u16 our_fcs, rcv_fcs;

	/* Extended control fields make the covered header larger */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off first; the bytes remain readable at
		 * skb->data + skb->len for the comparison below.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) with an F-bit frame: RNR when locally busy,
 * otherwise pending I-frames, and finally an RR if no frame carrying
 * the F-bit was sent by l2cap_ertm_send().
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));

	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);

	/* Remote just cleared busy: restart retransmission timing if
	 * frames are still unacknowledged.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's fragment list during SDU reassembly and
 * keep skb's length accounting consistent.  *last_frag tracks the
 * current tail so appends stay O(1).
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
/* Reassemble a (possibly segmented) SDU from incoming I-frames using
 * the SAR bits, and deliver complete SDUs through chan->ops->recv().
 * On any error the partial SDU is discarded.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* Whole SDU in one frame: hand it up directly */
		err = chan->ops->recv(chan, skb);

	case L2CAP_SAR_START:
		/* First segment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		/* SDU larger than our MTU is a protocol violation */
		if (chan->sdu_len > chan->imtu) {

		if (skb->len >= chan->sdu_len)

		chan->sdu_last_frag = skb;

	case L2CAP_SAR_CONTINUE:
		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);

		/* More data than announced: invalid */
		if (chan->sdu->len >= chan->sdu_len)

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);

		/* End segment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)

		err = chan->ops->recv(chan, chan->sdu);

			/* Reassembly complete */
			chan->sdu_last_frag = NULL;

	/* Error path: drop the partially reassembled SDU */
	kfree_skb(chan->sdu);
	chan->sdu_last_frag = NULL;
/* Resegment outstanding frames after a channel move changes the
 * permitted PDU size.  NOTE(review): body not visible in this chunk —
 * presumably still a placeholder at this revision; confirm.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
5559 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5563 if (chan->mode != L2CAP_MODE_ERTM)
5566 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5567 l2cap_tx(chan, NULL, NULL, event);
/* Drain in-sequence frames from the SREJ queue after a gap has been
 * filled, delivering them up the stack; leave SREJ_SENT state once the
 * queue is empty.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);

	if (skb_queue_empty(&chan->srej_q)) {
		/* All gaps recovered: back to normal receive state */
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: retransmit the single requested
 * frame, honouring the P/F bit protocol and the per-frame retry limit.
 * Invalid reqseq or exceeding max_tx tears the channel down.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A request for the next, never-sent frame is bogus */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

		BT_DBG("Seq %d not available for retransmission",

	/* Give up after max_tx retransmissions of the same frame */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* Poll set: retransmit with the F-bit and resume sending */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;

		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this F-bit answers an
			 * SREJ we already acted on for the same seq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
				l2cap_retransmit(chan, control);

			l2cap_retransmit(chan, control);

			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: retransmit everything from reqseq
 * onward, subject to the retry limit, with REJ_ACT guarding against
 * acting twice on the same go-back-N request.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* Rejecting the next, never-sent frame is a protocol error */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* Give up after max_tx retransmissions of the same frame */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this final bit was not already
		 * consumed by an earlier REJ.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);

		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify the txseq of a received I-frame relative to the expected
 * sequence, the tx window and any outstanding SREJs.  The returned
 * L2CAP_TXSEQ_* code drives the ERTM receive state machine (expected,
 * duplicate, missing-frame gap, or invalid).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
			/* See notes below regarding "double poll" and

			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;

			BT_DBG("Invalid - in window after SREJ sent");
			return L2CAP_TXSEQ_INVALID;

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;

	if (chan->expected_tx_seq == txseq) {
		/* Even the expected seq is invalid if it falls outside
		 * the transmit window.
		 */
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;

		return L2CAP_TXSEQ_EXPECTED;

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;

		BT_DBG("Invalid - txseq outside tx window");
		return L2CAP_TXSEQ_INVALID;

	BT_DBG("Unexpected - txseq indicates missing frames");
	return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive state machine: normal RECV state.  Processes incoming
 * I-frames (in-order delivery, gap detection with SREJ, duplicates)
 * and S-frames (RR/RNR/REJ/SREJ), freeing the skb unless it was queued
 * or consumed.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
	/* NOTE(review): bool initialised with 0; prefer false */
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,

	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* In local busy, expected frames are dropped;
			 * they will be recovered when busy clears.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",

			chan->expected_tx_seq = __next_seq(chan,

			chan->buffer_seq = chan->expected_tx_seq;

			err = l2cap_reassemble_sdu(chan, skb, control);

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			skb_queue_tail(&chan->srej_q, skb);

			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Duplicate data is dropped, but its ack info
			 * is still passed to the transmit side.
			 */
			l2cap_pass_to_tx(chan, control);
		case L2CAP_TXSEQ_INVALID_IGNORE:
		case L2CAP_TXSEQ_INVALID:
			l2cap_send_disconn_req(chan, ECONNRESET);

	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Don't retransmit while a channel move is in
			 * progress.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				l2cap_retransmit_all(chan, control);

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);

			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);

	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);

		/* Peer is busy: stop retransmitting until it recovers */
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);

	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);

	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);

	/* Free the skb unless it was queued or handed up the stack */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
/* ERTM rx state machine: handler for L2CAP_RX_STATE_SREJ_SENT, i.e.
 * one or more SREJ frames are outstanding and frames are being
 * collected in srej_q until the sequence gap is filled.  Returns 0
 * or a negative errno from processing the queued I-frames.
 * NOTE(review): structural lines (braces/breaks) are elided in this
 * excerpt; comments describe only the visible code.
 */
5919 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5920 struct l2cap_ctrl *control,
5921 struct sk_buff *skb, u8 event)
5924 u16 txseq = control->txseq;
/* Set once @skb has been queued; unqueued skbs are freed at exit. */
5925 bool skb_in_use = 0;
5927 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5931 case L2CAP_EV_RECV_IFRAME:
5932 switch (l2cap_classify_txseq(chan, txseq)) {
5933 case L2CAP_TXSEQ_EXPECTED:
5934 /* Keep frame for reassembly later */
5935 l2cap_pass_to_tx(chan, control);
5936 skb_queue_tail(&chan->srej_q, skb);
5938 BT_DBG("Queued %p (queue len %d)", skb,
5939 skb_queue_len(&chan->srej_q));
5941 chan->expected_tx_seq = __next_seq(chan, txseq);
5943 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* This frame answers the SREJ at the head of srej_list. */
5944 l2cap_seq_list_pop(&chan->srej_list);
5946 l2cap_pass_to_tx(chan, control);
5947 skb_queue_tail(&chan->srej_q, skb);
5949 BT_DBG("Queued %p (queue len %d)", skb,
5950 skb_queue_len(&chan->srej_q));
/* A gap may now be filled: try to deliver queued I-frames. */
5952 err = l2cap_rx_queued_iframes(chan);
5957 case L2CAP_TXSEQ_UNEXPECTED:
5958 /* Got a frame that can't be reassembled yet.
5959 * Save it for later, and send SREJs to cover
5960 * the missing frames.
5962 skb_queue_tail(&chan->srej_q, skb);
5964 BT_DBG("Queued %p (queue len %d)", skb,
5965 skb_queue_len(&chan->srej_q));
5967 l2cap_pass_to_tx(chan, control);
5968 l2cap_send_srej(chan, control->txseq);
5970 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5971 /* This frame was requested with an SREJ, but
5972 * some expected retransmitted frames are
5973 * missing. Request retransmission of missing
5976 skb_queue_tail(&chan->srej_q, skb);
5978 BT_DBG("Queued %p (queue len %d)", skb,
5979 skb_queue_len(&chan->srej_q));
5981 l2cap_pass_to_tx(chan, control);
5982 l2cap_send_srej_list(chan, control->txseq);
5984 case L2CAP_TXSEQ_DUPLICATE_SREJ:
5985 /* We've already queued this frame. Drop this copy. */
5986 l2cap_pass_to_tx(chan, control);
5988 case L2CAP_TXSEQ_DUPLICATE:
5989 /* Expecting a later sequence number, so this frame
5990 * was already received. Ignore it completely.
5993 case L2CAP_TXSEQ_INVALID_IGNORE:
/* Unrecoverable sequence error: tear the channel down. */
5995 case L2CAP_TXSEQ_INVALID:
5997 l2cap_send_disconn_req(chan, ECONNRESET);
6001 case L2CAP_EV_RECV_RR:
6002 l2cap_pass_to_tx(chan, control);
6003 if (control->final) {
6004 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* F=1 answers an outstanding REJ poll: retransmit unacked frames
 * unless the REJ was already acted upon.
 */
6006 if (!test_and_clear_bit(CONN_REJ_ACT,
6007 &chan->conn_state)) {
6009 l2cap_retransmit_all(chan, control);
6012 l2cap_ertm_send(chan);
6013 } else if (control->poll) {
6014 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6015 &chan->conn_state) &&
6016 chan->unacked_frames) {
6017 __set_retrans_timer(chan);
/* Answer the poll with an F=1 SREJ for the tail of the list. */
6020 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6021 l2cap_send_srej_tail(chan);
6023 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6024 &chan->conn_state) &&
6025 chan->unacked_frames)
6026 __set_retrans_timer(chan);
6028 l2cap_send_ack(chan);
6031 case L2CAP_EV_RECV_RNR:
/* Peer reports busy: stop transmitting to it. */
6032 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6033 l2cap_pass_to_tx(chan, control);
6034 if (control->poll) {
6035 l2cap_send_srej_tail(chan);
/* Build a plain RR S-frame acknowledging buffer_seq. */
6037 struct l2cap_ctrl rr_control;
6038 memset(&rr_control, 0, sizeof(rr_control));
6039 rr_control.sframe = 1;
6040 rr_control.super = L2CAP_SUPER_RR;
6041 rr_control.reqseq = chan->buffer_seq;
6042 l2cap_send_sframe(chan, &rr_control);
6046 case L2CAP_EV_RECV_REJ:
6047 l2cap_handle_rej(chan, control);
6049 case L2CAP_EV_RECV_SREJ:
6050 l2cap_handle_srej(chan, control);
/* Free any skb that was not queued above. */
6054 if (skb && !skb_in_use) {
6055 BT_DBG("Freeing %p", skb);
/* Complete an AMP channel move: return the rx state machine to the
 * normal RECV state, adopt the MTU of the (possibly new) underlying
 * link, and resegment queued outgoing data for that MTU.
 */
6062 static int l2cap_finish_move(struct l2cap_chan *chan)
6064 BT_DBG("chan %p", chan);
6066 chan->rx_state = L2CAP_RX_STATE_RECV;
/* High-speed (AMP) links use block_mtu; BR/EDR links use acl_mtu. */
6069 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6071 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6073 return l2cap_resegment(chan);
/* Handler for L2CAP_RX_STATE_WAIT_P: waiting for a P=1 poll from the
 * peer after a channel move.  On the poll, acked frames are released,
 * the transmit queue is rewound to the first unacked frame, the move
 * is finished via l2cap_finish_move(), and an F=1 response is sent.
 * The triggering event (unless it was a plain I-frame) is then
 * re-dispatched to the RECV-state handler.
 */
6076 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6077 struct l2cap_ctrl *control,
6078 struct sk_buff *skb, u8 event)
6082 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6088 l2cap_process_reqseq(chan, control->reqseq);
/* Restart transmission from the head of whatever is still queued. */
6090 if (!skb_queue_empty(&chan->tx_q))
6091 chan->tx_send_head = skb_peek(&chan->tx_q);
6093 chan->tx_send_head = NULL;
6095 /* Rewind next_tx_seq to the point expected
6098 chan->next_tx_seq = control->reqseq;
6099 chan->unacked_frames = 0;
6101 err = l2cap_finish_move(chan);
6105 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6106 l2cap_send_i_or_rr_or_rnr(chan);
6108 if (event == L2CAP_EV_RECV_IFRAME)
6111 return l2cap_rx_state_recv(chan, control, NULL, event);
/* Handler for L2CAP_RX_STATE_WAIT_F: we polled during a channel move
 * and are waiting for the peer's F=1 final frame.  Non-final frames
 * fall through; the final frame rewinds the transmit queue to the
 * first unacked frame, adopts the new link MTU, resegments pending
 * data, and re-dispatches the event to the RECV-state handler.
 */
6114 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6115 struct l2cap_ctrl *control,
6116 struct sk_buff *skb, u8 event)
6120 if (!control->final)
6123 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6125 chan->rx_state = L2CAP_RX_STATE_RECV;
6126 l2cap_process_reqseq(chan, control->reqseq);
/* Restart transmission from the head of whatever is still queued. */
6128 if (!skb_queue_empty(&chan->tx_q))
6129 chan->tx_send_head = skb_peek(&chan->tx_q);
6131 chan->tx_send_head = NULL;
6133 /* Rewind next_tx_seq to the point expected
6136 chan->next_tx_seq = control->reqseq;
6137 chan->unacked_frames = 0;
/* High-speed (AMP) links use block_mtu; BR/EDR links use acl_mtu. */
6140 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6142 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6144 err = l2cap_resegment(chan);
6147 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Validate a received reqseq (ack number): it must acknowledge a
 * frame that was sent but not yet acked, i.e. its offset from
 * next_tx_seq must not exceed the number of unacked sequence slots.
 */
6152 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6154 /* Make sure reqseq is for a packet that has been sent but not acked */
6157 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6158 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive dispatch: validate the frame's ack number
 * (reqseq) and hand the event to the handler for the current rx
 * state.  An out-of-window reqseq is a protocol violation and
 * disconnects the channel with ECONNRESET.
 */
6161 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6162 struct sk_buff *skb, u8 event)
6166 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6167 control, skb, event, chan->rx_state);
6169 if (__valid_reqseq(chan, control->reqseq)) {
6170 switch (chan->rx_state) {
6171 case L2CAP_RX_STATE_RECV:
6172 err = l2cap_rx_state_recv(chan, control, skb, event);
6174 case L2CAP_RX_STATE_SREJ_SENT:
6175 err = l2cap_rx_state_srej_sent(chan, control, skb,
6178 case L2CAP_RX_STATE_WAIT_P:
6179 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6181 case L2CAP_RX_STATE_WAIT_F:
6182 err = l2cap_rx_state_wait_f(chan, control, skb, event);
/* NOTE(review): the format string below lacks a closing ')'. */
6189 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6190 control->reqseq, chan->next_tx_seq,
6191 chan->expected_ack_seq);
6192 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: there is no retransmission, so only the
 * expected txseq is reassembled.  Any sequence gap discards partial
 * SDU state and resynchronizes expected_tx_seq to the frame just
 * seen, so delivery continues from there.
 */
6198 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6199 struct sk_buff *skb)
6203 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6206 if (l2cap_classify_txseq(chan, control->txseq) ==
6207 L2CAP_TXSEQ_EXPECTED) {
6208 l2cap_pass_to_tx(chan, control);
6210 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6211 __next_seq(chan, chan->buffer_seq));
6213 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6215 l2cap_reassemble_sdu(chan, skb, control);
/* Gap in the stream: drop any partially assembled SDU. */
6218 kfree_skb(chan->sdu);
6221 chan->sdu_last_frag = NULL;
6225 BT_DBG("Freeing %p", skb);
/* Resync: treat the received frame as the new reference point. */
6230 chan->last_acked_seq = control->txseq;
6231 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Common entry for ERTM/streaming frames on a data channel: unpack
 * the control field, verify the FCS, bound the payload by MPS, then
 * dispatch I-frames to the rx state machine (or the streaming
 * receiver) and S-frames via the rx_func_to_event table.  Protocol
 * violations disconnect the channel with ECONNRESET.
 */
6236 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6238 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6242 __unpack_control(chan, skb);
6247 * We can just drop the corrupted I-frame here.
6248 * Receiver will miss it and start proper recovery
6249 * procedures and ask for retransmission.
6251 if (l2cap_check_fcs(chan, skb))
/* Exclude the SDU-length field (start fragments) and the FCS from
 * the payload length compared against MPS.
 */
6254 if (!control->sframe && control->sar == L2CAP_SAR_START)
6255 len -= L2CAP_SDULEN_SIZE;
6257 if (chan->fcs == L2CAP_FCS_CRC16)
6258 len -= L2CAP_FCS_SIZE;
6260 if (len > chan->mps) {
6261 l2cap_send_disconn_req(chan, ECONNRESET);
6265 if (!control->sframe) {
6268 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6269 control->sar, control->reqseq, control->final,
6272 /* Validate F-bit - F=0 always valid, F=1 only
6273 * valid in TX WAIT_F
6275 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6278 if (chan->mode != L2CAP_MODE_STREAMING) {
6279 event = L2CAP_EV_RECV_IFRAME;
6280 err = l2cap_rx(chan, control, skb, event);
6282 err = l2cap_stream_rx(chan, control, skb);
6286 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit S-frame "super" field to rx state-machine events. */
6288 const u8 rx_func_to_event[4] = {
6289 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6290 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6293 /* Only I-frames are expected in streaming mode */
6294 if (chan->mode == L2CAP_MODE_STREAMING)
6297 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6298 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; trailing bytes are a violation. */
6302 BT_ERR("Trailing bytes: %d in sframe", len);
6303 l2cap_send_disconn_req(chan, ECONNRESET);
6307 /* Validate F and P bits */
6308 if (control->final && (control->poll ||
6309 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6312 event = rx_func_to_event[control->super];
6313 if (l2cap_rx(chan, control, skb, event))
6314 l2cap_send_disconn_req(chan, ECONNRESET);
/* Route a frame arriving on a dynamic CID to its channel.  A2MP
 * frames may create their channel on demand; unknown CIDs are
 * dropped.  BASIC mode enforces the incoming MTU and hands the skb
 * to the channel's recv op; ERTM/streaming go through
 * l2cap_data_rcv().
 */
6324 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6325 struct sk_buff *skb)
6327 struct l2cap_chan *chan;
6329 chan = l2cap_get_chan_by_scid(conn, cid);
6331 if (cid == L2CAP_CID_A2MP) {
6332 chan = a2mp_channel_create(conn, skb);
6338 l2cap_chan_lock(chan);
6340 BT_DBG("unknown cid 0x%4.4x", cid);
6341 /* Drop packet and return */
6347 BT_DBG("chan %p, len %d", chan, skb->len);
6349 if (chan->state != BT_CONNECTED)
6352 switch (chan->mode) {
6353 case L2CAP_MODE_BASIC:
6354 /* If socket recv buffers overflows we drop data here
6355 * which is *bad* because L2CAP has to be reliable.
6356 * But we don't have any other choice. L2CAP doesn't
6357 * provide flow control mechanism. */
6359 if (chan->imtu < skb->len)
6362 if (!chan->ops->recv(chan, skb))
6366 case L2CAP_MODE_ERTM:
6367 case L2CAP_MODE_STREAMING:
6368 l2cap_data_rcv(chan, skb);
6372 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6380 l2cap_chan_unlock(chan);
/* Deliver a connectionless (G-frame) payload to a channel listening
 * on @psm, enforcing channel state (BOUND or CONNECTED) and the
 * incoming MTU before calling the channel's recv op.
 */
6383 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6384 struct sk_buff *skb)
6386 struct l2cap_chan *chan;
6388 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
6392 BT_DBG("chan %p, len %d", chan, skb->len);
6394 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6397 if (chan->imtu < skb->len)
6400 if (!chan->ops->recv(chan, skb))
/* Deliver a frame on the fixed ATT CID to the matching connected
 * channel for this address pair, enforcing the incoming MTU before
 * calling the channel's recv op.
 */
6407 static void l2cap_att_channel(struct l2cap_conn *conn,
6408 struct sk_buff *skb)
6410 struct l2cap_chan *chan;
6412 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6413 conn->src, conn->dst);
6417 BT_DBG("chan %p, len %d", chan, skb->len);
6419 if (chan->imtu < skb->len)
6422 if (!chan->ops->recv(chan, skb))
/* Demultiplex one complete L2CAP frame: strip the basic header,
 * verify the advertised length against the payload, and route by CID
 * to the signalling, connectionless, ATT, SMP or data-channel
 * handlers.
 */
6429 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6431 struct l2cap_hdr *lh = (void *) skb->data;
6435 skb_pull(skb, L2CAP_HDR_SIZE);
6436 cid = __le16_to_cpu(lh->cid);
6437 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload; drop otherwise. */
6439 if (len != skb->len) {
6444 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6447 case L2CAP_CID_LE_SIGNALING:
6448 l2cap_le_sig_channel(conn, skb);
6450 case L2CAP_CID_SIGNALING:
6451 l2cap_sig_channel(conn, skb);
6454 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a PSM right after the header;
 * get_unaligned() because the field may be misaligned in the skb.
 */
6455 psm = get_unaligned((__le16 *) skb->data);
6456 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6457 l2cap_conless_channel(conn, psm, skb);
6461 l2cap_att_channel(conn, skb);
/* Nonzero from the SMP handler means a security failure: drop the
 * whole connection with EACCES.
 */
6465 if (smp_sig_channel(conn, skb))
6466 l2cap_conn_del(conn->hcon, EACCES);
6470 l2cap_data_channel(conn, cid, skb);
6475 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: incoming ACL connection request.  Scan listening
 * channels and compute the accepted link mode: lm1 accumulates flags
 * for channels bound exactly to this adapter's address, lm2 for
 * channels bound to BDADDR_ANY; an exact match takes precedence.
 */
6477 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6479 int exact = 0, lm1 = 0, lm2 = 0;
6480 struct l2cap_chan *c;
6482 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6484 /* Find listening sockets and check their link_mode */
6485 read_lock(&chan_list_lock);
6486 list_for_each_entry(c, &chan_list, global_l) {
6487 struct sock *sk = c->sk;
6489 if (c->state != BT_LISTEN)
6492 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
6493 lm1 |= HCI_LM_ACCEPT;
/* FLAG_ROLE_SWITCH means the channel wants the master role. */
6494 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6495 lm1 |= HCI_LM_MASTER;
6497 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
6498 lm2 |= HCI_LM_ACCEPT;
6499 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6500 lm2 |= HCI_LM_MASTER;
6503 read_unlock(&chan_list_lock);
6505 return exact ? lm1 : lm2;
/* HCI callback: ACL connect completed.  On success create the L2CAP
 * connection object and mark it ready; on failure tear it down with
 * the HCI status translated to an errno.
 */
6508 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6510 struct l2cap_conn *conn;
6512 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6515 conn = l2cap_conn_add(hcon);
6517 l2cap_conn_ready(conn);
6519 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the disconnect reason recorded on the L2CAP
 * connection, defaulting to "remote user terminated" when no
 * l2cap_conn exists for this link.
 */
6523 int l2cap_disconn_ind(struct hci_conn *hcon)
6525 struct l2cap_conn *conn = hcon->l2cap_data;
6527 BT_DBG("hcon %p", hcon);
6530 return HCI_ERROR_REMOTE_USER_TERM;
6531 return conn->disc_reason;
/* HCI callback: the ACL link went down; tear down the L2CAP
 * connection, translating the HCI reason code to an errno.
 */
6534 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6536 BT_DBG("hcon %p reason %d", hcon, reason);
6538 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption arms a grace timer for MEDIUM security and
 * closes the channel outright for HIGH security; regaining
 * encryption clears the pending timer.
 */
6541 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6543 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6546 if (encrypt == 0x00) {
6547 if (chan->sec_level == BT_SECURITY_MEDIUM) {
/* Grace period before acting on the lost encryption. */
6548 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6549 } else if (chan->sec_level == BT_SECURITY_HIGH)
6550 l2cap_chan_close(chan, ECONNREFUSED);
6552 if (chan->sec_level == BT_SECURITY_MEDIUM)
6553 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption completed for @hcon.  LE
 * links hand off to SMP key distribution; for BR/EDR every channel on
 * the connection is walked under chan_lock and advanced: ATT channels
 * become ready, CONNECT channels proceed (or get a disconnect timer
 * on failure), and CONNECT2 channels answer their pending connect
 * request with success, authorization-pending, or security-block.
 * NOTE(review): structural lines are elided in this excerpt; comments
 * describe only the visible code.
 */
6557 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6559 struct l2cap_conn *conn = hcon->l2cap_data;
6560 struct l2cap_chan *chan;
6565 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
6567 if (hcon->type == LE_LINK) {
6568 if (!status && encrypt)
6569 smp_distribute_keys(conn, 0);
6570 cancel_delayed_work(&conn->security_timer);
6573 mutex_lock(&conn->chan_lock);
6575 list_for_each_entry(chan, &conn->chan_l, list) {
6576 l2cap_chan_lock(chan);
6578 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6579 state_to_string(chan->state));
/* A2MP fixed channels are not subject to link security. */
6581 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6582 l2cap_chan_unlock(chan);
/* ATT channels become ready as soon as encryption is up. */
6586 if (chan->scid == L2CAP_CID_ATT) {
6587 if (!status && encrypt) {
6588 chan->sec_level = hcon->sec_level;
6589 l2cap_chan_ready(chan);
6592 l2cap_chan_unlock(chan);
6596 if (!__l2cap_no_conn_pending(chan)) {
6597 l2cap_chan_unlock(chan);
/* Already-established channels just resume: clear the socket
 * suspension and re-evaluate the encryption requirements.
 */
6601 if (!status && (chan->state == BT_CONNECTED ||
6602 chan->state == BT_CONFIG)) {
6603 struct sock *sk = chan->sk;
6605 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
6606 sk->sk_state_change(sk);
6608 l2cap_check_encryption(chan, encrypt);
6609 l2cap_chan_unlock(chan);
6613 if (chan->state == BT_CONNECT) {
6615 l2cap_start_connection(chan);
6617 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6619 } else if (chan->state == BT_CONNECT2) {
6620 struct sock *sk = chan->sk;
6621 struct l2cap_conn_rsp rsp;
/* Deferred setup keeps the response pending until userspace
 * authorizes the connection.
 */
6627 if (test_bit(BT_SK_DEFER_SETUP,
6628 &bt_sk(sk)->flags)) {
6629 res = L2CAP_CR_PEND;
6630 stat = L2CAP_CS_AUTHOR_PEND;
6631 chan->ops->defer(chan);
6633 __l2cap_state_change(chan, BT_CONFIG);
6634 res = L2CAP_CR_SUCCESS;
6635 stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse the connection and arm teardown. */
6638 __l2cap_state_change(chan, BT_DISCONN);
6639 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6640 res = L2CAP_CR_SEC_BLOCK;
6641 stat = L2CAP_CS_NO_INFO;
6646 rsp.scid = cpu_to_le16(chan->dcid);
6647 rsp.dcid = cpu_to_le16(chan->scid);
6648 rsp.result = cpu_to_le16(res);
6649 rsp.status = cpu_to_le16(stat);
6650 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* On success, kick off configuration if not already started. */
6653 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6654 res == L2CAP_CR_SUCCESS) {
6656 set_bit(CONF_REQ_SENT, &chan->conf_state);
6657 l2cap_send_cmd(conn, l2cap_get_ident(conn),
6659 l2cap_build_conf_req(chan, buf),
6661 chan->num_conf_req++;
6665 l2cap_chan_unlock(chan);
6668 mutex_unlock(&conn->chan_lock);
/* HCI callback: one ACL fragment arrived.  Start fragments must hold
 * a complete basic L2CAP header, which fixes the expected total frame
 * length; continuation fragments are appended to conn->rx_skb until
 * conn->rx_len reaches zero, at which point the reassembled frame is
 * passed to l2cap_recv_frame().  Malformed fragment sequences mark
 * the connection unreliable (ECOMM).
 */
6673 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6675 struct l2cap_conn *conn = hcon->l2cap_data;
6676 struct l2cap_hdr *hdr;
6679 /* For AMP controller do not create l2cap conn */
6680 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6684 conn = l2cap_conn_add(hcon);
6689 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6693 case ACL_START_NO_FLUSH:
/* A new start while reassembly is in progress aborts the old
 * partial frame.
 */
6696 BT_ERR("Unexpected start frame (len %d)", skb->len);
6697 kfree_skb(conn->rx_skb);
6698 conn->rx_skb = NULL;
6700 l2cap_conn_unreliable(conn, ECOMM);
6703 /* Start fragment always begin with Basic L2CAP header */
6704 if (skb->len < L2CAP_HDR_SIZE) {
6705 BT_ERR("Frame is too short (len %d)", skb->len);
6706 l2cap_conn_unreliable(conn, ECOMM);
6710 hdr = (struct l2cap_hdr *) skb->data;
6711 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6713 if (len == skb->len) {
6714 /* Complete frame received */
6715 l2cap_recv_frame(conn, skb);
6719 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6721 if (skb->len > len) {
6722 BT_ERR("Frame is too long (len %d, expected len %d)",
6724 l2cap_conn_unreliable(conn, ECOMM);
6728 /* Allocate skb for the complete frame (with header) */
6729 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6733 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many bytes are still missing. */
6735 conn->rx_len = len - skb->len;
6739 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
6741 if (!conn->rx_len) {
6742 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6743 l2cap_conn_unreliable(conn, ECOMM);
6747 if (skb->len > conn->rx_len) {
6748 BT_ERR("Fragment is too long (len %d, expected %d)",
6749 skb->len, conn->rx_len);
6750 kfree_skb(conn->rx_skb);
6751 conn->rx_skb = NULL;
6753 l2cap_conn_unreliable(conn, ECOMM);
6757 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6759 conn->rx_len -= skb->len;
6761 if (!conn->rx_len) {
6762 /* Complete frame received */
6763 l2cap_recv_frame(conn, conn->rx_skb);
6764 conn->rx_skb = NULL;
/* seq_file show callback for the "l2cap" debugfs file: dump one line
 * per registered channel (addresses, state, PSM, CIDs, MTUs,
 * security level, mode) under the global channel-list read lock.
 */
6774 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6776 struct l2cap_chan *c;
6778 read_lock(&chan_list_lock);
6780 list_for_each_entry(c, &chan_list, global_l) {
6781 struct sock *sk = c->sk;
6783 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6784 &bt_sk(sk)->src, &bt_sk(sk)->dst,
6785 c->state, __le16_to_cpu(c->psm),
6786 c->scid, c->dcid, c->imtu, c->omtu,
6787 c->sec_level, c->mode);
6790 read_unlock(&chan_list_lock);
/* debugfs open: bind the single-shot seq_file show callback. */
6795 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6797 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* file_operations for the read-only "l2cap" debugfs file. */
6800 static const struct file_operations l2cap_debugfs_fops = {
6801 .open = l2cap_debugfs_open,
6803 .llseek = seq_lseek,
6804 .release = single_release,
/* debugfs entry created by l2cap_init() and removed by l2cap_exit(). */
6807 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and create the
 * "l2cap" debugfs file (debugfs failure is non-fatal: only logged).
 */
6809 int __init l2cap_init(void)
6813 err = l2cap_init_sockets();
6818 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6819 NULL, &l2cap_debugfs_fops);
6821 BT_ERR("Failed to create L2CAP debug file");
/* Module teardown: remove the debugfs file and unregister sockets. */
6827 void l2cap_exit(void)
6829 debugfs_remove(l2cap_debugfs);
6830 l2cap_cleanup_sockets();
/* Module parameter (0644): allow disabling ERTM at load time. */
6833 module_param(disable_ertm, bool, 0644);
6834 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");