/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);
enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);
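
/*
 * Usage sketch (illustrative, not part of this file's logic): the two
 * helpers above are inverses for the rates they both understand, e.g.
 *
 *	int mult = ib_rate_to_mult(IB_RATE_40_GBPS);	// 16 (x 2.5 Gb/sec)
 *	enum ib_rate rate = mult_to_ib_rate(mult);	// IB_RATE_40_GBPS
 *
 * Multipliers with no matching enum fall back to IB_RATE_PORT_CURRENT.
 */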
int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);
enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);
enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);
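
/*
 * Usage sketch (hypothetical caller): transport-aware code can branch
 * on the link layer of a specific port rather than on node type alone:
 *
 *	if (rdma_port_get_link_layer(device, port_num) ==
 *	    IB_LINK_LAYER_ETHERNET)
 *		handle the Ethernet (e.g. RoCE) case;
 */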
/* Protection domains */

struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);

	if (!IS_ERR(pd)) {
		pd->device  = device;
		pd->uobject = NULL;
		atomic_set(&pd->usecnt, 0);
	}

	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

int ib_dealloc_pd(struct ib_pd *pd)
{
	if (atomic_read(&pd->usecnt))
		return -EBUSY;

	return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);
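
/*
 * Usage sketch (hypothetical error handling): the usual pairing.  Note
 * that ib_dealloc_pd() returns -EBUSY while AHs, QPs, MRs, etc. still
 * hold a reference on the PD:
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	... create and destroy resources on pd ...
 *	WARN_ON(ib_dealloc_pd(pd));
 */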
/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = grh->sgid;

		ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = 0xFF;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
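
/*
 * Usage sketch (hypothetical UD responder): a received completion plus
 * its GRH is enough to address a reply, with no path record lookup:
 *
 *	struct ib_ah *ah = ib_create_ah_from_wc(pd, &wc, grh, port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	... post the reply using ah, then ib_destroy_ah(ah) ...
 */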
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);
/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->srq_type      = srq_init_attr->srq_type;
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			srq->ext.xrc.cq   = srq_init_attr->ext.xrc.cq;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
			atomic_inc(&srq->ext.xrc.cq->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);
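
/*
 * Usage sketch (hypothetical sizes): a basic (non-XRC) SRQ that several
 * QPs can share as their receive queue:
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr	  = { .max_wr = 128, .max_sge = 1 },
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_attr);
 */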
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);
int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);
/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}
static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);

	return qp;
}
EXPORT_SYMBOL(ib_open_qp);
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp, *real_qp;
	struct ib_device *device;

	device = pd ? pd->device : qp_init_attr->xrcd->device;
	qp = device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device     = device;
		qp->real_qp    = qp;
		qp->uobject    = NULL;
		qp->qp_type    = qp_init_attr->qp_type;

		atomic_set(&qp->usecnt, 0);
		if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
			qp->event_handler = __ib_shared_qp_event_handler;
			qp->qp_context = qp;
			qp->pd = NULL;
			qp->send_cq = qp->recv_cq = NULL;
			qp->srq = NULL;
			qp->xrcd = qp_init_attr->xrcd;
			atomic_inc(&qp_init_attr->xrcd->usecnt);
			INIT_LIST_HEAD(&qp->open_list);

			real_qp = qp;
			qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
					  qp_init_attr->qp_context);
			if (!IS_ERR(qp))
				__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
			else
				real_qp->device->destroy_qp(real_qp);
		} else {
			qp->event_handler = qp_init_attr->event_handler;
			qp->qp_context = qp_init_attr->qp_context;
			if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
				qp->recv_cq = NULL;
				qp->srq = NULL;
			} else {
				qp->recv_cq = qp_init_attr->recv_cq;
				atomic_inc(&qp_init_attr->recv_cq->usecnt);
				qp->srq = qp_init_attr->srq;
				if (qp->srq)
					atomic_inc(&qp_init_attr->srq->usecnt);
			}

			qp->pd	    = pd;
			qp->send_cq = qp_init_attr->send_cq;
			qp->xrcd    = NULL;

			atomic_inc(&pd->usecnt);
			atomic_inc(&qp_init_attr->send_cq->usecnt);
		}
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
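
/*
 * Usage sketch (hypothetical handler and sizes): creating an RC QP on
 * existing CQs; the XRC_TGT path above is only taken for shared QPs:
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.event_handler = my_qp_event_handler,
 *		.send_cq       = send_cq,
 *		.recv_cq       = recv_cq,
 *		.cap	       = { .max_send_wr  = 16, .max_recv_wr  = 16,
 *				   .max_send_sge = 1,  .max_recv_sge = 1 },
 *		.sq_sig_type   = IB_SIGNAL_ALL_WR,
 *		.qp_type       = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 */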
static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX | IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX | IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV | IB_QP_PATH_MTU |
						IB_QP_DEST_QPN | IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV | IB_QP_PATH_MTU |
						IB_QP_DEST_QPN | IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV | IB_QP_PATH_MTU |
						IB_QP_DEST_QPN | IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV | IB_QP_PATH_MTU |
						IB_QP_DEST_QPN | IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY | IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
						IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV | IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT | IB_QP_AV |
						IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_PORT | IB_QP_AV |
						IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_PORT | IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 }
	}
};
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE  &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
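
/*
 * Usage sketch: this is how low-level drivers typically consume the
 * table above, validating the caller's attribute mask for, say, an
 * INIT -> RTR transition on an RC QP (names are illustrative):
 *
 *	if (!ib_modify_qp_is_ok(IB_QPS_INIT, IB_QPS_RTR, IB_QPT_RC,
 *				qp_attr_mask))
 *		return -EINVAL;
 */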
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);
int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);
static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}
int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd   = qp->pd;
	scq  = qp->send_cq;
	rcq  = qp->recv_cq;
	srq  = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);
/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);
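
/*
 * Usage sketch (hypothetical callback and sizes): a CQ with a
 * completion handler, 256 entries, on completion vector 0:
 *
 *	struct ib_cq *cq = ib_create_cq(device, my_comp_handler, NULL,
 *					my_context, 256, 0);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */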
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);
/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);
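
/*
 * Usage sketch: most kernel ULPs need only a DMA MR with local write
 * access for receive buffers; mr->lkey then goes into each ib_sge:
 *
 *	struct ib_mr *mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	... use mr->lkey; ib_dereg_mr(mr) at teardown ...
 */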
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start)
{
	struct ib_mr *mr;

	if (!pd->device->reg_phys_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
				     mr_access_flags, iova_start);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start)
{
	struct ib_pd *old_pd;
	int ret;

	if (!mr->device->rereg_phys_mr)
		return -ENOSYS;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	old_pd = mr->pd;

	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
					phys_buf_array, num_phys_buf,
					mr_access_flags, iova_start);

	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
		atomic_dec(&old_pd->usecnt);
		atomic_inc(&pd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);
struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_fast_reg_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_fast_reg_mr(pd, max_page_list_len);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_mr);

struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
							  int max_page_list_len)
{
	struct ib_fast_reg_page_list *page_list;

	if (!device->alloc_fast_reg_page_list)
		return ERR_PTR(-ENOSYS);

	page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);

	if (!IS_ERR(page_list)) {
		page_list->device = device;
		page_list->max_page_list_len = max_page_list_len;
	}

	return page_list;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_page_list);

void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	page_list->device->free_fast_reg_page_list(page_list);
}
EXPORT_SYMBOL(ib_free_fast_reg_page_list);
/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd, type);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		mw->type    = type;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);
1140 /* "Fast" memory regions */
1142 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
1143 int mr_access_flags,
1144 struct ib_fmr_attr *fmr_attr)
1148 if (!pd->device->alloc_fmr)
1149 return ERR_PTR(-ENOSYS);
1151 fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
1153 fmr->device = pd->device;
1155 atomic_inc(&pd->usecnt);
1160 EXPORT_SYMBOL(ib_alloc_fmr);
1162 int ib_unmap_fmr(struct list_head *fmr_list)
1166 if (list_empty(fmr_list))
1169 fmr = list_entry(fmr_list->next, struct ib_fmr, list);
1170 return fmr->device->unmap_fmr(fmr_list);
1172 EXPORT_SYMBOL(ib_unmap_fmr);
1174 int ib_dealloc_fmr(struct ib_fmr *fmr)
1180 ret = fmr->device->dealloc_fmr(fmr);
1182 atomic_dec(&pd->usecnt);
1186 EXPORT_SYMBOL(ib_dealloc_fmr);
/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);
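
/*
 * Usage sketch (hypothetical mgid/mlid from an SA multicast join):
 * attach is only valid for UD QPs and multicast GIDs, per the checks
 * above, and each attach should be balanced by a detach:
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	...
 *	ret = ib_detach_mcast(qp, &mgid, mlid);
 */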
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);
int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);
struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;

	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id))
		atomic_inc(&qp->usecnt);
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);
int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);