3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2015 Intel Corporation.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
21 * Copyright(c) 2015 Intel Corporation.
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 #include <rdma/ib_mad.h>
52 #include <rdma/ib_user_verbs.h>
54 #include <linux/module.h>
55 #include <linux/utsname.h>
56 #include <linux/rculist.h>
58 #include <linux/random.h>
59 #include <linux/vmalloc.h>
68 unsigned int hfi1_lkey_table_size = 16;
69 module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
70 S_IRUGO);
71 MODULE_PARM_DESC(lkey_table_size,
72 "LKEY table size in bits (2^n, 1 <= n <= 23)");
74 static unsigned int hfi1_max_pds = 0xFFFF;
75 module_param_named(max_pds, hfi1_max_pds, uint, S_IRUGO);
76 MODULE_PARM_DESC(max_pds,
77 "Maximum number of protection domains to support");
79 static unsigned int hfi1_max_ahs = 0xFFFF;
80 module_param_named(max_ahs, hfi1_max_ahs, uint, S_IRUGO);
81 MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
83 unsigned int hfi1_max_cqes = 0x2FFFF;
84 module_param_named(max_cqes, hfi1_max_cqes, uint, S_IRUGO);
85 MODULE_PARM_DESC(max_cqes,
86 "Maximum number of completion queue entries to support");
88 unsigned int hfi1_max_cqs = 0x1FFFF;
89 module_param_named(max_cqs, hfi1_max_cqs, uint, S_IRUGO);
90 MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
92 unsigned int hfi1_max_qp_wrs = 0x3FFF;
93 module_param_named(max_qp_wrs, hfi1_max_qp_wrs, uint, S_IRUGO);
94 MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
96 unsigned int hfi1_max_qps = 16384;
97 module_param_named(max_qps, hfi1_max_qps, uint, S_IRUGO);
98 MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
100 unsigned int hfi1_max_sges = 0x60;
101 module_param_named(max_sges, hfi1_max_sges, uint, S_IRUGO);
102 MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
104 unsigned int hfi1_max_mcast_grps = 16384;
105 module_param_named(max_mcast_grps, hfi1_max_mcast_grps, uint, S_IRUGO);
106 MODULE_PARM_DESC(max_mcast_grps,
107 "Maximum number of multicast groups to support");
109 unsigned int hfi1_max_mcast_qp_attached = 16;
110 module_param_named(max_mcast_qp_attached, hfi1_max_mcast_qp_attached,
111 uint, S_IRUGO);
112 MODULE_PARM_DESC(max_mcast_qp_attached,
113 "Maximum number of attached QPs to support");
115 unsigned int hfi1_max_srqs = 1024;
116 module_param_named(max_srqs, hfi1_max_srqs, uint, S_IRUGO);
117 MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
119 unsigned int hfi1_max_srq_sges = 128;
120 module_param_named(max_srq_sges, hfi1_max_srq_sges, uint, S_IRUGO);
121 MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
123 unsigned int hfi1_max_srq_wrs = 0x1FFFF;
124 module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO);
125 MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
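/*
 * All of the limits above are module parameters and may be overridden at
 * load time, e.g.:
 *
 *   modprobe hfi1 lkey_table_size=17 max_qps=32768
 */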
127 static void verbs_sdma_complete(
128 struct sdma_txreq *cookie,
132 /* Length of buffer to create verbs txreq cache name */
133 #define TXREQ_NAME_LEN 24
136 * Note that it is OK to post send work requests in the SQE and ERR
137 * states; hfi1_do_send() will process them and generate error
138 * completions as per IB 1.2 C10-96.
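 * (i.e. such a post is accepted and the WQE is later completed with a
 * flush error rather than being rejected at post time).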
140 const int ib_hfi1_state_ops[IB_QPS_ERR + 1] = {
142 [IB_QPS_INIT] = HFI1_POST_RECV_OK,
143 [IB_QPS_RTR] = HFI1_POST_RECV_OK | HFI1_PROCESS_RECV_OK,
144 [IB_QPS_RTS] = HFI1_POST_RECV_OK | HFI1_PROCESS_RECV_OK |
145 HFI1_POST_SEND_OK | HFI1_PROCESS_SEND_OK |
146 HFI1_PROCESS_NEXT_SEND_OK,
147 [IB_QPS_SQD] = HFI1_POST_RECV_OK | HFI1_PROCESS_RECV_OK |
148 HFI1_POST_SEND_OK | HFI1_PROCESS_SEND_OK,
149 [IB_QPS_SQE] = HFI1_POST_RECV_OK | HFI1_PROCESS_RECV_OK |
150 HFI1_POST_SEND_OK | HFI1_FLUSH_SEND,
151 [IB_QPS_ERR] = HFI1_POST_RECV_OK | HFI1_FLUSH_RECV |
152 HFI1_POST_SEND_OK | HFI1_FLUSH_SEND,
155 struct hfi1_ucontext {
156 struct ib_ucontext ibucontext;
159 static inline struct hfi1_ucontext *to_iucontext(struct ib_ucontext
162 return container_of(ibucontext, struct hfi1_ucontext, ibucontext);
166 * Translate ib_wr_opcode into ib_wc_opcode.
168 const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
169 [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
170 [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
171 [IB_WR_SEND] = IB_WC_SEND,
172 [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
173 [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
174 [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
175 [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
179 * Length of header by opcode, 0 --> not supported
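 * Each size is the 8 byte LRH plus the 12 byte BTH, plus any per-opcode
 * extension headers, e.g. RETH (16), AETH (4), AtomicETH (28), DETH (8),
 * or 4 bytes of immediate data.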
181 const u8 hdr_len_by_opcode[256] = {
183 [IB_OPCODE_RC_SEND_FIRST] = 12 + 8,
184 [IB_OPCODE_RC_SEND_MIDDLE] = 12 + 8,
185 [IB_OPCODE_RC_SEND_LAST] = 12 + 8,
186 [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
187 [IB_OPCODE_RC_SEND_ONLY] = 12 + 8,
188 [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4,
189 [IB_OPCODE_RC_RDMA_WRITE_FIRST] = 12 + 8 + 16,
190 [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = 12 + 8,
191 [IB_OPCODE_RC_RDMA_WRITE_LAST] = 12 + 8,
192 [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
193 [IB_OPCODE_RC_RDMA_WRITE_ONLY] = 12 + 8 + 16,
194 [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
195 [IB_OPCODE_RC_RDMA_READ_REQUEST] = 12 + 8 + 16,
196 [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = 12 + 8 + 4,
197 [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = 12 + 8,
198 [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = 12 + 8 + 4,
199 [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = 12 + 8 + 4,
200 [IB_OPCODE_RC_ACKNOWLEDGE] = 12 + 8 + 4,
201 [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4,
202 [IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28,
203 [IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28,
205 [IB_OPCODE_UC_SEND_FIRST] = 12 + 8,
206 [IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8,
207 [IB_OPCODE_UC_SEND_LAST] = 12 + 8,
208 [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
209 [IB_OPCODE_UC_SEND_ONLY] = 12 + 8,
210 [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4,
211 [IB_OPCODE_UC_RDMA_WRITE_FIRST] = 12 + 8 + 16,
212 [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = 12 + 8,
213 [IB_OPCODE_UC_RDMA_WRITE_LAST] = 12 + 8,
214 [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
215 [IB_OPCODE_UC_RDMA_WRITE_ONLY] = 12 + 8 + 16,
216 [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
218 [IB_OPCODE_UD_SEND_ONLY] = 12 + 8 + 8,
219 [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 12
222 static const opcode_handler opcode_handler_tbl[256] = {
224 [IB_OPCODE_RC_SEND_FIRST] = &hfi1_rc_rcv,
225 [IB_OPCODE_RC_SEND_MIDDLE] = &hfi1_rc_rcv,
226 [IB_OPCODE_RC_SEND_LAST] = &hfi1_rc_rcv,
227 [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
228 [IB_OPCODE_RC_SEND_ONLY] = &hfi1_rc_rcv,
229 [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
230 [IB_OPCODE_RC_RDMA_WRITE_FIRST] = &hfi1_rc_rcv,
231 [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = &hfi1_rc_rcv,
232 [IB_OPCODE_RC_RDMA_WRITE_LAST] = &hfi1_rc_rcv,
233 [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
234 [IB_OPCODE_RC_RDMA_WRITE_ONLY] = &hfi1_rc_rcv,
235 [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
236 [IB_OPCODE_RC_RDMA_READ_REQUEST] = &hfi1_rc_rcv,
237 [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = &hfi1_rc_rcv,
238 [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = &hfi1_rc_rcv,
239 [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = &hfi1_rc_rcv,
240 [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = &hfi1_rc_rcv,
241 [IB_OPCODE_RC_ACKNOWLEDGE] = &hfi1_rc_rcv,
242 [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = &hfi1_rc_rcv,
243 [IB_OPCODE_RC_COMPARE_SWAP] = &hfi1_rc_rcv,
244 [IB_OPCODE_RC_FETCH_ADD] = &hfi1_rc_rcv,
246 [IB_OPCODE_UC_SEND_FIRST] = &hfi1_uc_rcv,
247 [IB_OPCODE_UC_SEND_MIDDLE] = &hfi1_uc_rcv,
248 [IB_OPCODE_UC_SEND_LAST] = &hfi1_uc_rcv,
249 [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
250 [IB_OPCODE_UC_SEND_ONLY] = &hfi1_uc_rcv,
251 [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
252 [IB_OPCODE_UC_RDMA_WRITE_FIRST] = &hfi1_uc_rcv,
253 [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = &hfi1_uc_rcv,
254 [IB_OPCODE_UC_RDMA_WRITE_LAST] = &hfi1_uc_rcv,
255 [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
256 [IB_OPCODE_UC_RDMA_WRITE_ONLY] = &hfi1_uc_rcv,
257 [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
259 [IB_OPCODE_UD_SEND_ONLY] = &hfi1_ud_rcv,
260 [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_ud_rcv,
262 [IB_OPCODE_CNP] = &hfi1_cnp_rcv
268 __be64 ib_hfi1_sys_image_guid;
271 * hfi1_copy_sge - copy data to SGE memory
273 * @data: the data to copy
274 * @length: the length of the data
277 struct hfi1_sge_state *ss,
278 void *data, u32 length,
281 struct hfi1_sge *sge = &ss->sge;
284 u32 len = sge->length;
288 if (len > sge->sge_length)
289 len = sge->sge_length;
290 WARN_ON_ONCE(len == 0);
291 memcpy(sge->vaddr, data, len);
294 sge->sge_length -= len;
295 if (sge->sge_length == 0) {
297 hfi1_put_mr(sge->mr);
299 *sge = *ss->sg_list++;
300 } else if (sge->length == 0 && sge->mr->lkey) {
301 if (++sge->n >= HFI1_SEGSZ) {
302 if (++sge->m >= sge->mr->mapsz)
307 sge->mr->map[sge->m]->segs[sge->n].vaddr;
309 sge->mr->map[sge->m]->segs[sge->n].length;
317 * hfi1_skip_sge - skip over SGE memory
319 * @length: the number of bytes to skip
321 void hfi1_skip_sge(struct hfi1_sge_state *ss, u32 length, int release)
323 struct hfi1_sge *sge = &ss->sge;
326 u32 len = sge->length;
330 if (len > sge->sge_length)
331 len = sge->sge_length;
332 WARN_ON_ONCE(len == 0);
335 sge->sge_length -= len;
336 if (sge->sge_length == 0) {
338 hfi1_put_mr(sge->mr);
340 *sge = *ss->sg_list++;
341 } else if (sge->length == 0 && sge->mr->lkey) {
342 if (++sge->n >= HFI1_SEGSZ) {
343 if (++sge->m >= sge->mr->mapsz)
348 sge->mr->map[sge->m]->segs[sge->n].vaddr;
350 sge->mr->map[sge->m]->segs[sge->n].length;
357 * post_one_send - post one RC, UC, or UD send work request
358 * @qp: the QP to post on
359 * @wr: the work request to send
361 static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr)
363 struct hfi1_swqe *wqe;
368 struct hfi1_lkey_table *rkt;
370 struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
371 struct hfi1_pportdata *ppd;
372 struct hfi1_ibport *ibp;
374 /* IB spec says that num_sge == 0 is OK. */
375 if (unlikely(wr->num_sge > qp->s_max_sge))
378 ppd = &dd->pport[qp->port_num - 1];
379 ibp = &ppd->ibport_data;
382 * Don't allow RDMA reads or atomic operations on UC or
383 * undefined operations.
384 * Make sure buffer is large enough to hold the result for atomics.
386 if (wr->opcode == IB_WR_FAST_REG_MR) {
388 } else if (qp->ibqp.qp_type == IB_QPT_UC) {
389 if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
391 } else if (qp->ibqp.qp_type != IB_QPT_RC) {
392 /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
393 if (wr->opcode != IB_WR_SEND &&
394 wr->opcode != IB_WR_SEND_WITH_IMM)
396 /* Check UD destination address PD */
397 if (qp->ibqp.pd != wr->wr.ud.ah->pd)
399 } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
401 else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
403 wr->sg_list[0].length < sizeof(u64) ||
404 wr->sg_list[0].addr & (sizeof(u64) - 1)))
406 else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
409 next = qp->s_head + 1;
410 if (next >= qp->s_size)
411 next = 0;
412 if (next == qp->s_last)
413 return -ENOMEM;
415 rkt = &to_idev(qp->ibqp.device)->lk_table;
416 pd = to_ipd(qp->ibqp.pd);
417 wqe = get_swqe_ptr(qp, qp->s_head);
422 acc = wr->opcode >= IB_WR_RDMA_READ ?
423 IB_ACCESS_LOCAL_WRITE : 0;
424 for (i = 0; i < wr->num_sge; i++) {
425 u32 length = wr->sg_list[i].length;
430 ok = hfi1_lkey_ok(rkt, pd, &wqe->sg_list[j],
431 &wr->sg_list[i], acc);
433 goto bail_inval_free;
434 wqe->length += length;
439 if (qp->ibqp.qp_type == IB_QPT_UC ||
440 qp->ibqp.qp_type == IB_QPT_RC) {
441 if (wqe->length > 0x80000000U)
442 goto bail_inval_free;
444 struct hfi1_ah *ah = to_iah(wr->wr.ud.ah);
446 atomic_inc(&ah->refcount);
448 wqe->ssn = qp->s_ssn++;
454 /* release mr holds */
456 struct hfi1_sge *sge = &wqe->sg_list[--j];
458 hfi1_put_mr(sge->mr);
464 * post_send - post a send on a QP
465 * @ibqp: the QP to post the send on
466 * @wr: the list of work requests to post
467 * @bad_wr: the first bad WR is put here
469 * This may be called from interrupt context.
471 static int post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
472 struct ib_send_wr **bad_wr)
474 struct hfi1_qp *qp = to_iqp(ibqp);
480 spin_lock_irqsave(&qp->s_lock, flags);
482 /* Check that state is OK to post send. */
483 if (unlikely(!(ib_hfi1_state_ops[qp->state] & HFI1_POST_SEND_OK))) {
484 spin_unlock_irqrestore(&qp->s_lock, flags);
488 /* if the SQ is empty and only one WR is posted, call send directly */
489 call_send = qp->s_head == qp->s_last && !wr->next;
491 for (; wr; wr = wr->next) {
492 err = post_one_send(qp, wr);
500 if (nreq && !call_send)
501 hfi1_schedule_send(qp);
502 spin_unlock_irqrestore(&qp->s_lock, flags);
503 if (nreq && call_send)
504 hfi1_do_send(&qp->s_iowait.iowork);
509 * post_receive - post a receive on a QP
510 * @ibqp: the QP to post the receive on
511 * @wr: the WR to post
512 * @bad_wr: the first bad WR is put here
514 * This may be called from interrupt context.
516 static int post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
517 struct ib_recv_wr **bad_wr)
519 struct hfi1_qp *qp = to_iqp(ibqp);
520 struct hfi1_rwq *wq = qp->r_rq.wq;
524 /* Check that state is OK to post receive. */
525 if (!(ib_hfi1_state_ops[qp->state] & HFI1_POST_RECV_OK) || !wq) {
531 for (; wr; wr = wr->next) {
532 struct hfi1_rwqe *wqe;
536 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
542 spin_lock_irqsave(&qp->r_rq.lock, flags);
543 next = wq->head + 1;
544 if (next >= qp->r_rq.size)
545 next = 0;
546 if (next == wq->tail) {
547 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
553 wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
554 wqe->wr_id = wr->wr_id;
555 wqe->num_sge = wr->num_sge;
556 for (i = 0; i < wr->num_sge; i++)
557 wqe->sg_list[i] = wr->sg_list[i];
558 /* Make sure queue entry is written before the head index. */
561 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
570 * Make sure the QP is ready and able to accept the given opcode.
572 static inline int qp_ok(int opcode, struct hfi1_packet *packet)
574 struct hfi1_ibport *ibp;
576 if (!(ib_hfi1_state_ops[packet->qp->state] & HFI1_PROCESS_RECV_OK))
578 if (((opcode & OPCODE_QP_MASK) == packet->qp->allowed_ops) ||
579 (opcode == IB_OPCODE_CNP))
582 ibp = &packet->rcd->ppd->ibport_data;
589 * hfi1_ib_rcv - process an incoming packet
590 * @packet: data packet information
592 * This is called to process an incoming packet at interrupt level.
594 * Tlen is the length of the header + data + CRC in bytes.
596 void hfi1_ib_rcv(struct hfi1_packet *packet)
598 struct hfi1_ctxtdata *rcd = packet->rcd;
599 struct hfi1_ib_header *hdr = packet->hdr;
600 u32 tlen = packet->tlen;
601 struct hfi1_pportdata *ppd = rcd->ppd;
602 struct hfi1_ibport *ibp = &ppd->ibport_data;
610 lnh = be16_to_cpu(hdr->lrh[0]) & 3;
611 if (lnh == HFI1_LRH_BTH)
612 packet->ohdr = &hdr->u.oth;
613 else if (lnh == HFI1_LRH_GRH) {
616 packet->ohdr = &hdr->u.l.oth;
617 if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
619 vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
620 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
622 packet->rcv_flags |= HFI1_HAS_GRH;
626 trace_input_ibhdr(rcd->dd, hdr);
628 opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24);
629 inc_opstats(tlen, &rcd->opstats->stats[opcode]);
631 /* Get the destination QP number. */
632 qp_num = be32_to_cpu(packet->ohdr->bth[1]) & HFI1_QPN_MASK;
633 lid = be16_to_cpu(hdr->lrh[1]);
634 if (unlikely((lid >= HFI1_MULTICAST_LID_BASE) &&
635 (lid != HFI1_PERMISSIVE_LID))) {
636 struct hfi1_mcast *mcast;
637 struct hfi1_mcast_qp *p;
639 if (lnh != HFI1_LRH_GRH)
641 mcast = hfi1_mcast_find(ibp, &hdr->u.l.grh.dgid);
644 list_for_each_entry_rcu(p, &mcast->qp_list, list) {
646 spin_lock_irqsave(&packet->qp->r_lock, flags);
647 if (likely((qp_ok(opcode, packet))))
648 opcode_handler_tbl[opcode](packet);
649 spin_unlock_irqrestore(&packet->qp->r_lock, flags);
652 * Notify hfi1_multicast_detach() if it is waiting for us to finish.
655 if (atomic_dec_return(&mcast->refcount) <= 1)
656 wake_up(&mcast->wait);
659 packet->qp = hfi1_lookup_qpn(ibp, qp_num);
664 spin_lock_irqsave(&packet->qp->r_lock, flags);
665 if (likely((qp_ok(opcode, packet))))
666 opcode_handler_tbl[opcode](packet);
667 spin_unlock_irqrestore(&packet->qp->r_lock, flags);
677 * This is called from a timer to check for QPs
678 * which need kernel memory in order to send a packet.
680 static void mem_timer(unsigned long data)
682 struct hfi1_ibdev *dev = (struct hfi1_ibdev *)data;
683 struct list_head *list = &dev->memwait;
684 struct hfi1_qp *qp = NULL;
688 write_seqlock_irqsave(&dev->iowait_lock, flags);
689 if (!list_empty(list)) {
690 wait = list_first_entry(list, struct iowait, list);
691 qp = container_of(wait, struct hfi1_qp, s_iowait);
692 list_del_init(&qp->s_iowait.list);
693 /* refcount held until actual wake up */
694 if (!list_empty(list))
695 mod_timer(&dev->mem_timer, jiffies + 1);
697 write_sequnlock_irqrestore(&dev->iowait_lock, flags);
700 hfi1_qp_wakeup(qp, HFI1_S_WAIT_KMEM);
703 void update_sge(struct hfi1_sge_state *ss, u32 length)
705 struct hfi1_sge *sge = &ss->sge;
707 sge->vaddr += length;
708 sge->length -= length;
709 sge->sge_length -= length;
710 if (sge->sge_length == 0) {
712 *sge = *ss->sg_list++;
713 } else if (sge->length == 0 && sge->mr->lkey) {
714 if (++sge->n >= HFI1_SEGSZ) {
715 if (++sge->m >= sge->mr->mapsz)
719 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
720 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
724 static noinline struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
727 struct verbs_txreq *tx;
730 tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
732 spin_lock_irqsave(&qp->s_lock, flags);
733 write_seqlock(&dev->iowait_lock);
734 if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK &&
735 list_empty(&qp->s_iowait.list)) {
737 qp->s_flags |= HFI1_S_WAIT_TX;
738 list_add_tail(&qp->s_iowait.list, &dev->txwait);
739 trace_hfi1_qpsleep(qp, HFI1_S_WAIT_TX);
740 atomic_inc(&qp->refcount);
742 qp->s_flags &= ~HFI1_S_BUSY;
743 write_sequnlock(&dev->iowait_lock);
744 spin_unlock_irqrestore(&qp->s_lock, flags);
745 tx = ERR_PTR(-EBUSY);
750 static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
753 struct verbs_txreq *tx;
755 tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
757 /* call slow path to get the lock */
758 tx = __get_txreq(dev, qp);
766 void hfi1_put_txreq(struct verbs_txreq *tx)
768 struct hfi1_ibdev *dev;
774 dev = to_idev(qp->ibqp.device);
780 sdma_txclean(dd_from_dev(dev), &tx->txreq);
782 /* Free verbs_txreq and return to slab cache */
783 kmem_cache_free(dev->verbs_txreq_cache, tx);
786 seq = read_seqbegin(&dev->iowait_lock);
787 if (!list_empty(&dev->txwait)) {
790 write_seqlock_irqsave(&dev->iowait_lock, flags);
791 /* Wake up first QP wanting a free struct */
792 wait = list_first_entry(&dev->txwait, struct iowait,
794 qp = container_of(wait, struct hfi1_qp, s_iowait);
795 list_del_init(&qp->s_iowait.list);
796 /* refcount held until actual wake up */
797 write_sequnlock_irqrestore(&dev->iowait_lock, flags);
798 hfi1_qp_wakeup(qp, HFI1_S_WAIT_TX);
801 } while (read_seqretry(&dev->iowait_lock, seq));
805 * This is called with the progress side lock held.
808 static void verbs_sdma_complete(
809 struct sdma_txreq *cookie,
813 struct verbs_txreq *tx =
814 container_of(cookie, struct verbs_txreq, txreq);
815 struct hfi1_qp *qp = tx->qp;
817 spin_lock(&qp->s_lock);
819 hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
820 else if (qp->ibqp.qp_type == IB_QPT_RC) {
821 struct hfi1_ib_header *hdr;
824 hfi1_rc_send_complete(qp, hdr);
828 * This happens when the send engine notes
829 * a QP in the error state and cannot
830 * do the flush work until that QP's
831 * sdma work has finished.
833 if (qp->s_flags & HFI1_S_WAIT_DMA) {
834 qp->s_flags &= ~HFI1_S_WAIT_DMA;
835 hfi1_schedule_send(qp);
838 spin_unlock(&qp->s_lock);
843 static int wait_kmem(struct hfi1_ibdev *dev, struct hfi1_qp *qp)
848 spin_lock_irqsave(&qp->s_lock, flags);
849 if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) {
850 write_seqlock(&dev->iowait_lock);
851 if (list_empty(&qp->s_iowait.list)) {
852 if (list_empty(&dev->memwait))
853 mod_timer(&dev->mem_timer, jiffies + 1);
854 qp->s_flags |= HFI1_S_WAIT_KMEM;
855 list_add_tail(&qp->s_iowait.list, &dev->memwait);
856 trace_hfi1_qpsleep(qp, HFI1_S_WAIT_KMEM);
857 atomic_inc(&qp->refcount);
859 write_sequnlock(&dev->iowait_lock);
860 qp->s_flags &= ~HFI1_S_BUSY;
863 spin_unlock_irqrestore(&qp->s_lock, flags);
869 * This routine calls txadds for each sg entry.
871 * Add failures will revert the sge cursor
873 static int build_verbs_ulp_payload(
874 struct sdma_engine *sde,
875 struct hfi1_sge_state *ss,
877 struct verbs_txreq *tx)
879 struct hfi1_sge *sg_list = ss->sg_list;
880 struct hfi1_sge sge = ss->sge;
881 u8 num_sge = ss->num_sge;
886 len = ss->sge.length;
889 if (len > ss->sge.sge_length)
890 len = ss->sge.sge_length;
891 WARN_ON_ONCE(len == 0);
892 ret = sdma_txadd_kvaddr(
906 ss->num_sge = num_sge;
907 ss->sg_list = sg_list;
912 * Build the number of DMA descriptors needed to send length bytes of data.
914 * NOTE: DMA mapping is held in the tx until completed in the ring or
915 * the tx desc is freed without having been submitted to the ring
917 * This routine ensures that all the helper routine calls succeed.
921 static int build_verbs_tx_desc(
922 struct sdma_engine *sde,
923 struct hfi1_sge_state *ss,
925 struct verbs_txreq *tx,
926 struct ahg_ib_header *ahdr,
930 struct hfi1_pio_header *phdr;
931 u16 hdrbytes = tx->hdr_dwords << 2;
934 if (!ahdr->ahgcount) {
935 ret = sdma_txinit_ahg(
943 verbs_sdma_complete);
946 phdr->pbc = cpu_to_le64(pbc);
947 memcpy(&phdr->hdr, &ahdr->ibh, hdrbytes - sizeof(phdr->pbc));
949 ret = sdma_txadd_kvaddr(
953 tx->hdr_dwords << 2);
957 struct hfi1_other_headers *sohdr = &ahdr->ibh.u.oth;
958 struct hfi1_other_headers *dohdr = &phdr->hdr.u.oth;
960 /* needed in rc_send_complete() */
961 phdr->hdr.lrh[0] = ahdr->ibh.lrh[0];
962 if ((be16_to_cpu(phdr->hdr.lrh[0]) & 3) == HFI1_LRH_GRH) {
963 sohdr = &ahdr->ibh.u.l.oth;
964 dohdr = &phdr->hdr.u.l.oth;
967 dohdr->bth[0] = sohdr->bth[0];
969 dohdr->bth[2] = sohdr->bth[2];
970 ret = sdma_txinit_ahg(
978 verbs_sdma_complete);
983 /* add the ulp payload - if any. ss can be NULL for acks */
985 ret = build_verbs_ulp_payload(sde, ss, length, tx);
990 int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
991 u32 hdrwords, struct hfi1_sge_state *ss, u32 len,
992 u32 plen, u32 dwords, u64 pbc)
994 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
995 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
996 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
997 struct verbs_txreq *tx;
998 struct sdma_txreq *stx;
1000 struct sdma_engine *sde;
1004 if (!list_empty(&qp->s_iowait.tx_head)) {
1005 stx = list_first_entry(
1006 &qp->s_iowait.tx_head,
1009 list_del_init(&stx->list);
1010 tx = container_of(stx, struct verbs_txreq, txreq);
1011 ret = sdma_send_txreq(tx->sde, &qp->s_iowait, stx);
1012 if (unlikely(ret == -ECOMM))
1017 tx = get_txreq(dev, qp);
1021 if (!qp->s_hdr->sde) {
1022 tx->sde = sde = qp_to_sdma_engine(qp, sc5);
1026 tx->sde = sde = qp->s_hdr->sde;
1028 if (likely(pbc == 0)) {
1029 u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
1031 /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
1032 pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
1034 pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
1036 tx->wqe = qp->s_wqe;
1037 tx->mr = qp->s_rdma_mr;
1039 qp->s_rdma_mr = NULL;
1040 tx->hdr_dwords = hdrwords + 2;
1041 ret = build_verbs_tx_desc(sde, ss, len, tx, ahdr, pbc);
1044 trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ahdr->ibh);
1045 ret = sdma_send_txreq(sde, &qp->s_iowait, &tx->txreq);
1046 if (unlikely(ret == -ECOMM))
1053 /* The current one got "sent" */
1056 /* kmalloc or mapping fail */
1058 return wait_kmem(dev, qp);
1064 * If we are now in the error state, return zero to flush the
1065 * send work request.
1067 static int no_bufs_available(struct hfi1_qp *qp, struct send_context *sc)
1069 struct hfi1_devdata *dd = sc->dd;
1070 struct hfi1_ibdev *dev = &dd->verbs_dev;
1071 unsigned long flags;
1075 * Note that as soon as want_buffer() is called and
1076 * possibly before it returns, sc_piobufavail()
1077 * could be called. Therefore, put QP on the I/O wait list before
1078 * enabling the PIO avail interrupt.
1080 spin_lock_irqsave(&qp->s_lock, flags);
1081 if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) {
1082 write_seqlock(&dev->iowait_lock);
1083 if (list_empty(&qp->s_iowait.list)) {
1084 struct hfi1_ibdev *dev = &dd->verbs_dev;
1088 qp->s_flags |= HFI1_S_WAIT_PIO;
1089 was_empty = list_empty(&sc->piowait);
1090 list_add_tail(&qp->s_iowait.list, &sc->piowait);
1091 trace_hfi1_qpsleep(qp, HFI1_S_WAIT_PIO);
1092 atomic_inc(&qp->refcount);
1093 /* counting: only call wantpiobuf_intr if first user */
1095 hfi1_sc_wantpiobuf_intr(sc, 1);
1097 write_sequnlock(&dev->iowait_lock);
1098 qp->s_flags &= ~HFI1_S_BUSY;
1101 spin_unlock_irqrestore(&qp->s_lock, flags);
1105 struct send_context *qp_to_send_context(struct hfi1_qp *qp, u8 sc5)
1107 struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1108 struct hfi1_pportdata *ppd = dd->pport + (qp->port_num - 1);
1111 vl = sc_to_vlt(dd, sc5);
1112 if (vl >= ppd->vls_supported && vl != 15)
1114 return dd->vld[vl].sc;
1117 int hfi1_verbs_send_pio(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
1118 u32 hdrwords, struct hfi1_sge_state *ss, u32 len,
1119 u32 plen, u32 dwords, u64 pbc)
1121 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
1122 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1123 u32 *hdr = (u32 *)&ahdr->ibh;
1126 unsigned long flags = 0;
1127 struct send_context *sc;
1128 struct pio_buf *pbuf;
1129 int wc_status = IB_WC_SUCCESS;
1131 /* vl15 special case taken care of in ud.c */
1133 sc = qp_to_send_context(qp, sc5);
1137 if (likely(pbc == 0)) {
1138 u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
1139 /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
1140 pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
1141 pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
1143 pbuf = sc_buffer_alloc(sc, plen, NULL, NULL);
1144 if (unlikely(pbuf == NULL)) {
1145 if (ppd->host_link_state != HLS_UP_ACTIVE) {
1147 * If we have filled the PIO buffers to capacity and are
1148 * not in an active state, this request is not going to
1149 * go out, so just complete it with an error or else a
1150 * ULP or the core may be stuck waiting.
1154 "alloc failed. state not active, completing");
1155 wc_status = IB_WC_GENERAL_ERR;
1159 * This is a normal occurrence. The PIO buffers are full
1160 * but we are still happily sending, so just continue
1161 * to queue the request.
1163 hfi1_cdbg(PIO, "alloc failed. state active, queuing");
1164 return no_bufs_available(qp, sc);
1169 pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords);
1172 seg_pio_copy_start(pbuf, pbc, hdr, hdrwords*4);
1174 void *addr = ss->sge.vaddr;
1175 u32 slen = ss->sge.length;
1179 update_sge(ss, slen);
1180 seg_pio_copy_mid(pbuf, addr, slen);
1183 seg_pio_copy_end(pbuf);
1187 trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ahdr->ibh);
1189 if (qp->s_rdma_mr) {
1190 hfi1_put_mr(qp->s_rdma_mr);
1191 qp->s_rdma_mr = NULL;
1196 spin_lock_irqsave(&qp->s_lock, flags);
1197 hfi1_send_complete(qp, qp->s_wqe, wc_status);
1198 spin_unlock_irqrestore(&qp->s_lock, flags);
1199 } else if (qp->ibqp.qp_type == IB_QPT_RC) {
1200 spin_lock_irqsave(&qp->s_lock, flags);
1201 hfi1_rc_send_complete(qp, &ahdr->ibh);
1202 spin_unlock_irqrestore(&qp->s_lock, flags);
1208 * egress_pkey_matches_entry - return 1 if the pkey matches ent (ent
1209 * being an entry from the ingress partition key table), return 0
1210 * otherwise. Use the matching criteria for egress partition keys
1211 * specified in the OPAv1 spec., section 9.11.7.
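 * For example, a full-member pkey such as 0x8001 (bit 15 set) matches a
 * table entry only if the entry also has the member bit set.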
1213 static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
1215 u16 mkey = pkey & PKEY_LOW_15_MASK;
1216 u16 ment = ent & PKEY_LOW_15_MASK;
1220 * If pkey[15] is set (full partition member),
1221 * is bit 15 in the corresponding table element
1222 * clear (limited member)?
1224 if (pkey & PKEY_MEMBER_MASK)
1225 return !!(ent & PKEY_MEMBER_MASK);
1232 * egress_pkey_check - return 0 if hdr's pkey matches according to the
1233 * criteria in the OPAv1 spec., section 9.11.7.
1235 static inline int egress_pkey_check(struct hfi1_pportdata *ppd,
1236 struct hfi1_ib_header *hdr,
1239 struct hfi1_other_headers *ohdr;
1240 struct hfi1_devdata *dd;
1243 u8 lnh, sc5 = qp->s_sc;
1245 if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT))
1248 /* locate the pkey within the headers */
1249 lnh = be16_to_cpu(hdr->lrh[0]) & 3;
1250 if (lnh == HFI1_LRH_GRH)
1251 ohdr = &hdr->u.l.oth;
1255 pkey = (u16)be32_to_cpu(ohdr->bth[0]);
1257 /* If SC15, pkey[0:14] must be 0x7fff */
1258 if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
1262 /* Is the pkey = 0x0, or 0x8000? */
1263 if ((pkey & PKEY_LOW_15_MASK) == 0)
1266 /* The most likely matching pkey has index qp->s_pkey_index */
1267 if (unlikely(!egress_pkey_matches_entry(pkey,
1268 ppd->pkeys[qp->s_pkey_index]))) {
1269 /* no match - try the entire table */
1270 for (; i < MAX_PKEY_VALUES; i++) {
1271 if (egress_pkey_matches_entry(pkey, ppd->pkeys[i]))
1276 if (i < MAX_PKEY_VALUES)
1279 incr_cntr64(&ppd->port_xmit_constraint_errors);
1281 if (!(dd->err_info_xmit_constraint.status & OPA_EI_STATUS_SMASK)) {
1282 u16 slid = be16_to_cpu(hdr->lrh[3]);
1284 dd->err_info_xmit_constraint.status |= OPA_EI_STATUS_SMASK;
1285 dd->err_info_xmit_constraint.slid = slid;
1286 dd->err_info_xmit_constraint.pkey = pkey;
1292 * hfi1_verbs_send - send a packet
1293 * @qp: the QP to send on
1294 * @ahdr: the packet header
1295 * @hdrwords: the number of 32-bit words in the header
1296 * @ss: the SGE to send
1297 * @len: the length of the packet in bytes
1299 * Return zero if packet is sent or queued OK.
1300 * Return non-zero and clear qp->s_flags HFI1_S_BUSY otherwise.
1302 int hfi1_verbs_send(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
1303 u32 hdrwords, struct hfi1_sge_state *ss, u32 len)
1305 struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1309 unsigned long flags = 0;
1310 u32 dwords = (len + 3) >> 2;
1313 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
1314 * can defer SDMA restart until link goes ACTIVE without
1315 * worrying about just how we got there.
1317 if ((qp->ibqp.qp_type == IB_QPT_SMI) ||
1318 !(dd->flags & HFI1_HAS_SEND_DMA))
1321 ret = egress_pkey_check(dd->pport, &ahdr->ibh, qp);
1322 if (unlikely(ret)) {
1324 * The value we are returning here does not get propagated to
1325 * the verbs caller. Thus we need to complete the request with
1326 * error otherwise the caller could be sitting waiting on the
1327 * completion event. Only do this for PIO. SDMA has its own
1328 * mechanism for handling the errors, so for SDMA we can just return.
1332 hfi1_cdbg(PIO, "%s() Failed. Completing with err",
1334 spin_lock_irqsave(&qp->s_lock, flags);
1335 hfi1_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
1336 spin_unlock_irqrestore(&qp->s_lock, flags);
1342 * Calculate the send buffer trigger address.
1343 * The +2 counts for the pbc control qword
1345 plen = hdrwords + dwords + 2;
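/*
 * e.g. a 7 dword header and a 256 byte payload (64 dwords) gives
 * plen = 7 + 64 + 2 = 73 dwords, including the 2 dword PBC.
 */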
1348 ret = dd->process_pio_send(
1349 qp, ahdr, hdrwords, ss, len, plen, dwords, 0);
1351 #ifdef CONFIG_SDMA_VERBOSITY
1352 dd_dev_err(dd, "CONFIG SDMA %s:%d %s()\n",
1353 slashstrip(__FILE__), __LINE__, __func__);
1354 dd_dev_err(dd, "SDMA hdrwords = %u, len = %u\n", hdrwords, len);
1356 ret = dd->process_dma_send(
1357 qp, ahdr, hdrwords, ss, len, plen, dwords, 0);
1363 static int query_device(struct ib_device *ibdev,
1364 struct ib_device_attr *props,
1365 struct ib_udata *uhw)
1367 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1368 struct hfi1_ibdev *dev = to_idev(ibdev);
1370 if (uhw->inlen || uhw->outlen)
1372 memset(props, 0, sizeof(*props));
1374 props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
1375 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
1376 IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
1377 IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
1379 props->page_size_cap = PAGE_SIZE;
1380 props->vendor_id =
1381 dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
1382 props->vendor_part_id = dd->pcidev->device;
1383 props->hw_ver = dd->minrev;
1384 props->sys_image_guid = ib_hfi1_sys_image_guid;
1385 props->max_mr_size = ~0ULL;
1386 props->max_qp = hfi1_max_qps;
1387 props->max_qp_wr = hfi1_max_qp_wrs;
1388 props->max_sge = hfi1_max_sges;
1389 props->max_sge_rd = hfi1_max_sges;
1390 props->max_cq = hfi1_max_cqs;
1391 props->max_ah = hfi1_max_ahs;
1392 props->max_cqe = hfi1_max_cqes;
1393 props->max_mr = dev->lk_table.max;
1394 props->max_fmr = dev->lk_table.max;
1395 props->max_map_per_fmr = 32767;
1396 props->max_pd = hfi1_max_pds;
1397 props->max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
1398 props->max_qp_init_rd_atom = 255;
1399 /* props->max_res_rd_atom */
1400 props->max_srq = hfi1_max_srqs;
1401 props->max_srq_wr = hfi1_max_srq_wrs;
1402 props->max_srq_sge = hfi1_max_srq_sges;
1403 /* props->local_ca_ack_delay */
1404 props->atomic_cap = IB_ATOMIC_GLOB;
1405 props->max_pkeys = hfi1_get_npkeys(dd);
1406 props->max_mcast_grp = hfi1_max_mcast_grps;
1407 props->max_mcast_qp_attach = hfi1_max_mcast_qp_attached;
1408 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
1409 props->max_mcast_grp;
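/* with the default module parameters this is 16 * 16384 attachments */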
1414 static inline u16 opa_speed_to_ib(u16 in)
1418 if (in & OPA_LINK_SPEED_25G)
1419 out |= IB_SPEED_EDR;
1420 if (in & OPA_LINK_SPEED_12_5G)
1421 out |= IB_SPEED_FDR;
1427 * Convert a single OPA link width (no multiple flags) to an IB value.
1428 * A zero OPA link width means link down, which means the IB width value
1429 * is a don't care.
1431 static inline u16 opa_width_to_ib(u16 in)
1434 case OPA_LINK_WIDTH_1X:
1435 /* map 2x and 3x to 1x as they don't exist in IB */
1436 case OPA_LINK_WIDTH_2X:
1437 case OPA_LINK_WIDTH_3X:
1439 default: /* link down or unknown, return our largest width */
1440 case OPA_LINK_WIDTH_4X:
1445 static int query_port(struct ib_device *ibdev, u8 port,
1446 struct ib_port_attr *props)
1448 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1449 struct hfi1_ibport *ibp = to_iport(ibdev, port);
1450 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1453 memset(props, 0, sizeof(*props));
1454 props->lid = lid ? lid : 0;
1455 props->lmc = ppd->lmc;
1456 props->sm_lid = ibp->sm_lid;
1457 props->sm_sl = ibp->sm_sl;
1458 /* OPA logical states match IB logical states */
1459 props->state = driver_lstate(ppd);
1460 props->phys_state = hfi1_ibphys_portstate(ppd);
1461 props->port_cap_flags = ibp->port_cap_flags;
1462 props->gid_tbl_len = HFI1_GUIDS_PER_PORT;
1463 props->max_msg_sz = 0x80000000;
1464 props->pkey_tbl_len = hfi1_get_npkeys(dd);
1465 props->bad_pkey_cntr = ibp->pkey_violations;
1466 props->qkey_viol_cntr = ibp->qkey_violations;
1467 props->active_width = (u8)opa_width_to_ib(ppd->link_width_active);
1468 /* see rate_show() in ib core/sysfs.c */
1469 props->active_speed = (u8)opa_speed_to_ib(ppd->link_speed_active);
1470 props->max_vl_num = ppd->vls_supported;
1471 props->init_type_reply = 0;
1473 /* Once we are a "first class" citizen and have added the OPA MTUs to
1474 * the core we can advertise the larger MTU enum to the ULPs; for now,
1475 * advertise only 4K.
1477 * Those applications which are either OPA aware or pass the MTU enum
1478 * from the Path Records to us will get the new 8k MTU. Those that
1479 * attempt to process the MTU enum may fail in various ways.
1481 props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ?
1482 4096 : hfi1_max_mtu), IB_MTU_4096);
1483 props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
1484 mtu_to_enum(ppd->ibmtu, IB_MTU_2048);
1485 props->subnet_timeout = ibp->subnet_timeout;
1490 static int port_immutable(struct ib_device *ibdev, u8 port_num,
1491 struct ib_port_immutable *immutable)
1493 struct ib_port_attr attr;
1496 err = query_port(ibdev, port_num, &attr);
1500 memset(immutable, 0, sizeof(*immutable));
1502 immutable->pkey_tbl_len = attr.pkey_tbl_len;
1503 immutable->gid_tbl_len = attr.gid_tbl_len;
1504 immutable->core_cap_flags = RDMA_CORE_PORT_INTEL_OPA;
1505 immutable->max_mad_size = OPA_MGMT_MAD_SIZE;
1510 static int modify_device(struct ib_device *device,
1511 int device_modify_mask,
1512 struct ib_device_modify *device_modify)
1514 struct hfi1_devdata *dd = dd_from_ibdev(device);
1518 if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
1519 IB_DEVICE_MODIFY_NODE_DESC)) {
1524 if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
1525 memcpy(device->node_desc, device_modify->node_desc, 64);
1526 for (i = 0; i < dd->num_pports; i++) {
1527 struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
1529 hfi1_node_desc_chg(ibp);
1533 if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
1534 ib_hfi1_sys_image_guid =
1535 cpu_to_be64(device_modify->sys_image_guid);
1536 for (i = 0; i < dd->num_pports; i++) {
1537 struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
1539 hfi1_sys_guid_chg(ibp);
1549 static int modify_port(struct ib_device *ibdev, u8 port,
1550 int port_modify_mask, struct ib_port_modify *props)
1552 struct hfi1_ibport *ibp = to_iport(ibdev, port);
1553 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1556 ibp->port_cap_flags |= props->set_port_cap_mask;
1557 ibp->port_cap_flags &= ~props->clr_port_cap_mask;
1558 if (props->set_port_cap_mask || props->clr_port_cap_mask)
1559 hfi1_cap_mask_chg(ibp);
1560 if (port_modify_mask & IB_PORT_SHUTDOWN) {
1561 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0,
1562 OPA_LINKDOWN_REASON_UNKNOWN);
1563 ret = set_link_state(ppd, HLS_DN_DOWNDEF);
1565 if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
1566 ibp->qkey_violations = 0;
1570 static int query_gid(struct ib_device *ibdev, u8 port,
1571 int index, union ib_gid *gid)
1573 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1576 if (!port || port > dd->num_pports)
1579 struct hfi1_ibport *ibp = to_iport(ibdev, port);
1580 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1582 gid->global.subnet_prefix = ibp->gid_prefix;
1584 gid->global.interface_id = cpu_to_be64(ppd->guid);
1585 else if (index < HFI1_GUIDS_PER_PORT)
1586 gid->global.interface_id = ibp->guids[index - 1];
1594 static struct ib_pd *alloc_pd(struct ib_device *ibdev,
1595 struct ib_ucontext *context,
1596 struct ib_udata *udata)
1598 struct hfi1_ibdev *dev = to_idev(ibdev);
1603 * This is actually totally arbitrary. Some correctness tests
1604 * assume there's a maximum number of PDs that can be allocated.
1605 * We don't actually have this limit, but we fail the test if
1606 * we allow allocations of more than we report for this value.
1609 pd = kmalloc(sizeof(*pd), GFP_KERNEL);
1611 ret = ERR_PTR(-ENOMEM);
1615 spin_lock(&dev->n_pds_lock);
1616 if (dev->n_pds_allocated == hfi1_max_pds) {
1617 spin_unlock(&dev->n_pds_lock);
1619 ret = ERR_PTR(-ENOMEM);
1623 dev->n_pds_allocated++;
1624 spin_unlock(&dev->n_pds_lock);
1626 /* ib_alloc_pd() will initialize pd->ibpd. */
1627 pd->user = udata != NULL;
1635 static int dealloc_pd(struct ib_pd *ibpd)
1637 struct hfi1_pd *pd = to_ipd(ibpd);
1638 struct hfi1_ibdev *dev = to_idev(ibpd->device);
1640 spin_lock(&dev->n_pds_lock);
1641 dev->n_pds_allocated--;
1642 spin_unlock(&dev->n_pds_lock);
1650 * convert ah port,sl to sc
1652 u8 ah_to_sc(struct ib_device *ibdev, struct ib_ah_attr *ah)
1654 struct hfi1_ibport *ibp = to_iport(ibdev, ah->port_num);
1656 return ibp->sl_to_sc[ah->sl];
1659 int hfi1_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
1661 struct hfi1_ibport *ibp;
1662 struct hfi1_pportdata *ppd;
1663 struct hfi1_devdata *dd;
1666 /* A multicast address requires a GRH (see ch. 8.4.1). */
1667 if (ah_attr->dlid >= HFI1_MULTICAST_LID_BASE &&
1668 ah_attr->dlid != HFI1_PERMISSIVE_LID &&
1669 !(ah_attr->ah_flags & IB_AH_GRH))
1671 if ((ah_attr->ah_flags & IB_AH_GRH) &&
1672 ah_attr->grh.sgid_index >= HFI1_GUIDS_PER_PORT)
1674 if (ah_attr->dlid == 0)
1676 if (ah_attr->port_num < 1 ||
1677 ah_attr->port_num > ibdev->phys_port_cnt)
1679 if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
1680 ib_rate_to_mbps(ah_attr->static_rate) < 0)
1682 if (ah_attr->sl >= OPA_MAX_SLS)
1684 /* test the mapping for validity */
1685 ibp = to_iport(ibdev, ah_attr->port_num);
1686 ppd = ppd_from_ibp(ibp);
1687 sc5 = ibp->sl_to_sc[ah_attr->sl];
1688 dd = dd_from_ppd(ppd);
1689 if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
1697 * create_ah - create an address handle
1698 * @pd: the protection domain
1699 * @ah_attr: the attributes of the AH
1701 * This may be called from interrupt context.
1703 static struct ib_ah *create_ah(struct ib_pd *pd,
1704 struct ib_ah_attr *ah_attr)
1708 struct hfi1_ibdev *dev = to_idev(pd->device);
1709 unsigned long flags;
1711 if (hfi1_check_ah(pd->device, ah_attr)) {
1712 ret = ERR_PTR(-EINVAL);
1716 ah = kmalloc(sizeof(*ah), GFP_ATOMIC);
1718 ret = ERR_PTR(-ENOMEM);
1722 spin_lock_irqsave(&dev->n_ahs_lock, flags);
1723 if (dev->n_ahs_allocated == hfi1_max_ahs) {
1724 spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1726 ret = ERR_PTR(-ENOMEM);
1730 dev->n_ahs_allocated++;
1731 spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1733 /* ib_create_ah() will initialize ah->ibah. */
1734 ah->attr = *ah_attr;
1735 atomic_set(&ah->refcount, 0);
1743 struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid)
1745 struct ib_ah_attr attr;
1746 struct ib_ah *ah = ERR_PTR(-EINVAL);
1747 struct hfi1_qp *qp0;
1749 memset(&attr, 0, sizeof(attr));
1751 attr.port_num = ppd_from_ibp(ibp)->port;
1753 qp0 = rcu_dereference(ibp->qp[0]);
1755 ah = ib_create_ah(qp0->ibqp.pd, &attr);
1761 * destroy_ah - destroy an address handle
1762 * @ibah: the AH to destroy
1764 * This may be called from interrupt context.
1766 static int destroy_ah(struct ib_ah *ibah)
1768 struct hfi1_ibdev *dev = to_idev(ibah->device);
1769 struct hfi1_ah *ah = to_iah(ibah);
1770 unsigned long flags;
1772 if (atomic_read(&ah->refcount) != 0)
1775 spin_lock_irqsave(&dev->n_ahs_lock, flags);
1776 dev->n_ahs_allocated--;
1777 spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1784 static int modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
1786 struct hfi1_ah *ah = to_iah(ibah);
1788 if (hfi1_check_ah(ibah->device, ah_attr))
1791 ah->attr = *ah_attr;
1796 static int query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
1798 struct hfi1_ah *ah = to_iah(ibah);
1800 *ah_attr = ah->attr;
1806 * hfi1_get_npkeys - return the size of the PKEY table for context 0
1807 * @dd: the hfi1_ib device
1809 unsigned hfi1_get_npkeys(struct hfi1_devdata *dd)
1811 return ARRAY_SIZE(dd->pport[0].pkeys);
1814 static int query_pkey(struct ib_device *ibdev, u8 port, u16 index,
1817 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1820 if (index >= hfi1_get_npkeys(dd)) {
1825 *pkey = hfi1_get_pkey(to_iport(ibdev, port), index);
1833 * alloc_ucontext - allocate a ucontext
1834 * @ibdev: the infiniband device
1835 * @udata: not used by the driver
1838 static struct ib_ucontext *alloc_ucontext(struct ib_device *ibdev,
1839 struct ib_udata *udata)
1841 struct hfi1_ucontext *context;
1842 struct ib_ucontext *ret;
1844 context = kmalloc(sizeof(*context), GFP_KERNEL);
1846 ret = ERR_PTR(-ENOMEM);
1850 ret = &context->ibucontext;
1856 static int dealloc_ucontext(struct ib_ucontext *context)
1858 kfree(to_iucontext(context));
1862 static void init_ibport(struct hfi1_pportdata *ppd)
1864 struct hfi1_ibport *ibp = &ppd->ibport_data;
1865 size_t sz = ARRAY_SIZE(ibp->sl_to_sc);
1868 for (i = 0; i < sz; i++) {
1869 ibp->sl_to_sc[i] = i;
1870 ibp->sc_to_sl[i] = i;
1873 spin_lock_init(&ibp->lock);
1874 /* Set the prefix to the default value (see ch. 4.1.1) */
1875 ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;
1877 /* Below should only set bits defined in OPA PortInfo.CapabilityMask */
1878 ibp->port_cap_flags = IB_PORT_AUTO_MIGR_SUP |
1879 IB_PORT_CAP_MASK_NOTICE_SUP;
1880 ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
1881 ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
1882 ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
1883 ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
1884 ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
1886 RCU_INIT_POINTER(ibp->qp[0], NULL);
1887 RCU_INIT_POINTER(ibp->qp[1], NULL);
1890 static void verbs_txreq_kmem_cache_ctor(void *obj)
1892 struct verbs_txreq *tx = obj;
1894 memset(tx, 0, sizeof(*tx));
1898 * hfi1_register_ib_device - register our device with the infiniband core
1899 * @dd: the device data structure
1900 * Return 0 if successful, errno if unsuccessful.
1902 int hfi1_register_ib_device(struct hfi1_devdata *dd)
1904 struct hfi1_ibdev *dev = &dd->verbs_dev;
1905 struct ib_device *ibdev = &dev->ibdev;
1906 struct hfi1_pportdata *ppd = dd->pport;
1907 unsigned i, lk_tab_size;
1909 size_t lcpysz = IB_DEVICE_NAME_MAX;
1911 char buf[TXREQ_NAME_LEN];
1913 ret = hfi1_qp_init(dev);
1918 for (i = 0; i < dd->num_pports; i++)
1919 init_ibport(ppd + i);
1921 /* Only need to initialize non-zero fields. */
1922 spin_lock_init(&dev->n_pds_lock);
1923 spin_lock_init(&dev->n_ahs_lock);
1924 spin_lock_init(&dev->n_cqs_lock);
1925 spin_lock_init(&dev->n_qps_lock);
1926 spin_lock_init(&dev->n_srqs_lock);
1927 spin_lock_init(&dev->n_mcast_grps_lock);
1928 init_timer(&dev->mem_timer);
1929 dev->mem_timer.function = mem_timer;
1930 dev->mem_timer.data = (unsigned long) dev;
1933 * The top hfi1_lkey_table_size bits are used to index the
1934 * table. The lower 8 bits can be owned by the user (copied from
1935 * the LKEY). The remaining bits act as a generation number or tag.
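 * With the default lkey_table_size of 16 and a 32 bit LKEY, that leaves
 * 8 bits of generation in the middle of the key.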
1937 spin_lock_init(&dev->lk_table.lock);
1938 dev->lk_table.max = 1 << hfi1_lkey_table_size;
1939 /* ensure generation is at least 4 bits (keys.c) */
1940 if (hfi1_lkey_table_size > MAX_LKEY_TABLE_BITS) {
1941 dd_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
1942 hfi1_lkey_table_size, MAX_LKEY_TABLE_BITS);
1943 hfi1_lkey_table_size = MAX_LKEY_TABLE_BITS;
1945 lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
1946 dev->lk_table.table = (struct hfi1_mregion __rcu **)
1947 vmalloc(lk_tab_size);
1948 if (dev->lk_table.table == NULL) {
1952 RCU_INIT_POINTER(dev->dma_mr, NULL);
1953 for (i = 0; i < dev->lk_table.max; i++)
1954 RCU_INIT_POINTER(dev->lk_table.table[i], NULL);
1955 INIT_LIST_HEAD(&dev->pending_mmaps);
1956 spin_lock_init(&dev->pending_lock);
1957 seqlock_init(&dev->iowait_lock);
1958 dev->mmap_offset = PAGE_SIZE;
1959 spin_lock_init(&dev->mmap_offset_lock);
1960 INIT_LIST_HEAD(&dev->txwait);
1961 INIT_LIST_HEAD(&dev->memwait);
1963 descq_cnt = sdma_get_descq_cnt();
1965 snprintf(buf, sizeof(buf), "hfi1_%u_vtxreq_cache", dd->unit);
1966 /* SLAB_HWCACHE_ALIGN for AHG */
1967 dev->verbs_txreq_cache = kmem_cache_create(buf,
1968 sizeof(struct verbs_txreq),
1969 0, SLAB_HWCACHE_ALIGN,
1970 verbs_txreq_kmem_cache_ctor);
1971 if (!dev->verbs_txreq_cache) {
1973 goto err_verbs_txreq;
1977 * The system image GUID is supposed to be the same for all
1978 * HFIs in a single system but since there can be other
1979 * device types in the system, we can't be sure this is unique.
1981 if (!ib_hfi1_sys_image_guid)
1982 ib_hfi1_sys_image_guid = cpu_to_be64(ppd->guid);
1983 lcpysz = strlcpy(ibdev->name, class_name(), lcpysz);
1984 strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz);
1985 ibdev->owner = THIS_MODULE;
1986 ibdev->node_guid = cpu_to_be64(ppd->guid);
1987 ibdev->uverbs_abi_ver = HFI1_UVERBS_ABI_VERSION;
1988 ibdev->uverbs_cmd_mask =
1989 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
1990 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
1991 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
1992 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
1993 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
1994 (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
1995 (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
1996 (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
1997 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
1998 (1ull << IB_USER_VERBS_CMD_REG_MR) |
1999 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
2000 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2001 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
2002 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
2003 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
2004 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
2005 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
2006 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
2007 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
2008 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
2009 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
2010 (1ull << IB_USER_VERBS_CMD_POST_SEND) |
2011 (1ull << IB_USER_VERBS_CMD_POST_RECV) |
2012 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
2013 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
2014 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
2015 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
2016 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
2017 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
2018 (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
2019 ibdev->node_type = RDMA_NODE_IB_CA;
2020 ibdev->phys_port_cnt = dd->num_pports;
2021 ibdev->num_comp_vectors = 1;
2022 ibdev->dma_device = &dd->pcidev->dev;
2023 ibdev->query_device = query_device;
2024 ibdev->modify_device = modify_device;
2025 ibdev->query_port = query_port;
2026 ibdev->modify_port = modify_port;
2027 ibdev->query_pkey = query_pkey;
2028 ibdev->query_gid = query_gid;
2029 ibdev->alloc_ucontext = alloc_ucontext;
2030 ibdev->dealloc_ucontext = dealloc_ucontext;
2031 ibdev->alloc_pd = alloc_pd;
2032 ibdev->dealloc_pd = dealloc_pd;
2033 ibdev->create_ah = create_ah;
2034 ibdev->destroy_ah = destroy_ah;
2035 ibdev->modify_ah = modify_ah;
2036 ibdev->query_ah = query_ah;
2037 ibdev->create_srq = hfi1_create_srq;
2038 ibdev->modify_srq = hfi1_modify_srq;
2039 ibdev->query_srq = hfi1_query_srq;
2040 ibdev->destroy_srq = hfi1_destroy_srq;
2041 ibdev->create_qp = hfi1_create_qp;
2042 ibdev->modify_qp = hfi1_modify_qp;
2043 ibdev->query_qp = hfi1_query_qp;
2044 ibdev->destroy_qp = hfi1_destroy_qp;
2045 ibdev->post_send = post_send;
2046 ibdev->post_recv = post_receive;
2047 ibdev->post_srq_recv = hfi1_post_srq_receive;
2048 ibdev->create_cq = hfi1_create_cq;
2049 ibdev->destroy_cq = hfi1_destroy_cq;
2050 ibdev->resize_cq = hfi1_resize_cq;
2051 ibdev->poll_cq = hfi1_poll_cq;
2052 ibdev->req_notify_cq = hfi1_req_notify_cq;
2053 ibdev->get_dma_mr = hfi1_get_dma_mr;
2054 ibdev->reg_phys_mr = hfi1_reg_phys_mr;
2055 ibdev->reg_user_mr = hfi1_reg_user_mr;
2056 ibdev->dereg_mr = hfi1_dereg_mr;
2057 ibdev->alloc_mr = hfi1_alloc_mr;
2058 ibdev->alloc_fast_reg_page_list = hfi1_alloc_fast_reg_page_list;
2059 ibdev->free_fast_reg_page_list = hfi1_free_fast_reg_page_list;
2060 ibdev->alloc_fmr = hfi1_alloc_fmr;
2061 ibdev->map_phys_fmr = hfi1_map_phys_fmr;
2062 ibdev->unmap_fmr = hfi1_unmap_fmr;
2063 ibdev->dealloc_fmr = hfi1_dealloc_fmr;
2064 ibdev->attach_mcast = hfi1_multicast_attach;
2065 ibdev->detach_mcast = hfi1_multicast_detach;
2066 ibdev->process_mad = hfi1_process_mad;
2067 ibdev->mmap = hfi1_mmap;
2068 ibdev->dma_ops = &hfi1_dma_mapping_ops;
2069 ibdev->get_port_immutable = port_immutable;
2071 strncpy(ibdev->node_desc, init_utsname()->nodename,
2072 sizeof(ibdev->node_desc));
2074 ret = ib_register_device(ibdev, hfi1_create_port_files);
2078 ret = hfi1_create_agents(dev);
2082 ret = hfi1_verbs_register_sysfs(dd);
2089 hfi1_free_agents(dev);
2091 ib_unregister_device(ibdev);
2094 kmem_cache_destroy(dev->verbs_txreq_cache);
2095 vfree(dev->lk_table.table);
2099 dd_dev_err(dd, "cannot register verbs: %d!\n", -ret);
2104 void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
2106 struct hfi1_ibdev *dev = &dd->verbs_dev;
2107 struct ib_device *ibdev = &dev->ibdev;
2109 hfi1_verbs_unregister_sysfs(dd);
2111 hfi1_free_agents(dev);
2113 ib_unregister_device(ibdev);
2115 if (!list_empty(&dev->txwait))
2116 dd_dev_err(dd, "txwait list not empty!\n");
2117 if (!list_empty(&dev->memwait))
2118 dd_dev_err(dd, "memwait list not empty!\n");
2120 dd_dev_err(dd, "DMA MR not NULL!\n");
2123 del_timer_sync(&dev->mem_timer);
2124 kmem_cache_destroy(dev->verbs_txreq_cache);
2125 vfree(dev->lk_table.table);
2129 * This must be called with s_lock held.
2131 void hfi1_schedule_send(struct hfi1_qp *qp)
2133 if (hfi1_send_ok(qp)) {
2134 struct hfi1_ibport *ibp =
2135 to_iport(qp->ibqp.device, qp->port_num);
2136 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2138 iowait_schedule(&qp->s_iowait, ppd->hfi1_wq);
2142 void hfi1_cnp_rcv(struct hfi1_packet *packet)
2144 struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
2146 if (packet->qp->ibqp.qp_type == IB_QPT_UC)
2147 hfi1_uc_rcv(packet);
2148 else if (packet->qp->ibqp.qp_type == IB_QPT_UD)
2149 hfi1_ud_rcv(packet);