/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for          *
 * RoCE (RDMA over Converged Ethernet) adapters.                   *
 * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Costa Mesa, CA 92626
 *******************************************************************/
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#include "ocrdma_sli.h"
#define OCRDMA_ROCE_DEV_VERSION "1.0.0"
#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"

#define OCRDMA_MAX_AH 512

#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
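/*
 * OCRDMA_UVERBS() maps an IB_USER_VERBS_CMD_* opcode to its bit, e.g.
 * OCRDMA_UVERBS(CREATE_CQ) == (1ull << IB_USER_VERBS_CMD_CREATE_CQ).
 * The bits are presumably ORed together to build the uverbs_cmd_mask
 * advertised when the ib_device is registered; that registration code
 * lives outside this header.
 */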
struct ocrdma_dev_attr {
        int max_pages_per_frmr;
        u8 cq_overflow_detect;
        u8 local_ca_ack_delay;
struct ocrdma_queue_info {
        u16 entry_size;         /* Size of an element in the queue */
        u16 id;                 /* qid, where to ring the doorbell. */
        struct ocrdma_queue_info q;
        struct ocrdma_dev *dev;

        struct ocrdma_queue_info sq;
        struct ocrdma_queue_info cq;

        struct mutex lock; /* for serializing mailbox commands on MQ */
        wait_queue_head_t cmd_wait;
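/*
 * The mutex/cmd_wait pair above forms the mailbox command context: the
 * mutex serializes mailbox commands issued on the MQ, and the issuer is
 * assumed to sleep on cmd_wait until the firmware completion arrives
 * (the wait/wake logic itself lives in the hardware-command code, not in
 * this header).
 */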
        struct ib_device ibdev;
        struct ocrdma_dev_attr attr;

        struct mutex dev_lock; /* provides synchronized access to device data */
        spinlock_t flush_q_lock ____cacheline_aligned;

        struct ocrdma_cq **cq_tbl;
        struct ocrdma_qp **qp_tbl;

        struct ocrdma_eq *eq_tbl;

        union ib_gid *sgid_tbl;
        /* provides synchronization to the sgid table for
         * updating gid entries triggered by notifier.
         */
        spinlock_t sgid_lock;

        struct ocrdma_cq *gsi_sqcq;
        struct ocrdma_cq *gsi_rqcq;

        struct ocrdma_av *va;
        /* provides synchronization for av table updates */
        struct ocrdma_pbl pbl;

        struct mqe_ctx mqe_ctx;

        struct be_dev_info nic_info;

        struct list_head entry;
        u64 stag_arr[OCRDMA_MAX_STAG];
        struct ocrdma_cqe *va;
        u32 getp;       /* pointer to pending wrs to
                         * return to stack, wrap arounds
                         */
        bool armed, solicited;

        spinlock_t cq_lock ____cacheline_aligned; /* provides synchronization
                                                   * to cq polling
                                                   */
        /* synchronizes the cq completion handler invoked from multiple contexts */
        spinlock_t comp_handler_lock ____cacheline_aligned;
        struct ocrdma_ucontext *ucontext;

        /* head of all qp's sq and rq for which cqes need to be flushed
         * by this cq.
         */
        struct list_head sq_head, rq_head;
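/*
 * QPs that enter the error state are expected to link their SQ and RQ onto
 * these per-CQ lists (via sq_entry/rq_entry in the QP fields below) so that
 * flush completions can be generated in software for outstanding work
 * requests.  This description is inferred from the field names here; the
 * actual flushing flow is implemented outside this header.
 */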
        struct ocrdma_dev *dev;
        struct ocrdma_ucontext *uctx;

        struct ocrdma_av *av;
struct ocrdma_qp_hwq_info {
        u8 *va;                 /* virtual address */
        u16 dbid;               /* qid, where to ring the doorbell. */

        struct ocrdma_qp_hwq_info rq;
        /* provides synchronization for multiple contexts posting rqes */
        spinlock_t q_lock ____cacheline_aligned;

        struct ocrdma_pd *pd;

        struct ocrdma_dev *dev;

        struct ocrdma_qp_hwq_info sq;
        uint16_t dpp_wqe_idx;
        /* provides synchronization for multiple contexts posting wqes and rqes */
        spinlock_t q_lock ____cacheline_aligned;
        struct ocrdma_cq *sq_cq;
        /* list maintained per CQ to flush SQ errors */
        struct list_head sq_entry;

        struct ocrdma_qp_hwq_info rq;
        struct ocrdma_cq *rq_cq;
        struct ocrdma_srq *srq;
        /* list maintained per CQ to flush RQ errors */
        struct list_head rq_entry;

        enum ocrdma_qp_state state;     /* QP state */
        u32 max_ord, max_ird;

        struct ocrdma_pd *pd;
        enum ib_qp_type qp_type;
struct ocrdma_hw_mr {
        struct ocrdma_pbl *pbl_table;

        struct ib_umem *umem;
        struct ocrdma_hw_mr hwmr;

struct ocrdma_ucontext {
        struct ib_ucontext ibucontext;

        struct list_head mm_head;
        struct mutex mm_list_lock; /* protects list entries of mm type */
        struct ocrdma_pd *cntxt_pd;

        struct list_head entry;
static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct ocrdma_dev, ibdev);
}

static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext
                                                          *ibucontext)
{
        return container_of(ibucontext, struct ocrdma_ucontext, ibucontext);
}
static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct ocrdma_pd, ibpd);
}

static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct ocrdma_cq, ibcq);
}

static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct ocrdma_qp, ibqp);
}

static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct ocrdma_mr, ibmr);
}

static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah)
{
        return container_of(ibah, struct ocrdma_ah, ibah);
}

static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct ocrdma_srq, ibsrq);
}
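/*
 * Each helper above recovers the driver-private object from the core verbs
 * handle embedded inside it: the ib_* structure is a member of the
 * corresponding ocrdma_* structure, so container_of() simply walks back from
 * the member to the containing object.  No allocation or lookup is involved.
 */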
static inline int ocrdma_get_num_posted_shift(struct ocrdma_qp *qp)
{
        return ((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY &&
                 qp->id < 128) ? 24 : 16);
}
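/*
 * The value returned above is the bit position at which the count of newly
 * posted WQEs is placed: Gen2 adapters use bit 24 for QP ids below 128,
 * everything else uses bit 16.  That this shift feeds the doorbell encoding
 * is inferred from the function name; the doorbell write itself happens in
 * the verbs code, not here.
 */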
static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)
{
        int cqe_valid;
        cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID;
        return (cqe_valid == cq->phase);
}
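/*
 * CQE ownership uses a phase/valid-bit scheme: the hardware writes CQEs with
 * the current valid-bit polarity, and software tracks its expected polarity
 * in cq->phase.  A CQE whose valid bit matches cq->phase is new; otherwise it
 * is a stale entry from the previous pass around the ring.  The phase flip on
 * ring wrap is assumed to be handled by the polling code outside this header.
 */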
static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe)
{
        return (le32_to_cpu(cqe->flags_status_srcqpn) &
                OCRDMA_CQE_QTYPE) ? 0 : 1;
}
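/* OCRDMA_CQE_QTYPE set means the CQE belongs to the receive queue, so a
 * clear bit identifies a send-queue completion.
 */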
static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe)
{
        return (le32_to_cpu(cqe->flags_status_srcqpn) &
                OCRDMA_CQE_INVALIDATE) ? 1 : 0;
}

static inline int is_cqe_imm(struct ocrdma_cqe *cqe)
{
        return (le32_to_cpu(cqe->flags_status_srcqpn) &
                OCRDMA_CQE_IMM) ? 1 : 0;
}

static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)
{
        return (le32_to_cpu(cqe->flags_status_srcqpn) &
                OCRDMA_CQE_WRITE_IMM) ? 1 : 0;
}