/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
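
/* Driver-private declarations (qib_devdata, qib_qp, qib_qpn_table, ...);
 * the header name is an assumption, as the driver-local include was elided. */
#include "qib.h"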

#define BITS_PER_PAGE           (PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK      (BITS_PER_PAGE-1)
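
/*
 * Each qpn_map entry covers one bitmap page: with 4 KiB pages that is
 * 4096 * 8 = 32768 QPNs per map entry.  A QPN maps to map index
 * qpn / BITS_PER_PAGE and bit offset qpn & BITS_PER_PAGE_MASK.
 */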

static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
                              struct qpn_map *map, unsigned off)
        return (map - qpt->map) * BITS_PER_PAGE + off;

static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
                                        struct qpn_map *map, unsigned off,
        if ((off & qpt->mask) >> 1 != r)
                off = ((off & qpt->mask) ?
                        (off | qpt->mask) + 1 : off) | (r << 1);
        off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
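
/*
 * The AETH credit field is a 5-bit code rather than a raw RWQE count;
 * credit_table[] below maps each code to a credit count, and
 * qib_compute_aeth() performs the reverse mapping by binary search
 * over the same table.
 */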

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {

static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
        unsigned long page = get_zeroed_page(GFP_KERNEL);

        /*
         * Free the page if someone raced with us installing it.
         */
        spin_lock(&qpt->lock);
                map->page = (void *)page;
        spin_unlock(&qpt->lock);

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
                     enum ib_qp_type type, u8 port)
        u32 i, offset, max_scan, qpn;

        if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
                ret = type == IB_QPT_GSI;
                n = 1 << (ret + 2 * (port - 1));
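                /*
                 * ret is 0 for an SMI QP and 1 for a GSI QP, and is also
                 * the QPN handed back; n is the per-port flag bit that
                 * marks the QPN as taken (bits 0/1 for port 1, bits 2/3
                 * for port 2).
                 */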
                spin_lock(&qpt->lock);
                spin_unlock(&qpt->lock);

        r = smp_processor_id();
        if (r >= dd->n_krcv_queues)
                r %= dd->n_krcv_queues;
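
        /*
         * When the QPN table has a mask, bits 1..N of a QPN identify a
         * kernel receive context; the starting QPN below is advanced
         * until those bits match r, the context chosen from the current
         * CPU above, so the new QP stays on that context.
         */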
        if (qpt->mask && ((qpn & qpt->mask) >> 1) != r)
                qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) |
        offset = qpn & BITS_PER_PAGE_MASK;
        map = &qpt->map[qpn / BITS_PER_PAGE];
        max_scan = qpt->nmaps - !offset;
                if (unlikely(!map->page)) {
                        get_map_page(qpt, map);
                        if (unlikely(!map->page))
                        if (!test_and_set_bit(offset, map->page)) {
                        offset = find_next_offset(qpt, map, offset, r);
                        qpn = mk_qpn(qpt, map, offset);
                        /*
                         * This test differs from alloc_pidmap().
                         * If find_next_offset() does find a zero
                         * bit, we don't need to check for QPN
                         * wrapping around past our starting QPN.
                         * We just need to be sure we don't loop
                         * forever.
                         */
                } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);

                /*
                 * In order to keep the number of pages allocated to a
                 * minimum, we scan all existing pages before increasing
                 * the size of the bitmap table.
                 */
                if (++i > max_scan) {
                        if (qpt->nmaps == QPNMAP_ENTRIES)
                        map = &qpt->map[qpt->nmaps++];
                        offset = qpt->mask ? (r << 1) : 0;
                } else if (map < &qpt->map[qpt->nmaps]) {
                        offset = qpt->mask ? (r << 1) : 0;
                        offset = qpt->mask ? (r << 1) : 2;
                qpn = mk_qpn(qpt, map, offset);

static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
        map = qpt->map + qpn / BITS_PER_PAGE;
        clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        unsigned n = qp->ibqp.qp_num % dev->qp_table_size;
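
        /*
         * QPs hash to bucket qp_num % qp_table_size and are linked at
         * the head of the bucket's singly linked list via qp->next;
         * QPN 0 and QPN 1 are instead stored directly in the ibport.
         */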

        spin_lock_irqsave(&dev->qpt_lock, flags);

        if (qp->ibqp.qp_num == 0)
        else if (qp->ibqp.qp_num == 1)
                qp->next = dev->qp_table[n];
                dev->qp_table[n] = qp;
        atomic_inc(&qp->refcount);

        spin_unlock_irqrestore(&dev->qpt_lock, flags);

/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        struct qib_qp *q, **qpp;

        qpp = &dev->qp_table[qp->ibqp.qp_num % dev->qp_table_size];

        spin_lock_irqsave(&dev->qpt_lock, flags);

        if (ibp->qp0 == qp) {
                atomic_dec(&qp->refcount);
        } else if (ibp->qp1 == qp) {
                atomic_dec(&qp->refcount);
                for (; (q = *qpp) != NULL; qpp = &q->next)
                        atomic_dec(&qp->refcount);

        spin_unlock_irqrestore(&dev->qpt_lock, flags);

/**
 * qib_free_all_qps - check for QPs still in use
 * @qpt: the QP table to empty
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
unsigned qib_free_all_qps(struct qib_devdata *dd)
        struct qib_ibdev *dev = &dd->verbs_dev;
        unsigned n, qp_inuse = 0;

        for (n = 0; n < dd->num_pports; n++) {
                struct qib_ibport *ibp = &dd->pport[n].ibport_data;

                if (!qib_mcast_tree_empty(ibp))

        spin_lock_irqsave(&dev->qpt_lock, flags);
        for (n = 0; n < dev->qp_table_size; n++) {
                qp = dev->qp_table[n];
                dev->qp_table[n] = NULL;

                for (; qp; qp = qp->next)
        spin_unlock_irqrestore(&dev->qpt_lock, flags);

/**
 * qib_lookup_qpn - return the QP with the given QPN
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
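 * when done with it (qib_destroy_qp() waits on qp->wait for the
 * reference count to reach zero before tearing the QP down).
 */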
struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
        struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;

        spin_lock_irqsave(&dev->qpt_lock, flags);

                for (qp = dev->qp_table[qpn % dev->qp_table_size]; qp;
                        if (qp->ibqp.qp_num == qpn)

                atomic_inc(&qp->refcount);

        spin_unlock_irqrestore(&dev->qpt_lock, flags);

/**
 * qib_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 */
static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
        qp->qp_access_flags = 0;
        atomic_set(&qp->s_dma_busy, 0);
        qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
        qp->s_sending_psn = 0;
        qp->s_sending_hpsn = 0;
        if (type == IB_QPT_RC) {
                qp->s_state = IB_OPCODE_RC_SEND_LAST;
                qp->r_state = IB_OPCODE_RC_SEND_LAST;
                qp->s_state = IB_OPCODE_UC_SEND_LAST;
                qp->r_state = IB_OPCODE_UC_SEND_LAST;
        qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
        qp->s_mig_state = IB_MIG_MIGRATED;
        memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
        qp->r_head_ack_queue = 0;
        qp->s_tail_ack_queue = 0;
        qp->s_num_rd_atomic = 0;
        qp->r_rq.wq->head = 0;
        qp->r_rq.wq->tail = 0;
        qp->r_sge.num_sge = 0;

static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
        if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
                while (qp->s_rdma_read_sge.num_sge) {
                        atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
                        if (--qp->s_rdma_read_sge.num_sge)
                                qp->s_rdma_read_sge.sge =
                                        *qp->s_rdma_read_sge.sg_list++;

        while (qp->r_sge.num_sge) {
                atomic_dec(&qp->r_sge.sge.mr->refcount);
                if (--qp->r_sge.num_sge)
                        qp->r_sge.sge = *qp->r_sge.sg_list++;

        while (qp->s_last != qp->s_head) {
                struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

                for (i = 0; i < wqe->wr.num_sge; i++) {
                        struct qib_sge *sge = &wqe->sg_list[i];

                        atomic_dec(&sge->mr->refcount);
                if (qp->ibqp.qp_type == IB_QPT_UD ||
                    qp->ibqp.qp_type == IB_QPT_SMI ||
                    qp->ibqp.qp_type == IB_QPT_GSI)
                        atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
                if (++qp->s_last >= qp->s_size)
        atomic_dec(&qp->s_rdma_mr->refcount);
        qp->s_rdma_mr = NULL;

        if (qp->ibqp.qp_type != IB_QPT_RC)

        for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
                struct qib_ack_entry *e = &qp->s_ack_queue[n];

                if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
                        atomic_dec(&e->rdma_sge.mr->refcount);
                        e->rdma_sge.mr = NULL;

/**
 * qib_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
        struct qib_ibdev *dev = to_idev(qp->ibqp.device);

        if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)

        qp->state = IB_QPS_ERR;

        if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
                qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
                del_timer(&qp->s_timer);

        if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
                qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;

        spin_lock(&dev->pending_lock);
        if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
                qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
                list_del_init(&qp->iowait);
        spin_unlock(&dev->pending_lock);

        if (!(qp->s_flags & QIB_S_BUSY)) {
                atomic_dec(&qp->s_rdma_mr->refcount);
                qp->s_rdma_mr = NULL;
                qib_put_txreq(qp->s_tx);

        /* Schedule the sending tasklet to drain the send work queue. */
        if (qp->s_last != qp->s_head)
                qib_schedule_send(qp);

        clear_mr_refs(qp, 0);

        memset(&wc, 0, sizeof(wc));
        wc.opcode = IB_WC_RECV;

        if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
                wc.wr_id = qp->r_wr_id;
                qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
        wc.status = IB_WC_WR_FLUSH_ERR;

                spin_lock(&qp->r_rq.lock);

                /* sanity check pointers before trusting them */
                if (head >= qp->r_rq.size)
                if (tail >= qp->r_rq.size)
                while (tail != head) {
                        wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
                        if (++tail >= qp->r_rq.size)
                        qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
                spin_unlock(&qp->r_rq.lock);
        } else if (qp->ibqp.event_handler)

/**
 * qib_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                  int attr_mask, struct ib_udata *udata)
        struct qib_ibdev *dev = to_idev(ibqp->device);
        struct qib_qp *qp = to_iqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        u32 pmtu = 0; /* for gcc warning only */

        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_lock);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                attr->cur_qp_state : qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,

        if (attr_mask & IB_QP_AV) {
                if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
                if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))

        if (attr_mask & IB_QP_ALT_PATH) {
                if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
                if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
                if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))

        if (attr_mask & IB_QP_PKEY_INDEX)
                if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                if (attr->min_rnr_timer > 31)

        if (attr_mask & IB_QP_PORT)
                if (qp->ibqp.qp_type == IB_QPT_SMI ||
                    qp->ibqp.qp_type == IB_QPT_GSI ||
                    attr->port_num == 0 ||
                    attr->port_num > ibqp->device->phys_port_cnt)

        if (attr_mask & IB_QP_DEST_QPN)
                if (attr->dest_qp_num > QIB_QPN_MASK)

        if (attr_mask & IB_QP_RETRY_CNT)
                if (attr->retry_cnt > 7)

        if (attr_mask & IB_QP_RNR_RETRY)
                if (attr->rnr_retry > 7)

        /*
         * Don't allow invalid path_mtu values.  OK to set greater
         * than the active mtu (or even the max_cap, if we have tuned
         * that to a small mtu).  We'll set qp->path_mtu
         * to the lesser of requested attribute mtu and active,
         * for packetizing messages.
         * Note that the QP port has to be set in INIT and MTU in RTR.
         */
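
        /*
         * For example, requesting IB_MTU_4096 on a port whose active
         * MTU is 2048 is accepted, but qp->path_mtu ends up at 2048,
         * the lesser of the requested and active values.
         */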
        if (attr_mask & IB_QP_PATH_MTU) {
                struct qib_devdata *dd = dd_from_dev(dev);
                int mtu, pidx = qp->port_num - 1;

                mtu = ib_mtu_enum_to_int(attr->path_mtu);
                if (mtu > dd->pport[pidx].ibmtu) {
                        switch (dd->pport[pidx].ibmtu) {
                pmtu = attr->path_mtu;

        if (attr_mask & IB_QP_PATH_MIG_STATE) {
                if (attr->path_mig_state == IB_MIG_REARM) {
                        if (qp->s_mig_state == IB_MIG_ARMED)
                        if (new_state != IB_QPS_RTS)
                } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
                        if (qp->s_mig_state == IB_MIG_REARM)
                        if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
                        if (qp->s_mig_state == IB_MIG_ARMED)

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)

                if (qp->state != IB_QPS_RESET) {
                        qp->state = IB_QPS_RESET;
                        spin_lock(&dev->pending_lock);
                        if (!list_empty(&qp->iowait))
                                list_del_init(&qp->iowait);
                        spin_unlock(&dev->pending_lock);
                        qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
                        spin_unlock(&qp->s_lock);
                        spin_unlock_irq(&qp->r_lock);
                        /* Stop the sending work queue and retry timer */
                        cancel_work_sync(&qp->s_work);
                        del_timer_sync(&qp->s_timer);
                        wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
                                qib_put_txreq(qp->s_tx);
                        wait_event(qp->wait, !atomic_read(&qp->refcount));
                        spin_lock_irq(&qp->r_lock);
                        spin_lock(&qp->s_lock);
                        clear_mr_refs(qp, 1);
                        qib_reset_qp(qp, ibqp->qp_type);

                /* Allow event to retrigger if QP set to RTR more than once */
                qp->r_flags &= ~QIB_R_COMM_EST;
                qp->state = new_state;

                qp->s_draining = qp->s_last != qp->s_cur;
                qp->state = new_state;

                if (qp->ibqp.qp_type == IB_QPT_RC)
                qp->state = new_state;

                lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
                qp->state = new_state;

        if (attr_mask & IB_QP_PKEY_INDEX)
                qp->s_pkey_index = attr->pkey_index;

        if (attr_mask & IB_QP_PORT)
                qp->port_num = attr->port_num;

        if (attr_mask & IB_QP_DEST_QPN)
                qp->remote_qpn = attr->dest_qp_num;

        if (attr_mask & IB_QP_SQ_PSN) {
                qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
                qp->s_psn = qp->s_next_psn;
                qp->s_sending_psn = qp->s_next_psn;
                qp->s_last_psn = qp->s_next_psn - 1;
                qp->s_sending_hpsn = qp->s_last_psn;

        if (attr_mask & IB_QP_RQ_PSN)
                qp->r_psn = attr->rq_psn & QIB_PSN_MASK;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                qp->qp_access_flags = attr->qp_access_flags;

        if (attr_mask & IB_QP_AV) {
                qp->remote_ah_attr = attr->ah_attr;
                qp->s_srate = attr->ah_attr.static_rate;

        if (attr_mask & IB_QP_ALT_PATH) {
                qp->alt_ah_attr = attr->alt_ah_attr;
                qp->s_alt_pkey_index = attr->alt_pkey_index;

        if (attr_mask & IB_QP_PATH_MIG_STATE) {
                qp->s_mig_state = attr->path_mig_state;
                        qp->remote_ah_attr = qp->alt_ah_attr;
                        qp->port_num = qp->alt_ah_attr.port_num;
                        qp->s_pkey_index = qp->s_alt_pkey_index;

        if (attr_mask & IB_QP_PATH_MTU)

        if (attr_mask & IB_QP_RETRY_CNT) {
                qp->s_retry_cnt = attr->retry_cnt;
                qp->s_retry = attr->retry_cnt;

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp->s_rnr_retry_cnt = attr->rnr_retry;
                qp->s_rnr_retry = attr->rnr_retry;

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                qp->r_min_rnr_timer = attr->min_rnr_timer;

        if (attr_mask & IB_QP_TIMEOUT)
                qp->timeout = attr->timeout;

        if (attr_mask & IB_QP_QKEY)
                qp->qkey = attr->qkey;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
                qp->s_max_rd_atomic = attr->max_rd_atomic;

        spin_unlock(&qp->s_lock);
        spin_unlock_irq(&qp->r_lock);

        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)

                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);

                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_PATH_MIG;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);

        spin_unlock(&qp->s_lock);
        spin_unlock_irq(&qp->r_lock);

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                 int attr_mask, struct ib_qp_init_attr *init_attr)
        struct qib_qp *qp = to_iqp(ibqp);

        attr->qp_state = qp->state;
        attr->cur_qp_state = attr->qp_state;
        attr->path_mtu = qp->path_mtu;
        attr->path_mig_state = qp->s_mig_state;
        attr->qkey = qp->qkey;
        attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
        attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
        attr->dest_qp_num = qp->remote_qpn;
        attr->qp_access_flags = qp->qp_access_flags;
        attr->cap.max_send_wr = qp->s_size - 1;
        attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
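        /*
         * s_size and r_rq.size are allocated as max_*_wr + 1 in
         * qib_create_qp(); the extra slot lets a circular queue
         * distinguish full from empty, so the capacity reported back
         * here is size - 1.
         */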
        attr->cap.max_send_sge = qp->s_max_sge;
        attr->cap.max_recv_sge = qp->r_rq.max_sge;
        attr->cap.max_inline_data = 0;
        attr->ah_attr = qp->remote_ah_attr;
        attr->alt_ah_attr = qp->alt_ah_attr;
        attr->pkey_index = qp->s_pkey_index;
        attr->alt_pkey_index = qp->s_alt_pkey_index;
        attr->en_sqd_async_notify = 0;
        attr->sq_draining = qp->s_draining;
        attr->max_rd_atomic = qp->s_max_rd_atomic;
        attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
        attr->min_rnr_timer = qp->r_min_rnr_timer;
        attr->port_num = qp->port_num;
        attr->timeout = qp->timeout;
        attr->retry_cnt = qp->s_retry_cnt;
        attr->rnr_retry = qp->s_rnr_retry_cnt;
        attr->alt_port_num = qp->alt_ah_attr.port_num;
        attr->alt_timeout = qp->alt_timeout;

        init_attr->event_handler = qp->ibqp.event_handler;
        init_attr->qp_context = qp->ibqp.qp_context;
        init_attr->send_cq = qp->ibqp.send_cq;
        init_attr->recv_cq = qp->ibqp.recv_cq;
        init_attr->srq = qp->ibqp.srq;
        init_attr->cap = attr->cap;
        if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
                init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
                init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
        init_attr->qp_type = qp->ibqp.qp_type;
        init_attr->port_num = qp->port_num;

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 */
__be32 qib_compute_aeth(struct qib_qp *qp)
        u32 aeth = qp->r_msn & QIB_MSN_MASK;

                /*
                 * Shared receive queues don't generate credits.
                 * Set the credit field to the invalid value.
                 */
                aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
                struct qib_rwq *wq = qp->r_rq.wq;

                /* sanity check pointers before trusting them */
                if (head >= qp->r_rq.size)
                if (tail >= qp->r_rq.size)
                /*
                 * Compute the number of credits available (RWQEs).
                 * XXX Not holding the r_rq.lock here so there is a small
                 * chance that the pair of reads are not atomic.
                 */
                credits = head - tail;
                if ((int)credits < 0)
                        credits += qp->r_rq.size;
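
                /*
                 * For example, with size 8, head 2 and tail 5, head - tail
                 * goes negative and adding the queue size gives the 5
                 * RWQEs actually available.
                 */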

                /*
                 * Binary search the credit table to find the code to
                 * use.
                 */
                        if (credit_table[x] == credits)
                        if (credit_table[x] > credits)
                aeth |= x << QIB_AETH_CREDIT_SHIFT;
        return cpu_to_be32(aeth);

/**
 * qib_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata)
        struct qib_swqe *swq = NULL;
        struct qib_ibdev *dev;
        struct qib_devdata *dd;

        if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
            init_attr->cap.max_send_wr > ib_qib_max_qp_wrs) {
                ret = ERR_PTR(-EINVAL);

        /* Check receive queue parameters if no SRQ is specified. */
        if (!init_attr->srq) {
                if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
                    init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
                        ret = ERR_PTR(-EINVAL);

                if (init_attr->cap.max_send_sge +
                    init_attr->cap.max_send_wr +
                    init_attr->cap.max_recv_sge +
                    init_attr->cap.max_recv_wr == 0) {
                        ret = ERR_PTR(-EINVAL);

        switch (init_attr->qp_type) {
                if (init_attr->port_num == 0 ||
                    init_attr->port_num > ibpd->device->phys_port_cnt) {
                        ret = ERR_PTR(-EINVAL);

                sz = sizeof(struct qib_sge) *
                        init_attr->cap.max_send_sge +
                        sizeof(struct qib_swqe);
                swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
                        ret = ERR_PTR(-ENOMEM);

                if (init_attr->srq) {
                        struct qib_srq *srq = to_isrq(init_attr->srq);

                        if (srq->rq.max_sge > 1)
                                sg_list_sz = sizeof(*qp->r_sg_list) *
                                        (srq->rq.max_sge - 1);
                } else if (init_attr->cap.max_recv_sge > 1)
                        sg_list_sz = sizeof(*qp->r_sg_list) *
                                (init_attr->cap.max_recv_sge - 1);
                qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
                        ret = ERR_PTR(-ENOMEM);

                        qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
                        qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
                        sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
                                sizeof(struct qib_rwqe);
                        qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
                                                   qp->r_rq.size * sz);
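
                        /*
                         * The receive queue is one chunk of memory: a
                         * struct qib_rwq header (head/tail indexes)
                         * followed by r_rq.size entries, each holding a
                         * struct qib_rwqe plus max_sge struct ib_sge
                         * slots.  vmalloc_user() makes it mappable into
                         * the user process (see the mmap offset handling
                         * below).
                         */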
                                ret = ERR_PTR(-ENOMEM);

                /*
                 * ib_create_qp() will initialize qp->ibqp
                 * except for qp->ibqp.qp_num.
                 */
                spin_lock_init(&qp->r_lock);
                spin_lock_init(&qp->s_lock);
                spin_lock_init(&qp->r_rq.lock);
                atomic_set(&qp->refcount, 0);
                init_waitqueue_head(&qp->wait);
                init_waitqueue_head(&qp->wait_dma);
                init_timer(&qp->s_timer);
                qp->s_timer.data = (unsigned long)qp;
                INIT_WORK(&qp->s_work, qib_do_send);
                INIT_LIST_HEAD(&qp->iowait);
                INIT_LIST_HEAD(&qp->rspwait);
                qp->state = IB_QPS_RESET;
                qp->s_size = init_attr->cap.max_send_wr + 1;
                qp->s_max_sge = init_attr->cap.max_send_sge;
                if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
                        qp->s_flags = QIB_S_SIGNAL_REQ_WR;
                dev = to_idev(ibpd->device);
                dd = dd_from_dev(dev);
                err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
                                init_attr->port_num);

                qp->ibqp.qp_num = err;
                qp->port_num = init_attr->port_num;
                qp->processor_id = smp_processor_id();
                qib_reset_qp(qp, init_attr->qp_type);

                /* Don't support raw QPs */
                ret = ERR_PTR(-ENOSYS);

        init_attr->cap.max_inline_data = 0;

        /*
         * Return the address of the RWQ as the offset to mmap.
         * See qib_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                        err = ib_copy_to_udata(udata, &offset,

                        u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;

                        qp->ip = qib_create_mmap_info(dev, s,
                                                      ibpd->uobject->context,
                                ret = ERR_PTR(-ENOMEM);

                        err = ib_copy_to_udata(udata, &(qp->ip->offset),
                                               sizeof(qp->ip->offset));

        spin_lock(&dev->n_qps_lock);
        if (dev->n_qps_allocated == ib_qib_max_qps) {
                spin_unlock(&dev->n_qps_lock);
                ret = ERR_PTR(-ENOMEM);

        dev->n_qps_allocated++;
        spin_unlock(&dev->n_qps_lock);

        spin_lock_irq(&dev->pending_lock);
        list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
        spin_unlock_irq(&dev->pending_lock);

        kref_put(&qp->ip->ref, qib_release_mmap_info);
        free_qpn(&dev->qpn_table, qp->ibqp.qp_num);

/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
        struct qib_qp *qp = to_iqp(ibqp);
        struct qib_ibdev *dev = to_idev(ibqp->device);

        /* Make sure HW and driver activity is stopped. */
        spin_lock_irq(&qp->s_lock);
        if (qp->state != IB_QPS_RESET) {
                qp->state = IB_QPS_RESET;
                spin_lock(&dev->pending_lock);
                if (!list_empty(&qp->iowait))
                        list_del_init(&qp->iowait);
                spin_unlock(&dev->pending_lock);
                qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
                spin_unlock_irq(&qp->s_lock);
                cancel_work_sync(&qp->s_work);
                del_timer_sync(&qp->s_timer);
                wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
                        qib_put_txreq(qp->s_tx);
                wait_event(qp->wait, !atomic_read(&qp->refcount));
                clear_mr_refs(qp, 1);
                spin_unlock_irq(&qp->s_lock);

        /* all users cleaned up, mark it available */
        free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
        spin_lock(&dev->n_qps_lock);
        dev->n_qps_allocated--;
        spin_unlock(&dev->n_qps_lock);

        kref_put(&qp->ip->ref, qib_release_mmap_info);

/**
 * qib_init_qpn_table - initialize the QP number table for a device
 * @qpt: the QPN table
 */
void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
        spin_lock_init(&qpt->lock);
        qpt->last = 1;          /* start with QPN 2 */
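        /*
         * QPNs 0 and 1 are never handed out by the bitmap scan; they
         * are reserved for each port's SMI and GSI QPs (see alloc_qpn()).
         */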
        qpt->mask = dd->qpn_mask;

/**
 * qib_free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
void qib_free_qpn_table(struct qib_qpn_table *qpt)
        for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
                if (qpt->map[i].page)
                        free_page((unsigned long) qpt->map[i].page);

/**
 * qib_get_credit - handle a credit update from an incoming AETH
 * @qp: the qp whose send work queue may be unblocked
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct qib_qp *qp, u32 aeth)
        u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

        /*
         * If the credit is invalid, we can send
         * as many packets as we like.  Otherwise, we have to
         * honor the credit field.
         */
        if (credit == QIB_AETH_CREDIT_INVAL) {
                if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
                        qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
                        if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
                                qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
                                qib_schedule_send(qp);
        } else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
                /* Compute new LSN (i.e., MSN + credit) */
                credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
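                /*
                 * The MSN occupies the low 24 bits of the AETH, so adding
                 * the decoded credit count and masking with QIB_MSN_MASK
                 * yields the new limit sequence number compared against
                 * qp->s_lsn below.
                 */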
                if (qib_cmp24(credit, qp->s_lsn) > 0) {
                        if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
                                qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
                                qib_schedule_send(qp);