/*
 * Copyright (c) 2006, 2007, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "qib.h"

/**
 * qib_alloc_lkey - allocate an lkey
 * @mr: memory region that this lkey protects
 * @dma_region: 0->normal key, 1->restricted DMA key
 *
 * Returns 0 if successful, otherwise returns -errno.
 *
 * Increments the reference count on @mr and sets lkey_published
 * on success.
 *
 * Sets the lkey field of @mr for non-DMA regions.
 */
int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
{
	unsigned long flags;
	u32 r;
	u32 n;
	struct qib_ibdev *dev = to_idev(mr->pd->device);
	struct qib_lkey_table *rkt = &dev->lk_table;

	spin_lock_irqsave(&rkt->lock, flags);

	/* special case for dma_mr lkey == 0 */
	if (dma_region) {
		/* should the dma_mr be relative to the pd? */
		if (!dev->dma_mr) {
			qib_get_mr(mr);
			dev->dma_mr = mr;
			mr->lkey_published = 1;
		}
		goto success;
	}

	/* Find the next available LKEY */
	r = rkt->next;
	n = r;
	for (;;) {
		if (rkt->table[r] == NULL)
			break;
		r = (r + 1) & (rkt->max - 1);
		if (r == n)
			goto bail;
	}
	rkt->next = (r + 1) & (rkt->max - 1);
	/*
	 * Make sure lkey is never zero which is reserved to indicate an
	 * unrestricted LKEY.
	 */
	rkt->gen++;
	mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
		((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
		 << 8);
	if (mr->lkey == 0) {
		mr->lkey |= 1 << 8;
		rkt->gen++;
	}
	qib_get_mr(mr);
	rkt->table[r] = mr;
	mr->lkey_published = 1;
success:
	spin_unlock_irqrestore(&rkt->lock, flags);
	return 0;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	return -ENOMEM;
}
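/*
 * Layout of the generated lkey, assuming the default lkey table size
 * of 16 bits (ib_qib_lkey_table_size is a module parameter, so the
 * exact split varies):
 *
 *	bits 31..16: table index r
 *	bits 15..8:  generation counter
 *	bits  7..0:  zero
 *
 * The generation bits make a recycled table slot yield a different
 * lkey value, so a stale key left over from a freed region fails the
 * mr->lkey != sge->lkey check in qib_lkey_ok() below.
 */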
/**
 * qib_free_lkey - free an lkey
 * @mr: mr to free from tables
 */
void qib_free_lkey(struct qib_mregion *mr)
{
	unsigned long flags;
	u32 lkey = mr->lkey;
	u32 r;
	struct qib_ibdev *dev = to_idev(mr->pd->device);
	struct qib_lkey_table *rkt = &dev->lk_table;

	/* Take rkt->lock exactly once; taking it twice would deadlock. */
	spin_lock_irqsave(&rkt->lock, flags);
	if (!mr->lkey_published)
		goto out;
	if (lkey == 0) {
		if (dev->dma_mr && dev->dma_mr == mr) {
			qib_put_mr(dev->dma_mr);
			dev->dma_mr = NULL;
		}
	} else {
		r = lkey >> (32 - ib_qib_lkey_table_size);
		/* drop the table's reference on this mr, not on dma_mr */
		qib_put_mr(mr);
		rkt->table[r] = NULL;
	}
	mr->lkey_published = 0;
out:
	spin_unlock_irqrestore(&rkt->lock, flags);
}
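/*
 * Illustrative pairing of the two calls above (a sketch, not code from
 * this file; the real callers live in the MR register/deregister paths
 * in qib_mr.c):
 *
 *	if (qib_alloc_lkey(&mr->mr, 0))
 *		goto err;
 *	...
 *	qib_free_lkey(&mr->mr);		// unpublish, drop the table ref
 */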
/**
 * qib_lkey_ok - check IB SGE for validity and initialize
 * @rkt: table containing lkey to check SGE against
 * @pd: protection domain
 * @isge: outgoing internal SGE
 * @sge: SGE to check
 * @acc: access flags
 *
 * Return 1 if valid and successful, otherwise returns 0.
 *
 * Increments the reference count upon success.
 *
 * Check the IB SGE for validity and initialize our internal version
 * of it.
 */
int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
		struct qib_sge *isge, struct ib_sge *sge, int acc)
{
	struct qib_mregion *mr;
	unsigned n, m;
	size_t off;
	unsigned long flags;

	/*
	 * We use LKEY == zero for kernel virtual addresses
	 * (see qib_get_dma_mr and qib_dma.c).
	 */
	spin_lock_irqsave(&rkt->lock, flags);
	if (sge->lkey == 0) {
		struct qib_ibdev *dev = to_idev(pd->ibpd.device);

		if (pd->user)
			goto bail;
		if (!dev->dma_mr)
			goto bail;
		qib_get_mr(dev->dma_mr);
		spin_unlock_irqrestore(&rkt->lock, flags);

		isge->mr = dev->dma_mr;
		isge->vaddr = (void *) sge->addr;
		isge->length = sge->length;
		isge->sge_length = sge->length;
		isge->m = 0;
		isge->n = 0;
		goto ok;
	}
	mr = rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))];
	if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
		     mr->pd != &pd->ibpd))
		goto bail;

	off = sge->addr - mr->user_base;
	if (unlikely(sge->addr < mr->user_base ||
		     off + sge->length > mr->length ||
		     (mr->access_flags & acc) != acc))
		goto bail;
	qib_get_mr(mr);
	spin_unlock_irqrestore(&rkt->lock, flags);

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * Page sizes are a uniform power of 2, so no loop is
		 * necessary: entries_spanned_by_off is the number of times
		 * the loop below would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / QIB_SEGSZ;
		n = entries_spanned_by_off % QIB_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	isge->mr = mr;
	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
	isge->length = mr->map[m]->segs[n].length - off;
	isge->sge_length = sge->length;
	isge->m = m;
	isge->n = n;
ok:
	return 1;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	return 0;
}
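/*
 * Worked example of the page_shift fast path in qib_lkey_ok() above
 * (illustrative numbers): with mr->page_shift == 12 (4 KiB segments)
 * and off == 0x5064, entries_spanned_by_off = 0x5064 >> 12 = 5, the
 * residual off becomes 0x64, and m = 5 / QIB_SEGSZ, n = 5 % QIB_SEGSZ
 * index the two-level segment map directly instead of walking the
 * while loop five times.
 */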
/**
 * qib_rkey_ok - check the IB virtual address, length, and RKEY
 * @qp: qp for validation
 * @sge: SGE state
 * @len: length of data
 * @vaddr: virtual address to place data
 * @rkey: rkey to check
 * @acc: access flags
 *
 * Return 1 if successful, otherwise 0.
 *
 * Increments the reference count upon success.
 */
int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc)
{
	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
	struct qib_mregion *mr;
	unsigned n, m;
	size_t off;
	unsigned long flags;

	/*
	 * We use RKEY == zero for kernel virtual addresses
	 * (see qib_get_dma_mr and qib_dma.c).
	 */
	spin_lock_irqsave(&rkt->lock, flags);
	if (rkey == 0) {
		struct qib_pd *pd = to_ipd(qp->ibqp.pd);
		struct qib_ibdev *dev = to_idev(pd->ibpd.device);

		if (pd->user)
			goto bail;
		if (!dev->dma_mr)
			goto bail;
		qib_get_mr(dev->dma_mr);
		spin_unlock_irqrestore(&rkt->lock, flags);

		sge->mr = dev->dma_mr;
		sge->vaddr = (void *) vaddr;
		sge->length = len;
		sge->sge_length = len;
		sge->m = 0;
		sge->n = 0;
		goto ok;
	}

	mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
	if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
		goto bail;

	off = vaddr - mr->iova;
	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
		     (mr->access_flags & acc) == 0))
		goto bail;
	qib_get_mr(mr);
	spin_unlock_irqrestore(&rkt->lock, flags);

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * Page sizes are a uniform power of 2, so no loop is
		 * necessary: entries_spanned_by_off is the number of times
		 * the loop below would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / QIB_SEGSZ;
		n = entries_spanned_by_off % QIB_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	sge->mr = mr;
	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
	sge->length = mr->map[m]->segs[n].length - off;
	sge->sge_length = len;
	sge->m = m;
	sge->n = n;
ok:
	return 1;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	return 0;
}
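/*
 * Illustrative sketch of a caller (not from this file; the real use is
 * in the RC receive path): the responder validates an incoming RDMA
 * WRITE's RETH before touching memory.
 *
 *	u32 rkey = be32_to_cpu(reth->rkey);
 *	u64 vaddr = be64_to_cpu(reth->vaddr);
 *	int ok;
 *
 *	ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
 *			 rkey, IB_ACCESS_REMOTE_WRITE);
 *	if (unlikely(!ok))
 *		goto nack_acc;
 */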
/*
 * Initialize the memory region specified by the work request.
 */
int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)
{
	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
	struct qib_pd *pd = to_ipd(qp->ibqp.pd);
	struct qib_mregion *mr;
	u32 rkey = wr->wr.fast_reg.rkey;
	unsigned i, n, m;
	int ret = -EINVAL;
	unsigned long flags;
	u64 *page_list;
	size_t ps;

	spin_lock_irqsave(&rkt->lock, flags);
	if (pd->user || rkey == 0)
		goto bail;

	mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
	if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
		goto bail;

	if (wr->wr.fast_reg.page_list_len > mr->max_segs)
		goto bail;

	ps = 1UL << wr->wr.fast_reg.page_shift;
	if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len)
		goto bail;

	mr->user_base = wr->wr.fast_reg.iova_start;
	mr->iova = wr->wr.fast_reg.iova_start;
	mr->lkey = rkey;
	mr->length = wr->wr.fast_reg.length;
	mr->access_flags = wr->wr.fast_reg.access_flags;
	page_list = wr->wr.fast_reg.page_list->page_list;
	m = 0;
	n = 0;
	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
		mr->map[m]->segs[n].vaddr = (void *) page_list[i];
		mr->map[m]->segs[n].length = ps;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}

	ret = 0;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	return ret;
}
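/*
 * Illustrative sketch of the producing side (not from this file): a
 * kernel ULP posts an IB_WR_FAST_REG_MR work request, which the qib
 * send path hands to qib_fast_reg_mr() above. The rkey, iova, len,
 * page_list, and npages variables are placeholders.
 *
 *	memset(&wr, 0, sizeof(wr));
 *	wr.opcode = IB_WR_FAST_REG_MR;
 *	wr.wr.fast_reg.rkey = rkey;
 *	wr.wr.fast_reg.iova_start = iova;
 *	wr.wr.fast_reg.length = len;
 *	wr.wr.fast_reg.page_shift = PAGE_SHIFT;
 *	wr.wr.fast_reg.page_list = page_list;
 *	wr.wr.fast_reg.page_list_len = npages;
 *	wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
 *				      IB_ACCESS_REMOTE_WRITE;
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */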