/* drivers/infiniband/core/uverbs_cmd.c */
/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <asm/uaccess.h>

#include "uverbs.h"
#include "core_priv.h"

struct uverbs_lock_class {
        struct lock_class_key   key;
        char                    name[16];
};

static struct uverbs_lock_class pd_lock_class   = { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class   = { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class   = { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class   = { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class   = { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class   = { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class  = { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };

/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */
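
/*
 * A condensed sketch of the read-side pattern the command handlers below
 * follow (the CQ is just an example object type):
 *
 *      cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
 *      if (!cq)
 *              return -EINVAL;
 *      ... use the CQ while holding its uobject rwsem for reading ...
 *      put_cq_read(cq);
 *
 * idr_read_cq() takes a reference on the uobject's kref and down_read()s
 * its rwsem, failing if the object is no longer live; put_cq_read() drops
 * both again.
 */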

static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
                      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
        uobj->user_handle = user_handle;
        uobj->context     = context;
        kref_init(&uobj->ref);
        init_rwsem(&uobj->mutex);
        lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
        uobj->live        = 0;
}

static void release_uobj(struct kref *kref)
{
        kfree(container_of(kref, struct ib_uobject, ref));
}

static void put_uobj(struct ib_uobject *uobj)
{
        kref_put(&uobj->ref, release_uobj);
}

static void put_uobj_read(struct ib_uobject *uobj)
{
        up_read(&uobj->mutex);
        put_uobj(uobj);
}

static void put_uobj_write(struct ib_uobject *uobj)
{
        up_write(&uobj->mutex);
        put_uobj(uobj);
}

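/*
 * Allocate an idr entry for @uobj while holding ib_uverbs_idr_lock.
 * idr_preload() is used so that the allocation under the spinlock can be
 * done with GFP_NOWAIT.  Returns 0 and sets uobj->id on success, or a
 * negative errno.
 */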
static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
        int ret;

        idr_preload(GFP_KERNEL);
        spin_lock(&ib_uverbs_idr_lock);

        ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
        if (ret >= 0)
                uobj->id = ret;

        spin_unlock(&ib_uverbs_idr_lock);
        idr_preload_end();

        return ret < 0 ? ret : 0;
}

void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
        spin_lock(&ib_uverbs_idr_lock);
        idr_remove(idr, uobj->id);
        spin_unlock(&ib_uverbs_idr_lock);
}

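/*
 * Look up an object in @idr and take a reference on its kref, but only if
 * the object belongs to @context.  The idr spinlock is held across the
 * lookup so the last reference cannot be dropped underneath us.
 */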
static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
                                         struct ib_ucontext *context)
{
        struct ib_uobject *uobj;

        spin_lock(&ib_uverbs_idr_lock);
        uobj = idr_find(idr, id);
        if (uobj) {
                if (uobj->context == context)
                        kref_get(&uobj->ref);
                else
                        uobj = NULL;
        }
        spin_unlock(&ib_uverbs_idr_lock);

        return uobj;
}

static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
                                        struct ib_ucontext *context, int nested)
{
        struct ib_uobject *uobj;

        uobj = __idr_get_uobj(idr, id, context);
        if (!uobj)
                return NULL;

        if (nested)
                down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
        else
                down_read(&uobj->mutex);
        if (!uobj->live) {
                put_uobj_read(uobj);
                return NULL;
        }

        return uobj;
}

static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
                                         struct ib_ucontext *context)
{
        struct ib_uobject *uobj;

        uobj = __idr_get_uobj(idr, id, context);
        if (!uobj)
                return NULL;

        down_write(&uobj->mutex);
        if (!uobj->live) {
                put_uobj_write(uobj);
                return NULL;
        }

        return uobj;
}

static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
                          int nested)
{
        struct ib_uobject *uobj;

        uobj = idr_read_uobj(idr, id, context, nested);
        return uobj ? uobj->object : NULL;
}

static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
        return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
        put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
        return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
        put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
        return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
        put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
        return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
        struct ib_uobject *uobj;

        uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
        return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
        put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
        put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
        return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
        put_uobj_read(srq->uobject);
}

static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
                                     struct ib_uobject **uobj)
{
        *uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
        return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
        put_uobj_read(uobj);
}

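/*
 * The command handlers below all follow the same write()-based convention:
 * the fixed-size command struct is copied in from @buf, any response is
 * copied out to the user address carried in cmd.response, and the handler
 * returns @in_len on success or a negative errno on failure.
 */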
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
                              const char __user *buf,
                              int in_len, int out_len)
{
        struct ib_uverbs_get_context      cmd;
        struct ib_uverbs_get_context_resp resp;
        struct ib_udata                   udata;
        struct ib_device                 *ibdev = file->device->ib_dev;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        struct ib_device_attr             dev_attr;
#endif
        struct ib_ucontext               *ucontext;
        struct file                      *filp;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        mutex_lock(&file->mutex);

        if (file->ucontext) {
                ret = -EINVAL;
                goto err;
        }

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        ucontext = ibdev->alloc_ucontext(ibdev, &udata);
        if (IS_ERR(ucontext)) {
                ret = PTR_ERR(ucontext);
                goto err;
        }

        ucontext->device = ibdev;
        INIT_LIST_HEAD(&ucontext->pd_list);
        INIT_LIST_HEAD(&ucontext->mr_list);
        INIT_LIST_HEAD(&ucontext->mw_list);
        INIT_LIST_HEAD(&ucontext->cq_list);
        INIT_LIST_HEAD(&ucontext->qp_list);
        INIT_LIST_HEAD(&ucontext->srq_list);
        INIT_LIST_HEAD(&ucontext->ah_list);
        INIT_LIST_HEAD(&ucontext->xrcd_list);
        INIT_LIST_HEAD(&ucontext->rule_list);
        rcu_read_lock();
        ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
        rcu_read_unlock();
        ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        ucontext->umem_tree = RB_ROOT;
        init_rwsem(&ucontext->umem_rwsem);
        ucontext->odp_mrs_count = 0;
        INIT_LIST_HEAD(&ucontext->no_private_counters);

        ret = ib_query_device(ibdev, &dev_attr);
        if (ret)
                goto err_free;
        if (!(dev_attr.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
                ucontext->invalidate_range = NULL;

#endif

        resp.num_comp_vectors = file->device->num_comp_vectors;

        ret = get_unused_fd_flags(O_CLOEXEC);
        if (ret < 0)
                goto err_free;
        resp.async_fd = ret;

        filp = ib_uverbs_alloc_event_file(file, 1);
        if (IS_ERR(filp)) {
                ret = PTR_ERR(filp);
                goto err_fd;
        }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_file;
        }

        file->async_file = filp->private_data;

        INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
                              ib_uverbs_event_handler);
        ret = ib_register_event_handler(&file->event_handler);
        if (ret)
                goto err_file;

        kref_get(&file->async_file->ref);
        kref_get(&file->ref);
        file->ucontext = ucontext;

        fd_install(resp.async_fd, filp);

        mutex_unlock(&file->mutex);

        return in_len;

err_file:
        fput(filp);

err_fd:
        put_unused_fd(resp.async_fd);

err_free:
        put_pid(ucontext->tgid);
        ibdev->dealloc_ucontext(ucontext);

err:
        mutex_unlock(&file->mutex);
        return ret;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                               const char __user *buf,
                               int in_len, int out_len)
{
        struct ib_uverbs_query_device      cmd;
        struct ib_uverbs_query_device_resp resp;
        struct ib_device_attr              attr;
        int                                ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        ret = ib_query_device(file->device->ib_dev, &attr);
        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);

        resp.fw_ver                    = attr.fw_ver;
        resp.node_guid                 = file->device->ib_dev->node_guid;
        resp.sys_image_guid            = attr.sys_image_guid;
        resp.max_mr_size               = attr.max_mr_size;
        resp.page_size_cap             = attr.page_size_cap;
        resp.vendor_id                 = attr.vendor_id;
        resp.vendor_part_id            = attr.vendor_part_id;
        resp.hw_ver                    = attr.hw_ver;
        resp.max_qp                    = attr.max_qp;
        resp.max_qp_wr                 = attr.max_qp_wr;
        resp.device_cap_flags          = attr.device_cap_flags;
        resp.max_sge                   = attr.max_sge;
        resp.max_sge_rd                = attr.max_sge_rd;
        resp.max_cq                    = attr.max_cq;
        resp.max_cqe                   = attr.max_cqe;
        resp.max_mr                    = attr.max_mr;
        resp.max_pd                    = attr.max_pd;
        resp.max_qp_rd_atom            = attr.max_qp_rd_atom;
        resp.max_ee_rd_atom            = attr.max_ee_rd_atom;
        resp.max_res_rd_atom           = attr.max_res_rd_atom;
        resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
        resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
        resp.atomic_cap                = attr.atomic_cap;
        resp.max_ee                    = attr.max_ee;
        resp.max_rdd                   = attr.max_rdd;
        resp.max_mw                    = attr.max_mw;
        resp.max_raw_ipv6_qp           = attr.max_raw_ipv6_qp;
        resp.max_raw_ethy_qp           = attr.max_raw_ethy_qp;
        resp.max_mcast_grp             = attr.max_mcast_grp;
        resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
        resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
        resp.max_ah                    = attr.max_ah;
        resp.max_fmr                   = attr.max_fmr;
        resp.max_map_per_fmr           = attr.max_map_per_fmr;
        resp.max_srq                   = attr.max_srq;
        resp.max_srq_wr                = attr.max_srq_wr;
        resp.max_srq_sge               = attr.max_srq_sge;
        resp.max_pkeys                 = attr.max_pkeys;
        resp.local_ca_ack_delay        = attr.local_ca_ack_delay;
        resp.phys_port_cnt             = file->device->ib_dev->phys_port_cnt;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
                             const char __user *buf,
                             int in_len, int out_len)
{
        struct ib_uverbs_query_port      cmd;
        struct ib_uverbs_query_port_resp resp;
        struct ib_port_attr              attr;
        int                              ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);

        resp.state           = attr.state;
        resp.max_mtu         = attr.max_mtu;
        resp.active_mtu      = attr.active_mtu;
        resp.gid_tbl_len     = attr.gid_tbl_len;
        resp.port_cap_flags  = attr.port_cap_flags;
        resp.max_msg_sz      = attr.max_msg_sz;
        resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
        resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
        resp.pkey_tbl_len    = attr.pkey_tbl_len;
        resp.lid             = attr.lid;
        resp.sm_lid          = attr.sm_lid;
        resp.lmc             = attr.lmc;
        resp.max_vl_num      = attr.max_vl_num;
        resp.sm_sl           = attr.sm_sl;
        resp.subnet_timeout  = attr.subnet_timeout;
        resp.init_type_reply = attr.init_type_reply;
        resp.active_width    = attr.active_width;
        resp.active_speed    = attr.active_speed;
        resp.phys_state      = attr.phys_state;
        resp.link_layer      = rdma_port_get_link_layer(file->device->ib_dev,
                                                        cmd.port_num);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}

ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
                           const char __user *buf,
                           int in_len, int out_len)
{
        struct ib_uverbs_alloc_pd      cmd;
        struct ib_uverbs_alloc_pd_resp resp;
        struct ib_udata                udata;
        struct ib_uobject             *uobj;
        struct ib_pd                  *pd;
        int                            ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
        down_write(&uobj->mutex);

        pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
                                            file->ucontext, &udata);
        if (IS_ERR(pd)) {
                ret = PTR_ERR(pd);
                goto err;
        }

        pd->device  = file->device->ib_dev;
        pd->uobject = uobj;
        atomic_set(&pd->usecnt, 0);

        uobj->object = pd;
        ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
        if (ret)
                goto err_idr;

        memset(&resp, 0, sizeof resp);
        resp.pd_handle = uobj->id;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->pd_list);
        mutex_unlock(&file->mutex);

        uobj->live = 1;

        up_write(&uobj->mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
        ib_dealloc_pd(pd);

err:
        put_uobj_write(uobj);
        return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
                             const char __user *buf,
                             int in_len, int out_len)
{
        struct ib_uverbs_dealloc_pd cmd;
        struct ib_uobject          *uobj;
        int                         ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;

        ret = ib_dealloc_pd(uobj->object);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);

        return in_len;
}

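/*
 * XRC domains opened by file descriptor are shared between processes by
 * keeping a per-device rb-tree that maps the backing inode to its ib_xrcd.
 * The tree is protected by the device's xrcd_tree_mutex.
 */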
struct xrcd_table_entry {
        struct rb_node  node;
        struct ib_xrcd *xrcd;
        struct inode   *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
                            struct inode *inode,
                            struct ib_xrcd *xrcd)
{
        struct xrcd_table_entry *entry, *scan;
        struct rb_node **p = &dev->xrcd_tree.rb_node;
        struct rb_node *parent = NULL;

        entry = kmalloc(sizeof *entry, GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->xrcd  = xrcd;
        entry->inode = inode;

        while (*p) {
                parent = *p;
                scan = rb_entry(parent, struct xrcd_table_entry, node);

                if (inode < scan->inode) {
                        p = &(*p)->rb_left;
                } else if (inode > scan->inode) {
                        p = &(*p)->rb_right;
                } else {
                        kfree(entry);
                        return -EEXIST;
                }
        }

        rb_link_node(&entry->node, parent, p);
        rb_insert_color(&entry->node, &dev->xrcd_tree);
        igrab(inode);
        return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
                                                  struct inode *inode)
{
        struct xrcd_table_entry *entry;
        struct rb_node *p = dev->xrcd_tree.rb_node;

        while (p) {
                entry = rb_entry(p, struct xrcd_table_entry, node);

                if (inode < entry->inode)
                        p = p->rb_left;
                else if (inode > entry->inode)
                        p = p->rb_right;
                else
                        return entry;
        }

        return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
        struct xrcd_table_entry *entry;

        entry = xrcd_table_search(dev, inode);
        if (!entry)
                return NULL;

        return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
                              struct inode *inode)
{
        struct xrcd_table_entry *entry;

        entry = xrcd_table_search(dev, inode);
        if (entry) {
                iput(inode);
                rb_erase(&entry->node, &dev->xrcd_tree);
                kfree(entry);
        }
}

ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_open_xrcd      cmd;
        struct ib_uverbs_open_xrcd_resp resp;
        struct ib_udata                 udata;
        struct ib_uxrcd_object         *obj;
        struct ib_xrcd                 *xrcd = NULL;
        struct fd                       f = {NULL, 0};
        struct inode                   *inode = NULL;
        int                             ret = 0;
        int                             new_xrcd = 0;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof  resp);

        mutex_lock(&file->device->xrcd_tree_mutex);

        if (cmd.fd != -1) {
                /* search for file descriptor */
                f = fdget(cmd.fd);
                if (!f.file) {
                        ret = -EBADF;
                        goto err_tree_mutex_unlock;
                }

                inode = file_inode(f.file);
                xrcd = find_xrcd(file->device, inode);
                if (!xrcd && !(cmd.oflags & O_CREAT)) {
                        /* no file descriptor. Need CREATE flag */
                        ret = -EAGAIN;
                        goto err_tree_mutex_unlock;
                }

                if (xrcd && cmd.oflags & O_EXCL) {
                        ret = -EINVAL;
                        goto err_tree_mutex_unlock;
                }
        }

        obj = kmalloc(sizeof *obj, GFP_KERNEL);
        if (!obj) {
                ret = -ENOMEM;
                goto err_tree_mutex_unlock;
        }

        init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

        down_write(&obj->uobject.mutex);

        if (!xrcd) {
                xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
                                                        file->ucontext, &udata);
                if (IS_ERR(xrcd)) {
                        ret = PTR_ERR(xrcd);
                        goto err;
                }

                xrcd->inode   = inode;
                xrcd->device  = file->device->ib_dev;
                atomic_set(&xrcd->usecnt, 0);
                mutex_init(&xrcd->tgt_qp_mutex);
                INIT_LIST_HEAD(&xrcd->tgt_qp_list);
                new_xrcd = 1;
        }

        atomic_set(&obj->refcnt, 0);
        obj->uobject.object = xrcd;
        ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
        if (ret)
                goto err_idr;

        memset(&resp, 0, sizeof resp);
        resp.xrcd_handle = obj->uobject.id;

        if (inode) {
                if (new_xrcd) {
                        /* create new inode/xrcd table entry */
                        ret = xrcd_table_insert(file->device, inode, xrcd);
                        if (ret)
                                goto err_insert_xrcd;
                }
                atomic_inc(&xrcd->usecnt);
        }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        if (f.file)
                fdput(f);

        mutex_lock(&file->mutex);
        list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
        mutex_unlock(&file->mutex);

        obj->uobject.live = 1;
        up_write(&obj->uobject.mutex);

        mutex_unlock(&file->device->xrcd_tree_mutex);
        return in_len;

err_copy:
        if (inode) {
                if (new_xrcd)
                        xrcd_table_delete(file->device, inode);
                atomic_dec(&xrcd->usecnt);
        }

err_insert_xrcd:
        idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
        ib_dealloc_xrcd(xrcd);

err:
        put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
        if (f.file)
                fdput(f);

        mutex_unlock(&file->device->xrcd_tree_mutex);

        return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_close_xrcd cmd;
        struct ib_uobject           *uobj;
        struct ib_xrcd              *xrcd = NULL;
        struct inode                *inode = NULL;
        struct ib_uxrcd_object      *obj;
        int                         live;
        int                         ret = 0;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        mutex_lock(&file->device->xrcd_tree_mutex);
        uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
        if (!uobj) {
                ret = -EINVAL;
                goto out;
        }

        xrcd  = uobj->object;
        inode = xrcd->inode;
        obj   = container_of(uobj, struct ib_uxrcd_object, uobject);
        if (atomic_read(&obj->refcnt)) {
                put_uobj_write(uobj);
                ret = -EBUSY;
                goto out;
        }

        if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
                ret = ib_dealloc_xrcd(uobj->object);
                if (!ret)
                        uobj->live = 0;
        }

        live = uobj->live;
        if (inode && ret)
                atomic_inc(&xrcd->usecnt);

        put_uobj_write(uobj);

        if (ret)
                goto out;

        if (inode && !live)
                xrcd_table_delete(file->device, inode);

        idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);
        ret = in_len;

out:
        mutex_unlock(&file->device->xrcd_tree_mutex);
        return ret;
}

void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
                            struct ib_xrcd *xrcd)
{
        struct inode *inode;

        inode = xrcd->inode;
        if (inode && !atomic_dec_and_test(&xrcd->usecnt))
                return;

        ib_dealloc_xrcd(xrcd);

        if (inode)
                xrcd_table_delete(dev, inode);
}

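/*
 * Register a user memory region on a PD.  The user address and the
 * requested HCA virtual address must share the same offset within a page,
 * and IB_ACCESS_ON_DEMAND is only accepted when the device reports
 * IB_DEVICE_ON_DEMAND_PAGING.
 */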
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
                         const char __user *buf, int in_len,
                         int out_len)
{
        struct ib_uverbs_reg_mr      cmd;
        struct ib_uverbs_reg_mr_resp resp;
        struct ib_udata              udata;
        struct ib_uobject           *uobj;
        struct ib_pd                *pd;
        struct ib_mr                *mr;
        int                          ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
                return -EINVAL;

        ret = ib_check_mr_access(cmd.access_flags);
        if (ret)
                return ret;

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
        down_write(&uobj->mutex);

        pd = idr_read_pd(cmd.pd_handle, file->ucontext);
        if (!pd) {
                ret = -EINVAL;
                goto err_free;
        }

        if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
                struct ib_device_attr attr;

                ret = ib_query_device(pd->device, &attr);
                if (ret || !(attr.device_cap_flags &
                                IB_DEVICE_ON_DEMAND_PAGING)) {
                        pr_debug("ODP support not available\n");
                        ret = -EINVAL;
                        goto err_put;
                }
        }

        mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
                                     cmd.access_flags, &udata);
        if (IS_ERR(mr)) {
                ret = PTR_ERR(mr);
                goto err_put;
        }

        mr->device  = pd->device;
        mr->pd      = pd;
        mr->uobject = uobj;
        atomic_inc(&pd->usecnt);
        atomic_set(&mr->usecnt, 0);

        uobj->object = mr;
        ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
        if (ret)
                goto err_unreg;

        memset(&resp, 0, sizeof resp);
        resp.lkey      = mr->lkey;
        resp.rkey      = mr->rkey;
        resp.mr_handle = uobj->id;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        put_pd_read(pd);

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->mr_list);
        mutex_unlock(&file->mutex);

        uobj->live = 1;

        up_write(&uobj->mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
        ib_dereg_mr(mr);

err_put:
        put_pd_read(pd);

err_free:
        put_uobj_write(uobj);
        return ret;
}

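/*
 * Re-register an existing MR: IB_MR_REREG_TRANS, IB_MR_REREG_PD and
 * IB_MR_REREG_ACCESS may be combined, but the MR must not currently be
 * in use (mr->usecnt non-zero fails with -EBUSY).
 */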
ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
                           const char __user *buf, int in_len,
                           int out_len)
{
        struct ib_uverbs_rereg_mr      cmd;
        struct ib_uverbs_rereg_mr_resp resp;
        struct ib_udata              udata;
        struct ib_pd                *pd = NULL;
        struct ib_mr                *mr;
        struct ib_pd                *old_pd;
        int                          ret;
        struct ib_uobject           *uobj;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof(cmd)))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof(cmd),
                   (unsigned long) cmd.response + sizeof(resp),
                   in_len - sizeof(cmd), out_len - sizeof(resp));

        if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
                return -EINVAL;

        if ((cmd.flags & IB_MR_REREG_TRANS) &&
            (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
             (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
                        return -EINVAL;

        uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
                              file->ucontext);

        if (!uobj)
                return -EINVAL;

        mr = uobj->object;

        if (cmd.flags & IB_MR_REREG_ACCESS) {
                ret = ib_check_mr_access(cmd.access_flags);
                if (ret)
                        goto put_uobjs;
        }

        if (cmd.flags & IB_MR_REREG_PD) {
                pd = idr_read_pd(cmd.pd_handle, file->ucontext);
                if (!pd) {
                        ret = -EINVAL;
                        goto put_uobjs;
                }
        }

        if (atomic_read(&mr->usecnt)) {
                ret = -EBUSY;
                goto put_uobj_pd;
        }

        old_pd = mr->pd;
        ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
                                        cmd.length, cmd.hca_va,
                                        cmd.access_flags, pd, &udata);
        if (!ret) {
                if (cmd.flags & IB_MR_REREG_PD) {
                        atomic_inc(&pd->usecnt);
                        mr->pd = pd;
                        atomic_dec(&old_pd->usecnt);
                }
        } else {
                goto put_uobj_pd;
        }

        memset(&resp, 0, sizeof(resp));
        resp.lkey      = mr->lkey;
        resp.rkey      = mr->rkey;

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;
        else
                ret = in_len;

put_uobj_pd:
        if (cmd.flags & IB_MR_REREG_PD)
                put_pd_read(pd);

put_uobjs:

        put_uobj_write(mr->uobject);

        return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
                           const char __user *buf, int in_len,
                           int out_len)
{
        struct ib_uverbs_dereg_mr cmd;
        struct ib_mr             *mr;
        struct ib_uobject        *uobj;
        int                       ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;

        mr = uobj->object;

        ret = ib_dereg_mr(mr);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);

        return in_len;
}

ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
                         const char __user *buf, int in_len,
                         int out_len)
{
        struct ib_uverbs_alloc_mw      cmd;
        struct ib_uverbs_alloc_mw_resp resp;
        struct ib_uobject             *uobj;
        struct ib_pd                  *pd;
        struct ib_mw                  *mw;
        int                            ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof(cmd)))
                return -EFAULT;

        uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
        down_write(&uobj->mutex);

        pd = idr_read_pd(cmd.pd_handle, file->ucontext);
        if (!pd) {
                ret = -EINVAL;
                goto err_free;
        }

        mw = pd->device->alloc_mw(pd, cmd.mw_type);
        if (IS_ERR(mw)) {
                ret = PTR_ERR(mw);
                goto err_put;
        }

        mw->device  = pd->device;
        mw->pd      = pd;
        mw->uobject = uobj;
        atomic_inc(&pd->usecnt);

        uobj->object = mw;
        ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
        if (ret)
                goto err_unalloc;

        memset(&resp, 0, sizeof(resp));
        resp.rkey      = mw->rkey;
        resp.mw_handle = uobj->id;

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp))) {
                ret = -EFAULT;
                goto err_copy;
        }

        put_pd_read(pd);

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->mw_list);
        mutex_unlock(&file->mutex);

        uobj->live = 1;

        up_write(&uobj->mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
        ib_dealloc_mw(mw);

err_put:
        put_pd_read(pd);

err_free:
        put_uobj_write(uobj);
        return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
                           const char __user *buf, int in_len,
                           int out_len)
{
        struct ib_uverbs_dealloc_mw cmd;
        struct ib_mw               *mw;
        struct ib_uobject          *uobj;
        int                         ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof(cmd)))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;

        mw = uobj->object;

        ret = ib_dealloc_mw(mw);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);

        return in_len;
}

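/*
 * Create a completion event channel: reserve a file descriptor, back it
 * with a uverbs event file and return the fd to userspace.  The fd is only
 * installed once the response has been copied out successfully.
 */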
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
                                      const char __user *buf, int in_len,
                                      int out_len)
{
        struct ib_uverbs_create_comp_channel       cmd;
        struct ib_uverbs_create_comp_channel_resp  resp;
        struct file                               *filp;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        ret = get_unused_fd_flags(O_CLOEXEC);
        if (ret < 0)
                return ret;
        resp.fd = ret;

        filp = ib_uverbs_alloc_event_file(file, 0);
        if (IS_ERR(filp)) {
                put_unused_fd(resp.fd);
                return PTR_ERR(filp);
        }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                put_unused_fd(resp.fd);
                fput(filp);
                return -EFAULT;
        }

        fd_install(resp.fd, filp);
        return in_len;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_create_cq      cmd;
        struct ib_uverbs_create_cq_resp resp;
        struct ib_udata                 udata;
        struct ib_ucq_object           *obj;
        struct ib_uverbs_event_file    *ev_file = NULL;
        struct ib_cq                   *cq;
        int                             ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        if (cmd.comp_vector >= file->device->num_comp_vectors)
                return -EINVAL;

        obj = kmalloc(sizeof *obj, GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class);
        down_write(&obj->uobject.mutex);

        if (cmd.comp_channel >= 0) {
                ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
                if (!ev_file) {
                        ret = -EINVAL;
                        goto err;
                }
        }

        obj->uverbs_file           = file;
        obj->comp_events_reported  = 0;
        obj->async_events_reported = 0;
        INIT_LIST_HEAD(&obj->comp_list);
        INIT_LIST_HEAD(&obj->async_list);

        cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
                                             cmd.comp_vector,
                                             file->ucontext, &udata);
        if (IS_ERR(cq)) {
                ret = PTR_ERR(cq);
                goto err_file;
        }

        cq->device        = file->device->ib_dev;
        cq->uobject       = &obj->uobject;
        cq->comp_handler  = ib_uverbs_comp_handler;
        cq->event_handler = ib_uverbs_cq_event_handler;
        cq->cq_context    = ev_file;
        atomic_set(&cq->usecnt, 0);

        obj->uobject.object = cq;
        ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
        if (ret)
                goto err_free;

        memset(&resp, 0, sizeof resp);
        resp.cq_handle = obj->uobject.id;
        resp.cqe       = cq->cqe;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        mutex_lock(&file->mutex);
        list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
        mutex_unlock(&file->mutex);

        obj->uobject.live = 1;

        up_write(&obj->uobject.mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
        ib_destroy_cq(cq);

err_file:
        if (ev_file)
                ib_uverbs_release_ucq(file, ev_file, obj);

err:
        put_uobj_write(&obj->uobject);
        return ret;
}

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_resize_cq      cmd;
        struct ib_uverbs_resize_cq_resp resp;
        struct ib_udata                 udata;
        struct ib_cq                    *cq;
        int                             ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
        if (!cq)
                return -EINVAL;

        ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
        if (ret)
                goto out;

        resp.cqe = cq->cqe;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp.cqe))
                ret = -EFAULT;

out:
        put_cq_read(cq);

        return ret ? ret : in_len;
}

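/*
 * Convert a kernel struct ib_wc into the fixed-layout struct ib_uverbs_wc
 * expected by userspace and copy it to @dest.
 */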
static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
        struct ib_uverbs_wc tmp;

        tmp.wr_id               = wc->wr_id;
        tmp.status              = wc->status;
        tmp.opcode              = wc->opcode;
        tmp.vendor_err          = wc->vendor_err;
        tmp.byte_len            = wc->byte_len;
        tmp.ex.imm_data         = (__u32 __force) wc->ex.imm_data;
        tmp.qp_num              = wc->qp->qp_num;
        tmp.src_qp              = wc->src_qp;
        tmp.wc_flags            = wc->wc_flags;
        tmp.pkey_index          = wc->pkey_index;
        tmp.slid                = wc->slid;
        tmp.sl                  = wc->sl;
        tmp.dlid_path_bits      = wc->dlid_path_bits;
        tmp.port_num            = wc->port_num;
        tmp.reserved            = 0;

        if (copy_to_user(dest, &tmp, sizeof tmp))
                return -EFAULT;

        return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
                          const char __user *buf, int in_len,
                          int out_len)
{
        struct ib_uverbs_poll_cq       cmd;
        struct ib_uverbs_poll_cq_resp  resp;
        u8 __user                     *header_ptr;
        u8 __user                     *data_ptr;
        struct ib_cq                  *cq;
        struct ib_wc                   wc;
        int                            ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
        if (!cq)
                return -EINVAL;

        /* we copy a struct ib_uverbs_poll_cq_resp to user space */
        header_ptr = (void __user *)(unsigned long) cmd.response;
        data_ptr = header_ptr + sizeof resp;

        memset(&resp, 0, sizeof resp);
        while (resp.count < cmd.ne) {
                ret = ib_poll_cq(cq, 1, &wc);
                if (ret < 0)
                        goto out_put;
                if (!ret)
                        break;

                ret = copy_wc_to_user(data_ptr, &wc);
                if (ret)
                        goto out_put;

                data_ptr += sizeof(struct ib_uverbs_wc);
                ++resp.count;
        }

        if (copy_to_user(header_ptr, &resp, sizeof resp)) {
                ret = -EFAULT;
                goto out_put;
        }

        ret = in_len;

out_put:
        put_cq_read(cq);
        return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
                                const char __user *buf, int in_len,
                                int out_len)
{
        struct ib_uverbs_req_notify_cq cmd;
        struct ib_cq                  *cq;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
        if (!cq)
                return -EINVAL;

        ib_req_notify_cq(cq, cmd.solicited_only ?
                         IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

        put_cq_read(cq);

        return in_len;
}

ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_destroy_cq      cmd;
        struct ib_uverbs_destroy_cq_resp resp;
        struct ib_uobject               *uobj;
        struct ib_cq                    *cq;
        struct ib_ucq_object            *obj;
        struct ib_uverbs_event_file     *ev_file;
        int                              ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;
        cq      = uobj->object;
        ev_file = cq->cq_context;
        obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);

        ret = ib_destroy_cq(cq);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        ib_uverbs_release_ucq(file, ev_file, obj);

        memset(&resp, 0, sizeof resp);
        resp.comp_events_reported  = obj->comp_events_reported;
        resp.async_events_reported = obj->async_events_reported;

        put_uobj(uobj);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}

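/*
 * Create a QP.  Three flavours are handled here: IB_QPT_XRC_TGT takes only
 * an XRCD (passed in cmd.pd_handle), IB_QPT_XRC_INI carries no receive
 * resources, and all other types need a PD, send/receive CQs and
 * optionally an SRQ.
 */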
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_create_qp      cmd;
        struct ib_uverbs_create_qp_resp resp;
        struct ib_udata                 udata;
        struct ib_uqp_object           *obj;
        struct ib_device               *device;
        struct ib_pd                   *pd = NULL;
        struct ib_xrcd                 *xrcd = NULL;
        struct ib_uobject              *uninitialized_var(xrcd_uobj);
        struct ib_cq                   *scq = NULL, *rcq = NULL;
        struct ib_srq                  *srq = NULL;
        struct ib_qp                   *qp;
        struct ib_qp_init_attr          attr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
                return -EPERM;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        obj = kzalloc(sizeof *obj, GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
        down_write(&obj->uevent.uobject.mutex);

        if (cmd.qp_type == IB_QPT_XRC_TGT) {
                xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
                if (!xrcd) {
                        ret = -EINVAL;
                        goto err_put;
                }
                device = xrcd->device;
        } else {
                if (cmd.qp_type == IB_QPT_XRC_INI) {
                        cmd.max_recv_wr = cmd.max_recv_sge = 0;
                } else {
                        if (cmd.is_srq) {
                                srq = idr_read_srq(cmd.srq_handle, file->ucontext);
                                if (!srq || srq->srq_type != IB_SRQT_BASIC) {
                                        ret = -EINVAL;
                                        goto err_put;
                                }
                        }

                        if (cmd.recv_cq_handle != cmd.send_cq_handle) {
                                rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
                                if (!rcq) {
                                        ret = -EINVAL;
                                        goto err_put;
                                }
                        }
                }

                scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
                rcq = rcq ?: scq;
                pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
                if (!pd || !scq) {
                        ret = -EINVAL;
                        goto err_put;
                }

                device = pd->device;
        }

        attr.event_handler = ib_uverbs_qp_event_handler;
        attr.qp_context    = file;
        attr.send_cq       = scq;
        attr.recv_cq       = rcq;
        attr.srq           = srq;
        attr.xrcd          = xrcd;
        attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
        attr.qp_type       = cmd.qp_type;
        attr.create_flags  = 0;

        attr.cap.max_send_wr     = cmd.max_send_wr;
        attr.cap.max_recv_wr     = cmd.max_recv_wr;
        attr.cap.max_send_sge    = cmd.max_send_sge;
        attr.cap.max_recv_sge    = cmd.max_recv_sge;
        attr.cap.max_inline_data = cmd.max_inline_data;

        obj->uevent.events_reported     = 0;
        INIT_LIST_HEAD(&obj->uevent.event_list);
        INIT_LIST_HEAD(&obj->mcast_list);

        if (cmd.qp_type == IB_QPT_XRC_TGT)
                qp = ib_create_qp(pd, &attr);
        else
                qp = device->create_qp(pd, &attr, &udata);

        if (IS_ERR(qp)) {
                ret = PTR_ERR(qp);
                goto err_put;
        }

        if (cmd.qp_type != IB_QPT_XRC_TGT) {
1724                 qp->real_qp       = qp;
1725                 qp->device        = device;
1726                 qp->pd            = pd;
1727                 qp->send_cq       = attr.send_cq;
1728                 qp->recv_cq       = attr.recv_cq;
1729                 qp->srq           = attr.srq;
1730                 qp->event_handler = attr.event_handler;
1731                 qp->qp_context    = attr.qp_context;
1732                 qp->qp_type       = attr.qp_type;
1733                 atomic_set(&qp->usecnt, 0);
1734                 atomic_inc(&pd->usecnt);
1735                 atomic_inc(&attr.send_cq->usecnt);
1736                 if (attr.recv_cq)
1737                         atomic_inc(&attr.recv_cq->usecnt);
1738                 if (attr.srq)
1739                         atomic_inc(&attr.srq->usecnt);
1740         }
1741         qp->uobject = &obj->uevent.uobject;
1742
1743         obj->uevent.uobject.object = qp;
1744         ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1745         if (ret)
1746                 goto err_destroy;
1747
1748         memset(&resp, 0, sizeof resp);
1749         resp.qpn             = qp->qp_num;
1750         resp.qp_handle       = obj->uevent.uobject.id;
1751         resp.max_recv_sge    = attr.cap.max_recv_sge;
1752         resp.max_send_sge    = attr.cap.max_send_sge;
1753         resp.max_recv_wr     = attr.cap.max_recv_wr;
1754         resp.max_send_wr     = attr.cap.max_send_wr;
1755         resp.max_inline_data = attr.cap.max_inline_data;
1756
1757         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1758                          &resp, sizeof resp)) {
1759                 ret = -EFAULT;
1760                 goto err_copy;
1761         }
1762
1763         if (xrcd) {
1764                 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
1765                                           uobject);
1766                 atomic_inc(&obj->uxrcd->refcnt);
1767                 put_xrcd_read(xrcd_uobj);
1768         }
1769
1770         if (pd)
1771                 put_pd_read(pd);
1772         if (scq)
1773                 put_cq_read(scq);
1774         if (rcq && rcq != scq)
1775                 put_cq_read(rcq);
1776         if (srq)
1777                 put_srq_read(srq);
1778
1779         mutex_lock(&file->mutex);
1780         list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
1781         mutex_unlock(&file->mutex);
1782
1783         obj->uevent.uobject.live = 1;
1784
1785         up_write(&obj->uevent.uobject.mutex);
1786
1787         return in_len;
1788
1789 err_copy:
1790         idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1791
1792 err_destroy:
1793         ib_destroy_qp(qp);
1794
1795 err_put:
1796         if (xrcd)
1797                 put_xrcd_read(xrcd_uobj);
1798         if (pd)
1799                 put_pd_read(pd);
1800         if (scq)
1801                 put_cq_read(scq);
1802         if (rcq && rcq != scq)
1803                 put_cq_read(rcq);
1804         if (srq)
1805                 put_srq_read(srq);
1806
1807         put_uobj_write(&obj->uevent.uobject);
1808         return ret;
1809 }
1810
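/*
 * Open a userspace handle to an existing shareable (XRC target) QP,
 * identified by QP number, through the XRCD it belongs to.
 */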
1811 ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
1812                           const char __user *buf, int in_len, int out_len)
1813 {
1814         struct ib_uverbs_open_qp        cmd;
1815         struct ib_uverbs_create_qp_resp resp;
1816         struct ib_udata                 udata;
1817         struct ib_uqp_object           *obj;
1818         struct ib_xrcd                 *xrcd;
1819         struct ib_uobject              *uninitialized_var(xrcd_uobj);
1820         struct ib_qp                   *qp;
1821         struct ib_qp_open_attr          attr;
1822         int ret;
1823
1824         if (out_len < sizeof resp)
1825                 return -ENOSPC;
1826
1827         if (copy_from_user(&cmd, buf, sizeof cmd))
1828                 return -EFAULT;
1829
1830         INIT_UDATA(&udata, buf + sizeof cmd,
1831                    (unsigned long) cmd.response + sizeof resp,
1832                    in_len - sizeof cmd, out_len - sizeof resp);
1833
1834         obj = kmalloc(sizeof *obj, GFP_KERNEL);
1835         if (!obj)
1836                 return -ENOMEM;
1837
1838         init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
1839         down_write(&obj->uevent.uobject.mutex);
1840
1841         xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
1842         if (!xrcd) {
1843                 ret = -EINVAL;
1844                 goto err_put;
1845         }
1846
1847         attr.event_handler = ib_uverbs_qp_event_handler;
1848         attr.qp_context    = file;
1849         attr.qp_num        = cmd.qpn;
1850         attr.qp_type       = cmd.qp_type;
1851
1852         obj->uevent.events_reported = 0;
1853         INIT_LIST_HEAD(&obj->uevent.event_list);
1854         INIT_LIST_HEAD(&obj->mcast_list);
1855
1856         qp = ib_open_qp(xrcd, &attr);
1857         if (IS_ERR(qp)) {
1858                 ret = PTR_ERR(qp);
1859                 goto err_put;
1860         }
1861
1862         qp->uobject = &obj->uevent.uobject;
1863
1864         obj->uevent.uobject.object = qp;
1865         ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1866         if (ret)
1867                 goto err_destroy;
1868
1869         memset(&resp, 0, sizeof resp);
1870         resp.qpn       = qp->qp_num;
1871         resp.qp_handle = obj->uevent.uobject.id;
1872
1873         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1874                          &resp, sizeof resp)) {
1875                 ret = -EFAULT;
1876                 goto err_remove;
1877         }
1878
1879         obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
1880         atomic_inc(&obj->uxrcd->refcnt);
1881         put_xrcd_read(xrcd_uobj);
1882
1883         mutex_lock(&file->mutex);
1884         list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
1885         mutex_unlock(&file->mutex);
1886
1887         obj->uevent.uobject.live = 1;
1888
1889         up_write(&obj->uevent.uobject.mutex);
1890
1891         return in_len;
1892
1893 err_remove:
1894         idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1895
1896 err_destroy:
1897         ib_destroy_qp(qp);
1898
1899 err_put:
1900         put_xrcd_read(xrcd_uobj);
1901         put_uobj_write(&obj->uevent.uobject);
1902         return ret;
1903 }
1904
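/*
 * Query a QP's current attributes and its init attributes and copy them,
 * including the primary and alternate path address vectors, back to
 * userspace.
 */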
1905 ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
1906                            const char __user *buf, int in_len,
1907                            int out_len)
1908 {
1909         struct ib_uverbs_query_qp      cmd;
1910         struct ib_uverbs_query_qp_resp resp;
1911         struct ib_qp                   *qp;
1912         struct ib_qp_attr              *attr;
1913         struct ib_qp_init_attr         *init_attr;
1914         int                            ret;
1915
1916         if (copy_from_user(&cmd, buf, sizeof cmd))
1917                 return -EFAULT;
1918
1919         attr      = kmalloc(sizeof *attr, GFP_KERNEL);
1920         init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
1921         if (!attr || !init_attr) {
1922                 ret = -ENOMEM;
1923                 goto out;
1924         }
1925
1926         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1927         if (!qp) {
1928                 ret = -EINVAL;
1929                 goto out;
1930         }
1931
1932         ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
1933
1934         put_qp_read(qp);
1935
1936         if (ret)
1937                 goto out;
1938
1939         memset(&resp, 0, sizeof resp);
1940
1941         resp.qp_state               = attr->qp_state;
1942         resp.cur_qp_state           = attr->cur_qp_state;
1943         resp.path_mtu               = attr->path_mtu;
1944         resp.path_mig_state         = attr->path_mig_state;
1945         resp.qkey                   = attr->qkey;
1946         resp.rq_psn                 = attr->rq_psn;
1947         resp.sq_psn                 = attr->sq_psn;
1948         resp.dest_qp_num            = attr->dest_qp_num;
1949         resp.qp_access_flags        = attr->qp_access_flags;
1950         resp.pkey_index             = attr->pkey_index;
1951         resp.alt_pkey_index         = attr->alt_pkey_index;
1952         resp.sq_draining            = attr->sq_draining;
1953         resp.max_rd_atomic          = attr->max_rd_atomic;
1954         resp.max_dest_rd_atomic     = attr->max_dest_rd_atomic;
1955         resp.min_rnr_timer          = attr->min_rnr_timer;
1956         resp.port_num               = attr->port_num;
1957         resp.timeout                = attr->timeout;
1958         resp.retry_cnt              = attr->retry_cnt;
1959         resp.rnr_retry              = attr->rnr_retry;
1960         resp.alt_port_num           = attr->alt_port_num;
1961         resp.alt_timeout            = attr->alt_timeout;
1962
1963         memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
1964         resp.dest.flow_label        = attr->ah_attr.grh.flow_label;
1965         resp.dest.sgid_index        = attr->ah_attr.grh.sgid_index;
1966         resp.dest.hop_limit         = attr->ah_attr.grh.hop_limit;
1967         resp.dest.traffic_class     = attr->ah_attr.grh.traffic_class;
1968         resp.dest.dlid              = attr->ah_attr.dlid;
1969         resp.dest.sl                = attr->ah_attr.sl;
1970         resp.dest.src_path_bits     = attr->ah_attr.src_path_bits;
1971         resp.dest.static_rate       = attr->ah_attr.static_rate;
1972         resp.dest.is_global         = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
1973         resp.dest.port_num          = attr->ah_attr.port_num;
1974
1975         memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
1976         resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
1977         resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
1978         resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
1979         resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
1980         resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
1981         resp.alt_dest.sl            = attr->alt_ah_attr.sl;
1982         resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
1983         resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
1984         resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
1985         resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;
1986
1987         resp.max_send_wr            = init_attr->cap.max_send_wr;
1988         resp.max_recv_wr            = init_attr->cap.max_recv_wr;
1989         resp.max_send_sge           = init_attr->cap.max_send_sge;
1990         resp.max_recv_sge           = init_attr->cap.max_recv_sge;
1991         resp.max_inline_data        = init_attr->cap.max_inline_data;
1992         resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1993
1994         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1995                          &resp, sizeof resp))
1996                 ret = -EFAULT;
1997
1998 out:
1999         kfree(attr);
2000         kfree(init_attr);
2001
2002         return ret ? ret : in_len;
2003 }
2004
2005 /* Remove attribute-mask bits that do not apply to the given QP type */
2006 static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
2007 {
2008         switch (qp_type) {
2009         case IB_QPT_XRC_INI:
2010                 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
2011         case IB_QPT_XRC_TGT:
2012                 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
2013                                 IB_QP_RNR_RETRY);
2014         default:
2015                 return mask;
2016         }
2017 }
2018
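/*
 * Modify a QP: unpack the attributes from the command, drop mask bits
 * that do not apply to the QP type, and call the driver directly for a
 * real QP or ib_modify_qp() for a shared QP handle.
 */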
2019 ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
2020                             const char __user *buf, int in_len,
2021                             int out_len)
2022 {
2023         struct ib_uverbs_modify_qp cmd;
2024         struct ib_udata            udata;
2025         struct ib_qp              *qp;
2026         struct ib_qp_attr         *attr;
2027         int                        ret;
2028
2029         if (copy_from_user(&cmd, buf, sizeof cmd))
2030                 return -EFAULT;
2031
2032         INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
2033                    out_len);
2034
2035         attr = kmalloc(sizeof *attr, GFP_KERNEL);
2036         if (!attr)
2037                 return -ENOMEM;
2038
2039         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2040         if (!qp) {
2041                 ret = -EINVAL;
2042                 goto out;
2043         }
2044
2045         attr->qp_state            = cmd.qp_state;
2046         attr->cur_qp_state        = cmd.cur_qp_state;
2047         attr->path_mtu            = cmd.path_mtu;
2048         attr->path_mig_state      = cmd.path_mig_state;
2049         attr->qkey                = cmd.qkey;
2050         attr->rq_psn              = cmd.rq_psn;
2051         attr->sq_psn              = cmd.sq_psn;
2052         attr->dest_qp_num         = cmd.dest_qp_num;
2053         attr->qp_access_flags     = cmd.qp_access_flags;
2054         attr->pkey_index          = cmd.pkey_index;
2055         attr->alt_pkey_index      = cmd.alt_pkey_index;
2056         attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
2057         attr->max_rd_atomic       = cmd.max_rd_atomic;
2058         attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
2059         attr->min_rnr_timer       = cmd.min_rnr_timer;
2060         attr->port_num            = cmd.port_num;
2061         attr->timeout             = cmd.timeout;
2062         attr->retry_cnt           = cmd.retry_cnt;
2063         attr->rnr_retry           = cmd.rnr_retry;
2064         attr->alt_port_num        = cmd.alt_port_num;
2065         attr->alt_timeout         = cmd.alt_timeout;
2066
2067         memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
2068         attr->ah_attr.grh.flow_label        = cmd.dest.flow_label;
2069         attr->ah_attr.grh.sgid_index        = cmd.dest.sgid_index;
2070         attr->ah_attr.grh.hop_limit         = cmd.dest.hop_limit;
2071         attr->ah_attr.grh.traffic_class     = cmd.dest.traffic_class;
2072         attr->ah_attr.dlid                  = cmd.dest.dlid;
2073         attr->ah_attr.sl                    = cmd.dest.sl;
2074         attr->ah_attr.src_path_bits         = cmd.dest.src_path_bits;
2075         attr->ah_attr.static_rate           = cmd.dest.static_rate;
2076         attr->ah_attr.ah_flags              = cmd.dest.is_global ? IB_AH_GRH : 0;
2077         attr->ah_attr.port_num              = cmd.dest.port_num;
2078
2079         memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
2080         attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
2081         attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
2082         attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
2083         attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
2084         attr->alt_ah_attr.dlid              = cmd.alt_dest.dlid;
2085         attr->alt_ah_attr.sl                = cmd.alt_dest.sl;
2086         attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
2087         attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
2088         attr->alt_ah_attr.ah_flags          = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
2089         attr->alt_ah_attr.port_num          = cmd.alt_dest.port_num;
2090
2091         if (qp->real_qp == qp) {
2092                 ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
2093                 if (ret)
2094                         goto out;
2095                 ret = qp->device->modify_qp(qp, attr,
2096                         modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
2097         } else {
2098                 ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
2099         }
2100
2101         put_qp_read(qp);
2102
2103         if (ret)
2104                 goto out;
2105
2106         ret = in_len;
2107
2108 out:
2109         kfree(attr);
2110
2111         return ret;
2112 }
2113
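/*
 * Destroy a QP.  The call fails with -EBUSY while multicast groups are
 * still attached; otherwise the QP is destroyed, removed from the idr and
 * the per-file list, and the count of reported async events is returned.
 */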
2114 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
2115                              const char __user *buf, int in_len,
2116                              int out_len)
2117 {
2118         struct ib_uverbs_destroy_qp      cmd;
2119         struct ib_uverbs_destroy_qp_resp resp;
2120         struct ib_uobject               *uobj;
2121         struct ib_qp                    *qp;
2122         struct ib_uqp_object            *obj;
2123         int                              ret = -EINVAL;
2124
2125         if (copy_from_user(&cmd, buf, sizeof cmd))
2126                 return -EFAULT;
2127
2128         memset(&resp, 0, sizeof resp);
2129
2130         uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
2131         if (!uobj)
2132                 return -EINVAL;
2133         qp  = uobj->object;
2134         obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
2135
2136         if (!list_empty(&obj->mcast_list)) {
2137                 put_uobj_write(uobj);
2138                 return -EBUSY;
2139         }
2140
2141         ret = ib_destroy_qp(qp);
2142         if (!ret)
2143                 uobj->live = 0;
2144
2145         put_uobj_write(uobj);
2146
2147         if (ret)
2148                 return ret;
2149
2150         if (obj->uxrcd)
2151                 atomic_dec(&obj->uxrcd->refcnt);
2152
2153         idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
2154
2155         mutex_lock(&file->mutex);
2156         list_del(&uobj->list);
2157         mutex_unlock(&file->mutex);
2158
2159         ib_uverbs_release_uevent(file, &obj->uevent);
2160
2161         resp.events_reported = obj->uevent.events_reported;
2162
2163         put_uobj(uobj);
2164
2165         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2166                          &resp, sizeof resp))
2167                 return -EFAULT;
2168
2169         return in_len;
2170 }
2171
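/*
 * Post send work requests.  Each user WR is copied in, translated into an
 * ib_send_wr (opcode-specific fields plus its scatter/gather list) and
 * chained, then the whole chain is handed to the driver's post_send
 * method.  On error, resp.bad_wr tells userspace how far into the chain
 * the failing request was.
 */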
2172 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2173                             const char __user *buf, int in_len,
2174                             int out_len)
2175 {
2176         struct ib_uverbs_post_send      cmd;
2177         struct ib_uverbs_post_send_resp resp;
2178         struct ib_uverbs_send_wr       *user_wr;
2179         struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
2180         struct ib_qp                   *qp;
2181         int                             i, sg_ind;
2182         int                             is_ud;
2183         ssize_t                         ret = -EINVAL;
2184
2185         if (copy_from_user(&cmd, buf, sizeof cmd))
2186                 return -EFAULT;
2187
2188         if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
2189             cmd.sge_count * sizeof (struct ib_uverbs_sge))
2190                 return -EINVAL;
2191
2192         if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
2193                 return -EINVAL;
2194
2195         user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
2196         if (!user_wr)
2197                 return -ENOMEM;
2198
2199         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2200         if (!qp)
2201                 goto out;
2202
2203         is_ud = qp->qp_type == IB_QPT_UD;
2204         sg_ind = 0;
2205         last = NULL;
2206         for (i = 0; i < cmd.wr_count; ++i) {
2207                 if (copy_from_user(user_wr,
2208                                    buf + sizeof cmd + i * cmd.wqe_size,
2209                                    cmd.wqe_size)) {
2210                         ret = -EFAULT;
2211                         goto out_put;
2212                 }
2213
2214                 if (user_wr->num_sge + sg_ind > cmd.sge_count) {
2215                         ret = -EINVAL;
2216                         goto out_put;
2217                 }
2218
2219                 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2220                                user_wr->num_sge * sizeof (struct ib_sge),
2221                                GFP_KERNEL);
2222                 if (!next) {
2223                         ret = -ENOMEM;
2224                         goto out_put;
2225                 }
2226
2227                 if (!last)
2228                         wr = next;
2229                 else
2230                         last->next = next;
2231                 last = next;
2232
2233                 next->next       = NULL;
2234                 next->wr_id      = user_wr->wr_id;
2235                 next->num_sge    = user_wr->num_sge;
2236                 next->opcode     = user_wr->opcode;
2237                 next->send_flags = user_wr->send_flags;
2238
2239                 if (is_ud) {
2240                         next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
2241                                                      file->ucontext);
2242                         if (!next->wr.ud.ah) {
2243                                 ret = -EINVAL;
2244                                 goto out_put;
2245                         }
2246                         next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
2247                         next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
2248                         if (next->opcode == IB_WR_SEND_WITH_IMM)
2249                                 next->ex.imm_data =
2250                                         (__be32 __force) user_wr->ex.imm_data;
2251                 } else {
2252                         switch (next->opcode) {
2253                         case IB_WR_RDMA_WRITE_WITH_IMM:
2254                                 next->ex.imm_data =
2255                                         (__be32 __force) user_wr->ex.imm_data;
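                                /* fall through: RDMA write with immediate also needs the rdma fields */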
2256                         case IB_WR_RDMA_WRITE:
2257                         case IB_WR_RDMA_READ:
2258                                 next->wr.rdma.remote_addr =
2259                                         user_wr->wr.rdma.remote_addr;
2260                                 next->wr.rdma.rkey        =
2261                                         user_wr->wr.rdma.rkey;
2262                                 break;
2263                         case IB_WR_SEND_WITH_IMM:
2264                                 next->ex.imm_data =
2265                                         (__be32 __force) user_wr->ex.imm_data;
2266                                 break;
2267                         case IB_WR_SEND_WITH_INV:
2268                                 next->ex.invalidate_rkey =
2269                                         user_wr->ex.invalidate_rkey;
2270                                 break;
2271                         case IB_WR_ATOMIC_CMP_AND_SWP:
2272                         case IB_WR_ATOMIC_FETCH_AND_ADD:
2273                                 next->wr.atomic.remote_addr =
2274                                         user_wr->wr.atomic.remote_addr;
2275                                 next->wr.atomic.compare_add =
2276                                         user_wr->wr.atomic.compare_add;
2277                                 next->wr.atomic.swap = user_wr->wr.atomic.swap;
2278                                 next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
2279                                 break;
2280                         default:
2281                                 break;
2282                         }
2283                 }
2284
2285                 if (next->num_sge) {
2286                         next->sg_list = (void *) next +
2287                                 ALIGN(sizeof *next, sizeof (struct ib_sge));
2288                         if (copy_from_user(next->sg_list,
2289                                            buf + sizeof cmd +
2290                                            cmd.wr_count * cmd.wqe_size +
2291                                            sg_ind * sizeof (struct ib_sge),
2292                                            next->num_sge * sizeof (struct ib_sge))) {
2293                                 ret = -EFAULT;
2294                                 goto out_put;
2295                         }
2296                         sg_ind += next->num_sge;
2297                 } else
2298                         next->sg_list = NULL;
2299         }
2300
2301         resp.bad_wr = 0;
2302         ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
2303         if (ret)
2304                 for (next = wr; next; next = next->next) {
2305                         ++resp.bad_wr;
2306                         if (next == bad_wr)
2307                                 break;
2308                 }
2309
2310         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2311                          &resp, sizeof resp))
2312                 ret = -EFAULT;
2313
2314 out_put:
2315         put_qp_read(qp);
2316
2317         while (wr) {
2318                 if (is_ud && wr->wr.ud.ah)
2319                         put_ah_read(wr->wr.ud.ah);
2320                 next = wr->next;
2321                 kfree(wr);
2322                 wr = next;
2323         }
2324
2325 out:
2326         kfree(user_wr);
2327
2328         return ret ? ret : in_len;
2329 }
2330
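/*
 * Unmarshall a chain of receive work requests from userspace into a
 * linked list of ib_recv_wr, validating the WQE size and the total SGE
 * count.  Returns the list head or an ERR_PTR() on failure.
 */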
2331 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
2332                                                     int in_len,
2333                                                     u32 wr_count,
2334                                                     u32 sge_count,
2335                                                     u32 wqe_size)
2336 {
2337         struct ib_uverbs_recv_wr *user_wr;
2338         struct ib_recv_wr        *wr = NULL, *last, *next;
2339         int                       sg_ind;
2340         int                       i;
2341         int                       ret;
2342
2343         if (in_len < wqe_size * wr_count +
2344             sge_count * sizeof (struct ib_uverbs_sge))
2345                 return ERR_PTR(-EINVAL);
2346
2347         if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
2348                 return ERR_PTR(-EINVAL);
2349
2350         user_wr = kmalloc(wqe_size, GFP_KERNEL);
2351         if (!user_wr)
2352                 return ERR_PTR(-ENOMEM);
2353
2354         sg_ind = 0;
2355         last = NULL;
2356         for (i = 0; i < wr_count; ++i) {
2357                 if (copy_from_user(user_wr, buf + i * wqe_size,
2358                                    wqe_size)) {
2359                         ret = -EFAULT;
2360                         goto err;
2361                 }
2362
2363                 if (user_wr->num_sge + sg_ind > sge_count) {
2364                         ret = -EINVAL;
2365                         goto err;
2366                 }
2367
2368                 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2369                                user_wr->num_sge * sizeof (struct ib_sge),
2370                                GFP_KERNEL);
2371                 if (!next) {
2372                         ret = -ENOMEM;
2373                         goto err;
2374                 }
2375
2376                 if (!last)
2377                         wr = next;
2378                 else
2379                         last->next = next;
2380                 last = next;
2381
2382                 next->next       = NULL;
2383                 next->wr_id      = user_wr->wr_id;
2384                 next->num_sge    = user_wr->num_sge;
2385
2386                 if (next->num_sge) {
2387                         next->sg_list = (void *) next +
2388                                 ALIGN(sizeof *next, sizeof (struct ib_sge));
2389                         if (copy_from_user(next->sg_list,
2390                                            buf + wr_count * wqe_size +
2391                                            sg_ind * sizeof (struct ib_sge),
2392                                            next->num_sge * sizeof (struct ib_sge))) {
2393                                 ret = -EFAULT;
2394                                 goto err;
2395                         }
2396                         sg_ind += next->num_sge;
2397                 } else
2398                         next->sg_list = NULL;
2399         }
2400
2401         kfree(user_wr);
2402         return wr;
2403
2404 err:
2405         kfree(user_wr);
2406
2407         while (wr) {
2408                 next = wr->next;
2409                 kfree(wr);
2410                 wr = next;
2411         }
2412
2413         return ERR_PTR(ret);
2414 }
2415
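/*
 * Post receive work requests to a QP's receive queue, using the common
 * unmarshalling helper above.
 */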
2416 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
2417                             const char __user *buf, int in_len,
2418                             int out_len)
2419 {
2420         struct ib_uverbs_post_recv      cmd;
2421         struct ib_uverbs_post_recv_resp resp;
2422         struct ib_recv_wr              *wr, *next, *bad_wr;
2423         struct ib_qp                   *qp;
2424         ssize_t                         ret = -EINVAL;
2425
2426         if (copy_from_user(&cmd, buf, sizeof cmd))
2427                 return -EFAULT;
2428
2429         wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2430                                        in_len - sizeof cmd, cmd.wr_count,
2431                                        cmd.sge_count, cmd.wqe_size);
2432         if (IS_ERR(wr))
2433                 return PTR_ERR(wr);
2434
2435         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2436         if (!qp)
2437                 goto out;
2438
2439         resp.bad_wr = 0;
2440         ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
2441
2442         put_qp_read(qp);
2443
2444         if (ret)
2445                 for (next = wr; next; next = next->next) {
2446                         ++resp.bad_wr;
2447                         if (next == bad_wr)
2448                                 break;
2449                 }
2450
2451         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2452                          &resp, sizeof resp))
2453                 ret = -EFAULT;
2454
2455 out:
2456         while (wr) {
2457                 next = wr->next;
2458                 kfree(wr);
2459                 wr = next;
2460         }
2461
2462         return ret ? ret : in_len;
2463 }
2464
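/*
 * Post receive work requests to a shared receive queue.
 */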
2465 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
2466                                 const char __user *buf, int in_len,
2467                                 int out_len)
2468 {
2469         struct ib_uverbs_post_srq_recv      cmd;
2470         struct ib_uverbs_post_srq_recv_resp resp;
2471         struct ib_recv_wr                  *wr, *next, *bad_wr;
2472         struct ib_srq                      *srq;
2473         ssize_t                             ret = -EINVAL;
2474
2475         if (copy_from_user(&cmd, buf, sizeof cmd))
2476                 return -EFAULT;
2477
2478         wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2479                                        in_len - sizeof cmd, cmd.wr_count,
2480                                        cmd.sge_count, cmd.wqe_size);
2481         if (IS_ERR(wr))
2482                 return PTR_ERR(wr);
2483
2484         srq = idr_read_srq(cmd.srq_handle, file->ucontext);
2485         if (!srq)
2486                 goto out;
2487
2488         resp.bad_wr = 0;
2489         ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
2490
2491         put_srq_read(srq);
2492
2493         if (ret)
2494                 for (next = wr; next; next = next->next) {
2495                         ++resp.bad_wr;
2496                         if (next == bad_wr)
2497                                 break;
2498                 }
2499
2500         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2501                          &resp, sizeof resp))
2502                 ret = -EFAULT;
2503
2504 out:
2505         while (wr) {
2506                 next = wr->next;
2507                 kfree(wr);
2508                 wr = next;
2509         }
2510
2511         return ret ? ret : in_len;
2512 }
2513
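/*
 * Create an address handle on a PD from the address vector supplied by
 * userspace and return its handle.
 */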
2514 ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
2515                             const char __user *buf, int in_len,
2516                             int out_len)
2517 {
2518         struct ib_uverbs_create_ah       cmd;
2519         struct ib_uverbs_create_ah_resp  resp;
2520         struct ib_uobject               *uobj;
2521         struct ib_pd                    *pd;
2522         struct ib_ah                    *ah;
2523         struct ib_ah_attr               attr;
2524         int ret;
2525
2526         if (out_len < sizeof resp)
2527                 return -ENOSPC;
2528
2529         if (copy_from_user(&cmd, buf, sizeof cmd))
2530                 return -EFAULT;
2531
2532         uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
2533         if (!uobj)
2534                 return -ENOMEM;
2535
2536         init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
2537         down_write(&uobj->mutex);
2538
2539         pd = idr_read_pd(cmd.pd_handle, file->ucontext);
2540         if (!pd) {
2541                 ret = -EINVAL;
2542                 goto err;
2543         }
2544
2545         attr.dlid              = cmd.attr.dlid;
2546         attr.sl                = cmd.attr.sl;
2547         attr.src_path_bits     = cmd.attr.src_path_bits;
2548         attr.static_rate       = cmd.attr.static_rate;
2549         attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
2550         attr.port_num          = cmd.attr.port_num;
2551         attr.grh.flow_label    = cmd.attr.grh.flow_label;
2552         attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
2553         attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
2554         attr.grh.traffic_class = cmd.attr.grh.traffic_class;
2555         attr.vlan_id           = 0;
2556         memset(&attr.dmac, 0, sizeof(attr.dmac));
2557         memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
2558
2559         ah = ib_create_ah(pd, &attr);
2560         if (IS_ERR(ah)) {
2561                 ret = PTR_ERR(ah);
2562                 goto err_put;
2563         }
2564
2565         ah->uobject  = uobj;
2566         uobj->object = ah;
2567
2568         ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
2569         if (ret)
2570                 goto err_destroy;
2571
2572         resp.ah_handle = uobj->id;
2573
2574         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2575                          &resp, sizeof resp)) {
2576                 ret = -EFAULT;
2577                 goto err_copy;
2578         }
2579
2580         put_pd_read(pd);
2581
2582         mutex_lock(&file->mutex);
2583         list_add_tail(&uobj->list, &file->ucontext->ah_list);
2584         mutex_unlock(&file->mutex);
2585
2586         uobj->live = 1;
2587
2588         up_write(&uobj->mutex);
2589
2590         return in_len;
2591
2592 err_copy:
2593         idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
2594
2595 err_destroy:
2596         ib_destroy_ah(ah);
2597
2598 err_put:
2599         put_pd_read(pd);
2600
2601 err:
2602         put_uobj_write(uobj);
2603         return ret;
2604 }
2605
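/*
 * Destroy an address handle and remove its user object.
 */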
2606 ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
2607                              const char __user *buf, int in_len, int out_len)
2608 {
2609         struct ib_uverbs_destroy_ah cmd;
2610         struct ib_ah               *ah;
2611         struct ib_uobject          *uobj;
2612         int                         ret;
2613
2614         if (copy_from_user(&cmd, buf, sizeof cmd))
2615                 return -EFAULT;
2616
2617         uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
2618         if (!uobj)
2619                 return -EINVAL;
2620         ah = uobj->object;
2621
2622         ret = ib_destroy_ah(ah);
2623         if (!ret)
2624                 uobj->live = 0;
2625
2626         put_uobj_write(uobj);
2627
2628         if (ret)
2629                 return ret;
2630
2631         idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
2632
2633         mutex_lock(&file->mutex);
2634         list_del(&uobj->list);
2635         mutex_unlock(&file->mutex);
2636
2637         put_uobj(uobj);
2638
2639         return in_len;
2640 }
2641
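/*
 * Attach a QP to a multicast group.  The (GID, LID) pair is recorded on
 * the QP's user object so that detach and QP destruction can find it; a
 * pair that is already recorded is treated as a successful no-op.
 */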
2642 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
2643                                const char __user *buf, int in_len,
2644                                int out_len)
2645 {
2646         struct ib_uverbs_attach_mcast cmd;
2647         struct ib_qp                 *qp;
2648         struct ib_uqp_object         *obj;
2649         struct ib_uverbs_mcast_entry *mcast;
2650         int                           ret;
2651
2652         if (copy_from_user(&cmd, buf, sizeof cmd))
2653                 return -EFAULT;
2654
2655         qp = idr_write_qp(cmd.qp_handle, file->ucontext);
2656         if (!qp)
2657                 return -EINVAL;
2658
2659         obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2660
2661         list_for_each_entry(mcast, &obj->mcast_list, list)
2662                 if (cmd.mlid == mcast->lid &&
2663                     !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2664                         ret = 0;
2665                         goto out_put;
2666                 }
2667
2668         mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
2669         if (!mcast) {
2670                 ret = -ENOMEM;
2671                 goto out_put;
2672         }
2673
2674         mcast->lid = cmd.mlid;
2675         memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
2676
2677         ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
2678         if (!ret)
2679                 list_add_tail(&mcast->list, &obj->mcast_list);
2680         else
2681                 kfree(mcast);
2682
2683 out_put:
2684         put_qp_write(qp);
2685
2686         return ret ? ret : in_len;
2687 }
2688
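/*
 * Detach a QP from a multicast group and drop the matching entry from
 * the QP's multicast list.
 */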
2689 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
2690                                const char __user *buf, int in_len,
2691                                int out_len)
2692 {
2693         struct ib_uverbs_detach_mcast cmd;
2694         struct ib_uqp_object         *obj;
2695         struct ib_qp                 *qp;
2696         struct ib_uverbs_mcast_entry *mcast;
2697         int                           ret = -EINVAL;
2698
2699         if (copy_from_user(&cmd, buf, sizeof cmd))
2700                 return -EFAULT;
2701
2702         qp = idr_write_qp(cmd.qp_handle, file->ucontext);
2703         if (!qp)
2704                 return -EINVAL;
2705
2706         ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
2707         if (ret)
2708                 goto out_put;
2709
2710         obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2711
2712         list_for_each_entry(mcast, &obj->mcast_list, list)
2713                 if (cmd.mlid == mcast->lid &&
2714                     !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2715                         list_del(&mcast->list);
2716                         kfree(mcast);
2717                         break;
2718                 }
2719
2720 out_put:
2721         put_qp_write(qp);
2722
2723         return ret ? ret : in_len;
2724 }
2725
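/*
 * Convert a single userspace flow specification into its kernel
 * ib_flow_spec form, checking that the size declared by userspace matches
 * the kernel's size for that spec type.
 */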
2726 static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
2727                                 union ib_flow_spec *ib_spec)
2728 {
2729         if (kern_spec->reserved)
2730                 return -EINVAL;
2731
2732         ib_spec->type = kern_spec->type;
2733
2734         switch (ib_spec->type) {
2735         case IB_FLOW_SPEC_ETH:
2736                 ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
2737                 if (ib_spec->eth.size != kern_spec->eth.size)
2738                         return -EINVAL;
2739                 memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
2740                        sizeof(struct ib_flow_eth_filter));
2741                 memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
2742                        sizeof(struct ib_flow_eth_filter));
2743                 break;
2744         case IB_FLOW_SPEC_IPV4:
2745                 ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
2746                 if (ib_spec->ipv4.size != kern_spec->ipv4.size)
2747                         return -EINVAL;
2748                 memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
2749                        sizeof(struct ib_flow_ipv4_filter));
2750                 memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
2751                        sizeof(struct ib_flow_ipv4_filter));
2752                 break;
2753         case IB_FLOW_SPEC_TCP:
2754         case IB_FLOW_SPEC_UDP:
2755                 ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
2756                 if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
2757                         return -EINVAL;
2758                 memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
2759                        sizeof(struct ib_flow_tcp_udp_filter));
2760                 memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
2761                        sizeof(struct ib_flow_tcp_udp_filter));
2762                 break;
2763         default:
2764                 return -EINVAL;
2765         }
2766         return 0;
2767 }
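/*
 * Create a flow steering rule.  The variable-length list of flow specs is
 * copied from userspace, converted spec by spec with kern_spec_to_ib_spec()
 * and assembled into an ib_flow_attr, which is then attached to the QP
 * with ib_create_flow().
 */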
2768
2769 int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
2770                              struct ib_udata *ucore,
2771                              struct ib_udata *uhw)
2772 {
2773         struct ib_uverbs_create_flow      cmd;
2774         struct ib_uverbs_create_flow_resp resp;
2775         struct ib_uobject                 *uobj;
2776         struct ib_flow                    *flow_id;
2777         struct ib_uverbs_flow_attr        *kern_flow_attr;
2778         struct ib_flow_attr               *flow_attr;
2779         struct ib_qp                      *qp;
2780         int err = 0;
2781         void *kern_spec;
2782         void *ib_spec;
2783         int i;
2784
2785         if (ucore->inlen < sizeof(cmd))
2786                 return -EINVAL;
2787
2788         if (ucore->outlen < sizeof(resp))
2789                 return -ENOSPC;
2790
2791         err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
2792         if (err)
2793                 return err;
2794
2795         ucore->inbuf += sizeof(cmd);
2796         ucore->inlen -= sizeof(cmd);
2797
2798         if (cmd.comp_mask)
2799                 return -EINVAL;
2800
2801         if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
2802              !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
2803                 return -EPERM;
2804
2805         if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
2806                 return -EINVAL;
2807
2808         if (cmd.flow_attr.size > ucore->inlen ||
2809             cmd.flow_attr.size >
2810             (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
2811                 return -EINVAL;
2812
2813         if (cmd.flow_attr.reserved[0] ||
2814             cmd.flow_attr.reserved[1])
2815                 return -EINVAL;
2816
2817         if (cmd.flow_attr.num_of_specs) {
2818                 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
2819                                          GFP_KERNEL);
2820                 if (!kern_flow_attr)
2821                         return -ENOMEM;
2822
2823                 memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
2824                 err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
2825                                          cmd.flow_attr.size);
2826                 if (err)
2827                         goto err_free_attr;
2828         } else {
2829                 kern_flow_attr = &cmd.flow_attr;
2830         }
2831
2832         uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
2833         if (!uobj) {
2834                 err = -ENOMEM;
2835                 goto err_free_attr;
2836         }
2837         init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
2838         down_write(&uobj->mutex);
2839
2840         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2841         if (!qp) {
2842                 err = -EINVAL;
2843                 goto err_uobj;
2844         }
2845
2846         flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
2847         if (!flow_attr) {
2848                 err = -ENOMEM;
2849                 goto err_put;
2850         }
2851
2852         flow_attr->type = kern_flow_attr->type;
2853         flow_attr->priority = kern_flow_attr->priority;
2854         flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
2855         flow_attr->port = kern_flow_attr->port;
2856         flow_attr->flags = kern_flow_attr->flags;
2857         flow_attr->size = sizeof(*flow_attr);
2858
2859         kern_spec = kern_flow_attr + 1;
2860         ib_spec = flow_attr + 1;
2861         for (i = 0; i < flow_attr->num_of_specs &&
2862              cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
2863              cmd.flow_attr.size >=
2864              ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
2865                 err = kern_spec_to_ib_spec(kern_spec, ib_spec);
2866                 if (err)
2867                         goto err_free;
2868                 flow_attr->size +=
2869                         ((union ib_flow_spec *) ib_spec)->size;
2870                 cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
2871                 kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
2872                 ib_spec += ((union ib_flow_spec *) ib_spec)->size;
2873         }
2874         if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
2875                 pr_warn("create flow failed, flow %d: %d bytes left from uverbs cmd\n",
2876                         i, cmd.flow_attr.size);
2877                 err = -EINVAL;
2878                 goto err_free;
2879         }
2880         flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
2881         if (IS_ERR(flow_id)) {
2882                 err = PTR_ERR(flow_id);
2883                 goto err_free;
2884         }
2885         flow_id->qp = qp;
2886         flow_id->uobject = uobj;
2887         uobj->object = flow_id;
2888
2889         err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
2890         if (err)
2891                 goto destroy_flow;
2892
2893         memset(&resp, 0, sizeof(resp));
2894         resp.flow_handle = uobj->id;
2895
2896         err = ib_copy_to_udata(ucore,
2897                                &resp, sizeof(resp));
2898         if (err)
2899                 goto err_copy;
2900
2901         put_qp_read(qp);
2902         mutex_lock(&file->mutex);
2903         list_add_tail(&uobj->list, &file->ucontext->rule_list);
2904         mutex_unlock(&file->mutex);
2905
2906         uobj->live = 1;
2907
2908         up_write(&uobj->mutex);
2909         kfree(flow_attr);
2910         if (cmd.flow_attr.num_of_specs)
2911                 kfree(kern_flow_attr);
2912         return 0;
2913 err_copy:
2914         idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
2915 destroy_flow:
2916         ib_destroy_flow(flow_id);
2917 err_free:
2918         kfree(flow_attr);
2919 err_put:
2920         put_qp_read(qp);
2921 err_uobj:
2922         put_uobj_write(uobj);
2923 err_free_attr:
2924         if (cmd.flow_attr.num_of_specs)
2925                 kfree(kern_flow_attr);
2926         return err;
2927 }
2928
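/*
 * Destroy a flow steering rule and release its user object.
 */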
2929 int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
2930                               struct ib_udata *ucore,
2931                               struct ib_udata *uhw)
2932 {
2933         struct ib_uverbs_destroy_flow   cmd;
2934         struct ib_flow                  *flow_id;
2935         struct ib_uobject               *uobj;
2936         int                             ret;
2937
2938         if (ucore->inlen < sizeof(cmd))
2939                 return -EINVAL;
2940
2941         ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
2942         if (ret)
2943                 return ret;
2944
2945         if (cmd.comp_mask)
2946                 return -EINVAL;
2947
2948         uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
2949                               file->ucontext);
2950         if (!uobj)
2951                 return -EINVAL;
2952         flow_id = uobj->object;
2953
2954         ret = ib_destroy_flow(flow_id);
2955         if (!ret)
2956                 uobj->live = 0;
2957
2958         put_uobj_write(uobj);
2959
2960         idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
2961
2962         mutex_lock(&file->mutex);
2963         list_del(&uobj->list);
2964         mutex_unlock(&file->mutex);
2965
2966         put_uobj(uobj);
2967
2968         return ret;
2969 }
2970
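/*
 * Common helper for the create_srq and create_xsrq commands.  For an XRC
 * SRQ the XRCD and completion queue are looked up and referenced in
 * addition to the PD.
 */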
2971 static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
2972                                 struct ib_uverbs_create_xsrq *cmd,
2973                                 struct ib_udata *udata)
2974 {
2975         struct ib_uverbs_create_srq_resp resp;
2976         struct ib_usrq_object           *obj;
2977         struct ib_pd                    *pd;
2978         struct ib_srq                   *srq;
2979         struct ib_uobject               *uninitialized_var(xrcd_uobj);
2980         struct ib_srq_init_attr          attr;
2981         int ret;
2982
2983         obj = kmalloc(sizeof *obj, GFP_KERNEL);
2984         if (!obj)
2985                 return -ENOMEM;
2986
2987         init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
2988         down_write(&obj->uevent.uobject.mutex);
2989
2990         if (cmd->srq_type == IB_SRQT_XRC) {
2991                 attr.ext.xrc.xrcd  = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
2992                 if (!attr.ext.xrc.xrcd) {
2993                         ret = -EINVAL;
2994                         goto err;
2995                 }
2996
2997                 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
2998                 atomic_inc(&obj->uxrcd->refcnt);
2999
3000                 attr.ext.xrc.cq  = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
3001                 if (!attr.ext.xrc.cq) {
3002                         ret = -EINVAL;
3003                         goto err_put_xrcd;
3004                 }
3005         }
3006
3007         pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
3008         if (!pd) {
3009                 ret = -EINVAL;
3010                 goto err_put_cq;
3011         }
3012
3013         attr.event_handler  = ib_uverbs_srq_event_handler;
3014         attr.srq_context    = file;
3015         attr.srq_type       = cmd->srq_type;
3016         attr.attr.max_wr    = cmd->max_wr;
3017         attr.attr.max_sge   = cmd->max_sge;
3018         attr.attr.srq_limit = cmd->srq_limit;
3019
3020         obj->uevent.events_reported = 0;
3021         INIT_LIST_HEAD(&obj->uevent.event_list);
3022
3023         srq = pd->device->create_srq(pd, &attr, udata);
3024         if (IS_ERR(srq)) {
3025                 ret = PTR_ERR(srq);
3026                 goto err_put;
3027         }
3028
3029         srq->device        = pd->device;
3030         srq->pd            = pd;
3031         srq->srq_type      = cmd->srq_type;
3032         srq->uobject       = &obj->uevent.uobject;
3033         srq->event_handler = attr.event_handler;
3034         srq->srq_context   = attr.srq_context;
3035
3036         if (cmd->srq_type == IB_SRQT_XRC) {
3037                 srq->ext.xrc.cq   = attr.ext.xrc.cq;
3038                 srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
3039                 atomic_inc(&attr.ext.xrc.cq->usecnt);
3040                 atomic_inc(&attr.ext.xrc.xrcd->usecnt);
3041         }
3042
3043         atomic_inc(&pd->usecnt);
3044         atomic_set(&srq->usecnt, 0);
3045
3046         obj->uevent.uobject.object = srq;
3047         ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
3048         if (ret)
3049                 goto err_destroy;
3050
3051         memset(&resp, 0, sizeof resp);
3052         resp.srq_handle = obj->uevent.uobject.id;
3053         resp.max_wr     = attr.attr.max_wr;
3054         resp.max_sge    = attr.attr.max_sge;
3055         if (cmd->srq_type == IB_SRQT_XRC)
3056                 resp.srqn = srq->ext.xrc.srq_num;
3057
3058         if (copy_to_user((void __user *) (unsigned long) cmd->response,
3059                          &resp, sizeof resp)) {
3060                 ret = -EFAULT;
3061                 goto err_copy;
3062         }
3063
3064         if (cmd->srq_type == IB_SRQT_XRC) {
3065                 put_uobj_read(xrcd_uobj);
3066                 put_cq_read(attr.ext.xrc.cq);
3067         }
3068         put_pd_read(pd);
3069
3070         mutex_lock(&file->mutex);
3071         list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
3072         mutex_unlock(&file->mutex);
3073
3074         obj->uevent.uobject.live = 1;
3075
3076         up_write(&obj->uevent.uobject.mutex);
3077
3078         return 0;
3079
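/*
 * Error unwinding: undo the steps above in reverse order, dropping the
 * read locks and references taken on the PD, CQ and XRCD.
 */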
3080 err_copy:
3081         idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
3082
3083 err_destroy:
3084         ib_destroy_srq(srq);
3085
3086 err_put:
3087         put_pd_read(pd);
3088
3089 err_put_cq:
3090         if (cmd->srq_type == IB_SRQT_XRC)
3091                 put_cq_read(attr.ext.xrc.cq);
3092
3093 err_put_xrcd:
3094         if (cmd->srq_type == IB_SRQT_XRC) {
3095                 atomic_dec(&obj->uxrcd->refcnt);
3096                 put_uobj_read(xrcd_uobj);
3097         }
3098
3099 err:
3100         put_uobj_write(&obj->uevent.uobject);
3101         return ret;
3102 }
3103
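/*
 * Handle the original create_srq command: repackage it as an extended
 * create_xsrq command with srq_type IB_SRQT_BASIC, point the udata at
 * any provider-private bytes following the command and the response,
 * and let __uverbs_create_xsrq() do the real work.
 */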
3104 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
3105                              const char __user *buf, int in_len,
3106                              int out_len)
3107 {
3108         struct ib_uverbs_create_srq      cmd;
3109         struct ib_uverbs_create_xsrq     xcmd;
3110         struct ib_uverbs_create_srq_resp resp;
3111         struct ib_udata                  udata;
3112         int ret;
3113
3114         if (out_len < sizeof resp)
3115                 return -ENOSPC;
3116
3117         if (copy_from_user(&cmd, buf, sizeof cmd))
3118                 return -EFAULT;
3119
3120         xcmd.response    = cmd.response;
3121         xcmd.user_handle = cmd.user_handle;
3122         xcmd.srq_type    = IB_SRQT_BASIC;
3123         xcmd.pd_handle   = cmd.pd_handle;
3124         xcmd.max_wr      = cmd.max_wr;
3125         xcmd.max_sge     = cmd.max_sge;
3126         xcmd.srq_limit   = cmd.srq_limit;
3127
3128         INIT_UDATA(&udata, buf + sizeof cmd,
3129                    (unsigned long) cmd.response + sizeof resp,
3130                    in_len - sizeof cmd, out_len - sizeof resp);
3131
3132         ret = __uverbs_create_xsrq(file, &xcmd, &udata);
3133         if (ret)
3134                 return ret;
3135
3136         return in_len;
3137 }
3138
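/*
 * Handle the extended create_xsrq command, which lets userspace select
 * the SRQ type (for example IB_SRQT_XRC) and supply the associated
 * XRCD and CQ handles.
 */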
3139 ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
3140                               const char __user *buf, int in_len, int out_len)
3141 {
3142         struct ib_uverbs_create_xsrq     cmd;
3143         struct ib_uverbs_create_srq_resp resp;
3144         struct ib_udata                  udata;
3145         int ret;
3146
3147         if (out_len < sizeof resp)
3148                 return -ENOSPC;
3149
3150         if (copy_from_user(&cmd, buf, sizeof cmd))
3151                 return -EFAULT;
3152
3153         INIT_UDATA(&udata, buf + sizeof cmd,
3154                    (unsigned long) cmd.response + sizeof resp,
3155                    in_len - sizeof cmd, out_len - sizeof resp);
3156
3157         ret = __uverbs_create_xsrq(file, &cmd, &udata);
3158         if (ret)
3159                 return ret;
3160
3161         return in_len;
3162 }
3163
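/*
 * Modify an existing SRQ: look it up by handle, then pass max_wr and
 * srq_limit (as selected by cmd.attr_mask) plus any provider-private
 * udata to the device driver's modify_srq method.
 */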
3164 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
3165                              const char __user *buf, int in_len,
3166                              int out_len)
3167 {
3168         struct ib_uverbs_modify_srq cmd;
3169         struct ib_udata             udata;
3170         struct ib_srq              *srq;
3171         struct ib_srq_attr          attr;
3172         int                         ret;
3173
3174         if (copy_from_user(&cmd, buf, sizeof cmd))
3175                 return -EFAULT;
3176
3177         INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
3178                    out_len);
3179
3180         srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3181         if (!srq)
3182                 return -EINVAL;
3183
3184         attr.max_wr    = cmd.max_wr;
3185         attr.srq_limit = cmd.srq_limit;
3186
3187         ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
3188
3189         put_srq_read(srq);
3190
3191         return ret ? ret : in_len;
3192 }
3193
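/*
 * Query an SRQ's current attributes via ib_query_srq() and return
 * max_wr, max_sge and srq_limit to userspace.
 */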
3194 ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
3195                             const char __user *buf,
3196                             int in_len, int out_len)
3197 {
3198         struct ib_uverbs_query_srq      cmd;
3199         struct ib_uverbs_query_srq_resp resp;
3200         struct ib_srq_attr              attr;
3201         struct ib_srq                   *srq;
3202         int                             ret;
3203
3204         if (out_len < sizeof resp)
3205                 return -ENOSPC;
3206
3207         if (copy_from_user(&cmd, buf, sizeof cmd))
3208                 return -EFAULT;
3209
3210         srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3211         if (!srq)
3212                 return -EINVAL;
3213
3214         ret = ib_query_srq(srq, &attr);
3215
3216         put_srq_read(srq);
3217
3218         if (ret)
3219                 return ret;
3220
3221         memset(&resp, 0, sizeof resp);
3222
3223         resp.max_wr    = attr.max_wr;
3224         resp.max_sge   = attr.max_sge;
3225         resp.srq_limit = attr.srq_limit;
3226
3227         if (copy_to_user((void __user *) (unsigned long) cmd.response,
3228                          &resp, sizeof resp))
3229                 return -EFAULT;
3230
3231         return in_len;
3232 }
3233
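/*
 * Destroy an SRQ: tear down the hardware object first, then release the
 * user object, dropping the XRCD reference for XRC SRQs, removing the
 * handle from the idr and the per-context list, and reporting the number
 * of asynchronous events reported for this SRQ back to userspace.
 */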
3234 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
3235                               const char __user *buf, int in_len,
3236                               int out_len)
3237 {
3238         struct ib_uverbs_destroy_srq      cmd;
3239         struct ib_uverbs_destroy_srq_resp resp;
3240         struct ib_uobject                *uobj;
3241         struct ib_srq                    *srq;
3242         struct ib_uevent_object          *obj;
3243         int                               ret = -EINVAL;
3244         struct ib_usrq_object            *us;
3245         enum ib_srq_type                  srq_type;
3246
3247         if (copy_from_user(&cmd, buf, sizeof cmd))
3248                 return -EFAULT;
3249
3250         uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
3251         if (!uobj)
3252                 return -EINVAL;
3253         srq = uobj->object;
3254         obj = container_of(uobj, struct ib_uevent_object, uobject);
3255         srq_type = srq->srq_type;
3256
3257         ret = ib_destroy_srq(srq);
3258         if (!ret)
3259                 uobj->live = 0;
3260
3261         put_uobj_write(uobj);
3262
3263         if (ret)
3264                 return ret;
3265
3266         if (srq_type == IB_SRQT_XRC) {
3267                 us = container_of(obj, struct ib_usrq_object, uevent);
3268                 atomic_dec(&us->uxrcd->refcnt);
3269         }
3270
3271         idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
3272
3273         mutex_lock(&file->mutex);
3274         list_del(&uobj->list);
3275         mutex_unlock(&file->mutex);
3276
3277         ib_uverbs_release_uevent(file, obj);
3278
3279         memset(&resp, 0, sizeof resp);
3280         resp.events_reported = obj->events_reported;
3281
3282         put_uobj(uobj);
3283
3284         if (copy_to_user((void __user *) (unsigned long) cmd.response,
3285                          &resp, sizeof resp))
3286                 ret = -EFAULT;
3287
3288         return ret ? ret : in_len;
3289 }