drivers/infiniband/hw/mlx4/main.c
/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <net/ipv6.h>
#include <net/addrconf.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include "user.h"

#define DRV_NAME        MLX4_IB_DRV_NAME
#define DRV_VERSION     "2.2-1"
#define DRV_RELDATE     "Feb 2014"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

int mlx4_ib_sm_guid_assign = 1;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)");

static const char mlx4_ib_version[] =
        DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
        DRV_VERSION " (" DRV_RELDATE ")\n";

struct update_gid_work {
        struct work_struct      work;
        union ib_gid            gids[128];
        struct mlx4_ib_dev     *dev;
        int                     port;
};

static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);

static struct workqueue_struct *wq;

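/*
 * Fill in the fixed header of a subnet management GET query; callers set
 * attr_id/attr_mod before handing the MAD to mlx4_MAD_IFC().
 */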
static void init_query_mad(struct ib_smp *mad)
{
        mad->base_version  = 1;
        mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        mad->class_version = 1;
        mad->method        = IB_MGMT_METHOD_GET;
}

static union ib_gid zgid;

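/*
 * Device-managed flow steering (DMFS) is only usable if firmware
 * advertises it for every link type actually present: FS_EN for
 * Ethernet ports and DMFS_IPOIB for IB ports.  It is also unavailable
 * for IB ports when the device is multi-function (SR-IOV).
 */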
static int check_flow_steering_support(struct mlx4_dev *dev)
{
        int eth_num_ports = 0;
        int ib_num_ports = 0;

        int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

        if (dmfs) {
                int i;
                mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
                        eth_num_ports++;
                mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
                        ib_num_ports++;
                dmfs &= (!ib_num_ports ||
                         (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
                        (!eth_num_ports ||
                         (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
                if (ib_num_ports && mlx4_is_mfunc(dev)) {
                        pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
                        dmfs = 0;
                }
        }
        return dmfs;
}

static int num_ib_ports(struct mlx4_dev *dev)
{
        int ib_ports = 0;
        int i;

        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
                ib_ports++;

        return ib_ports;
}

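/*
 * Device attributes are assembled from two sources: a NODE_INFO MAD
 * answered by the firmware SMA (vendor id, hw revision, system image
 * GUID) and the limits the mlx4 core driver has already cached in
 * dev->caps and dev->quotas.
 */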
static int mlx4_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        int have_ib_ports;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
                           1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memset(props, 0, sizeof *props);

        have_ib_ports = num_ib_ports(dev->dev);

        props->fw_ver = dev->dev->caps.fw_ver;
        props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
                IB_DEVICE_PORT_ACTIVE_EVENT             |
                IB_DEVICE_SYS_IMAGE_GUID                |
                IB_DEVICE_RC_RNR_NAK_GEN                |
                IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
                props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
                props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
                props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
                props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
        if (dev->dev->caps.max_gso_sz &&
            (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
            (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
                props->device_cap_flags |= IB_DEVICE_UD_TSO;
        if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
                props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
        if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
            (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
            (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
                props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
                props->device_cap_flags |= IB_DEVICE_XRC;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
                props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
        if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
                if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
                        props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
                else
                        props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
        }
        if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
                props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

        props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
                0xffffff;
        props->vendor_part_id      = dev->dev->pdev->device;
        props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&props->sys_image_guid, out_mad->data +  4, 8);

        props->max_mr_size         = ~0ull;
        props->page_size_cap       = dev->dev->caps.page_size_cap;
        props->max_qp              = dev->dev->quotas.qp;
        props->max_qp_wr           = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
        props->max_sge             = min(dev->dev->caps.max_sq_sg,
                                         dev->dev->caps.max_rq_sg);
        props->max_cq              = dev->dev->quotas.cq;
        props->max_cqe             = dev->dev->caps.max_cqes;
        props->max_mr              = dev->dev->quotas.mpt;
        props->max_pd              = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
        props->max_qp_rd_atom      = dev->dev->caps.max_qp_dest_rdma;
        props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
        props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
        props->max_srq             = dev->dev->quotas.srq;
        props->max_srq_wr          = dev->dev->caps.max_srq_wqes - 1;
        props->max_srq_sge         = dev->dev->caps.max_srq_sge;
        props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
        props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
        props->atomic_cap          = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
                IB_ATOMIC_HCA : IB_ATOMIC_NONE;
        props->masked_atomic_cap   = props->atomic_cap;
        props->max_pkeys           = dev->dev->caps.pkey_table_len[1];
        props->max_mcast_grp       = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
        props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                           props->max_mcast_grp;
        props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;

out:
        kfree(in_mad);
        kfree(out_mad);

        return err;
}

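/*
 * Each ConnectX port runs either InfiniBand or Ethernet (RoCE); the
 * core driver encodes the configured type in dev->caps.port_mask.
 */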
static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
        struct mlx4_dev *dev = to_mdev(device)->dev;

        return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
                IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}

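/*
 * IB ports: read PORT_INFO through the firmware MAD interface and decode
 * the attributes from their fixed offsets in the response.  Extended
 * link speeds (FDR/EDR) are reported via a separate field, and FDR-10
 * must be probed with the Mellanox-specific EXTENDED_PORT_INFO attribute.
 */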
static int ib_link_query_port(struct ib_device *ibdev, u8 port,
                              struct ib_port_attr *props, int netw_view)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int ext_active_speed;
        int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
                mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

        err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
                                in_mad, out_mad);
        if (err)
                goto out;

        props->lid              = be16_to_cpup((__be16 *) (out_mad->data + 16));
        props->lmc              = out_mad->data[34] & 0x7;
        props->sm_lid           = be16_to_cpup((__be16 *) (out_mad->data + 18));
        props->sm_sl            = out_mad->data[36] & 0xf;
        props->state            = out_mad->data[32] & 0xf;
        props->phys_state       = out_mad->data[33] >> 4;
        props->port_cap_flags   = be32_to_cpup((__be32 *) (out_mad->data + 20));
        if (netw_view)
                props->gid_tbl_len = out_mad->data[50];
        else
                props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
        props->max_msg_sz       = to_mdev(ibdev)->dev->caps.max_msg_sz;
        props->pkey_tbl_len     = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
        props->bad_pkey_cntr    = be16_to_cpup((__be16 *) (out_mad->data + 46));
        props->qkey_viol_cntr   = be16_to_cpup((__be16 *) (out_mad->data + 48));
        props->active_width     = out_mad->data[31] & 0xf;
        props->active_speed     = out_mad->data[35] >> 4;
        props->max_mtu          = out_mad->data[41] & 0xf;
        props->active_mtu       = out_mad->data[36] >> 4;
        props->subnet_timeout   = out_mad->data[51] & 0x1f;
        props->max_vl_num       = out_mad->data[37] >> 4;
        props->init_type_reply  = out_mad->data[41] >> 4;

        /* Check if extended speeds (EDR/FDR/...) are supported */
        if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
                ext_active_speed = out_mad->data[62] >> 4;

                switch (ext_active_speed) {
                case 1:
                        props->active_speed = IB_SPEED_FDR;
                        break;
                case 2:
                        props->active_speed = IB_SPEED_EDR;
                        break;
                }
        }

        /* If the reported active speed is QDR, check whether it is FDR-10 */
        if (props->active_speed == IB_SPEED_QDR) {
                init_query_mad(in_mad);
                in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
                in_mad->attr_mod = cpu_to_be32(port);

                err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
                                   NULL, NULL, in_mad, out_mad);
                if (err)
                        goto out;

                /* Checking LinkSpeedActive for FDR-10 */
                if (out_mad->data[15] & 0x1)
                        props->active_speed = IB_SPEED_FDR10;
        }

        /* Avoid wrong speed value returned by FW if the IB link is down. */
        if (props->state == IB_PORT_DOWN)
                props->active_speed = IB_SPEED_SDR;

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

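/* Map logical port state to a PortPhysicalState value: 5 is LinkUp, 3 is Disabled. */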
static u8 state_to_phys_state(enum ib_port_state state)
{
        return state == IB_PORT_ACTIVE ? 5 : 3;
}

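/*
 * Ethernet (RoCE) ports have no subnet management agent, so IB port
 * attributes are synthesized from a QUERY_PORT firmware command plus
 * the state of the companion net_device (carrier and MTU).
 */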
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
                               struct ib_port_attr *props, int netw_view)
{
        struct mlx4_ib_dev *mdev = to_mdev(ibdev);
        struct mlx4_ib_iboe *iboe = &mdev->iboe;
        struct net_device *ndev;
        enum ib_mtu tmp;
        struct mlx4_cmd_mailbox *mailbox;
        int err = 0;

        mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
                           MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
                           MLX4_CMD_WRAPPED);
        if (err)
                goto out;

        props->active_width     = (((u8 *)mailbox->buf)[5] == 0x40) ?
                                                IB_WIDTH_4X : IB_WIDTH_1X;
        props->active_speed     = IB_SPEED_QDR;
        props->port_cap_flags   = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
        props->gid_tbl_len      = mdev->dev->caps.gid_table_len[port];
        props->max_msg_sz       = mdev->dev->caps.max_msg_sz;
        props->pkey_tbl_len     = 1;
        props->max_mtu          = IB_MTU_4096;
        props->max_vl_num       = 2;
        props->state            = IB_PORT_DOWN;
        props->phys_state       = state_to_phys_state(props->state);
        props->active_mtu       = IB_MTU_256;
        spin_lock_bh(&iboe->lock);
        ndev = iboe->netdevs[port - 1];
        if (!ndev)
                goto out_unlock;

        tmp = iboe_get_mtu(ndev->mtu);
        props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

        props->state            = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
                                        IB_PORT_ACTIVE : IB_PORT_DOWN;
        props->phys_state       = state_to_phys_state(props->state);
out_unlock:
        spin_unlock_bh(&iboe->lock);
out:
        mlx4_free_cmd_mailbox(mdev->dev, mailbox);
        return err;
}

int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
                         struct ib_port_attr *props, int netw_view)
{
        int err;

        memset(props, 0, sizeof *props);

        err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
                ib_link_query_port(ibdev, port, props, netw_view) :
                eth_link_query_port(ibdev, port, props, netw_view);

        return err;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
                              struct ib_port_attr *props)
{
        /* returns host view */
        return __mlx4_ib_query_port(ibdev, port, props, 0);
}

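/*
 * An IB GID is built from two MADs: the 8-byte subnet prefix from
 * PORT_INFO and an 8-byte GUID from the GUID_INFO block holding the
 * requested index (eight GUIDs per block).  In host view on a
 * multi-function device only index 0 is valid; all other indexes
 * return the null GUID.
 */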
int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
                        union ib_gid *gid, int netw_view)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        int clear = 0;
        int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        if (mlx4_is_mfunc(dev->dev) && netw_view)
                mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

        err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(gid->raw, out_mad->data + 8, 8);

        if (mlx4_is_mfunc(dev->dev) && !netw_view) {
                if (index) {
                        /* For any index > 0, return the null GUID */
                        err = 0;
                        clear = 1;
                        goto out;
                }
        }

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
        in_mad->attr_mod = cpu_to_be32(index / 8);

        err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
                           NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
        if (clear)
                memset(gid->raw + 8, 0, 8);
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
                          union ib_gid *gid)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);

        *gid = dev->iboe.gid_table[port - 1][index];

        return 0;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
                             union ib_gid *gid)
{
        if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
                return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
        else
                return iboe_query_gid(ibdev, port, index, gid);
}

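/*
 * P_Keys are read via the PKEY_TABLE attribute, 32 entries per MAD:
 * attr_mod selects the 32-entry block, index % 32 picks within it.
 */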
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                         u16 *pkey, int netw_view)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
        in_mad->attr_mod = cpu_to_be32(index / 32);

        if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
                mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

        err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
                           in_mad, out_mad);
        if (err)
                goto out;

        *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
        return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}

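/*
 * Only IB_DEVICE_MODIFY_NODE_DESC is supported.  The new description
 * is also handed to firmware via SET_NODE (best effort) so that it can
 * generate trap 144 and notify the SM of the change.
 */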
static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
                                 struct ib_device_modify *props)
{
        struct mlx4_cmd_mailbox *mailbox;
        unsigned long flags;

        if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
                return -EOPNOTSUPP;

        if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
                return 0;

        if (mlx4_is_slave(to_mdev(ibdev)->dev))
                return -EOPNOTSUPP;

        spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
        memcpy(ibdev->node_desc, props->node_desc, 64);
        spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

        /*
         * If possible, pass node desc to FW, so it can generate
         * a 144 trap.  If cmd fails, just ignore.
         */
        mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
        if (IS_ERR(mailbox))
                return 0;

        memcpy(mailbox->buf, props->node_desc, 64);
        mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
                 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

        mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

        return 0;
}

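/*
 * Update the port capability mask with a SET_PORT command; the mailbox
 * layout depends on whether the device uses the old or the new port
 * command format (MLX4_FLAG_OLD_PORT_CMDS).
 */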
static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
                            u32 cap_mask)
{
        struct mlx4_cmd_mailbox *mailbox;
        int err;

        mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
                *(u8 *) mailbox->buf         = !!reset_qkey_viols << 6;
                ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
        } else {
                ((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
                ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
        }

        err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);

        mlx4_free_cmd_mailbox(dev->dev, mailbox);
        return err;
}

static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
                               struct ib_port_modify *props)
{
        struct mlx4_ib_dev *mdev = to_mdev(ibdev);
        u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
        struct ib_port_attr attr;
        u32 cap_mask;
        int err;

        /* return OK if this is RoCE. CM calls ib_modify_port() regardless
         * of whether port link layer is ETH or IB. For ETH ports, qkey
         * violations and port capabilities are not meaningful.
         */
        if (is_eth)
                return 0;

        mutex_lock(&mdev->cap_mask_mutex);

        err = mlx4_ib_query_port(ibdev, port, &attr);
        if (err)
                goto out;

        cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
                ~props->clr_port_cap_mask;

        err = mlx4_ib_SET_PORT(mdev, port,
                               !!(mask & IB_PORT_RESET_QKEY_CNTR),
                               cap_mask);

out:
        mutex_unlock(&mdev->cap_mask_mutex);
        return err;
}

static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
                                                  struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct mlx4_ib_ucontext *context;
        struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
        struct mlx4_ib_alloc_ucontext_resp resp;
        int err;

        if (!dev->ib_active)
                return ERR_PTR(-EAGAIN);

        if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
                resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
                resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
                resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
        } else {
                resp.dev_caps         = dev->dev->caps.userspace_caps;
                resp.qp_tab_size      = dev->dev->caps.num_qps;
                resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
                resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
                resp.cqe_size         = dev->dev->caps.cqe_size;
        }

        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);

        err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
        if (err) {
                kfree(context);
                return ERR_PTR(err);
        }

        INIT_LIST_HEAD(&context->db_page_list);
        mutex_init(&context->db_page_mutex);

        if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
                err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
        else
                err = ib_copy_to_udata(udata, &resp, sizeof(resp));

        if (err) {
                mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
                kfree(context);
                return ERR_PTR(-EFAULT);
        }

        return &context->ibucontext;
}

static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
        struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

        mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
        kfree(context);

        return 0;
}

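/*
 * Userspace doorbell mappings: page offset 0 is the context's UAR page
 * (mapped non-cached), page offset 1 is the matching BlueFlame page,
 * which lives num_uars pages further into the BAR and is mapped
 * write-combining.
 */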
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        struct mlx4_ib_dev *dev = to_mdev(context->device);

        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;

        if (vma->vm_pgoff == 0) {
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

                if (io_remap_pfn_range(vma, vma->vm_start,
                                       to_mucontext(context)->uar.pfn,
                                       PAGE_SIZE, vma->vm_page_prot))
                        return -EAGAIN;
        } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

                if (io_remap_pfn_range(vma, vma->vm_start,
                                       to_mucontext(context)->uar.pfn +
                                       dev->dev->caps.num_uars,
                                       PAGE_SIZE, vma->vm_page_prot))
                        return -EAGAIN;
        } else
                return -EINVAL;

        return 0;
}

static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
                                      struct ib_ucontext *context,
                                      struct ib_udata *udata)
{
        struct mlx4_ib_pd *pd;
        int err;

        pd = kmalloc(sizeof *pd, GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
        if (err) {
                kfree(pd);
                return ERR_PTR(err);
        }

        if (context)
                if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
                        mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
                        kfree(pd);
                        return ERR_PTR(-EFAULT);
                }

        return &pd->ibpd;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
        mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
        kfree(pd);

        return 0;
}

static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
                                          struct ib_ucontext *context,
                                          struct ib_udata *udata)
{
        struct mlx4_ib_xrcd *xrcd;
        int err;

        if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
                return ERR_PTR(-ENOSYS);

        xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
        if (!xrcd)
                return ERR_PTR(-ENOMEM);

        err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
        if (err)
                goto err1;

        xrcd->pd = ib_alloc_pd(ibdev);
        if (IS_ERR(xrcd->pd)) {
                err = PTR_ERR(xrcd->pd);
                goto err2;
        }

        xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0);
        if (IS_ERR(xrcd->cq)) {
                err = PTR_ERR(xrcd->cq);
                goto err3;
        }

        return &xrcd->ibxrcd;

err3:
        ib_dealloc_pd(xrcd->pd);
err2:
        mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
err1:
        kfree(xrcd);
        return ERR_PTR(err);
}

static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
        ib_destroy_cq(to_mxrcd(xrcd)->cq);
        ib_dealloc_pd(to_mxrcd(xrcd)->pd);
        mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
        kfree(xrcd);

        return 0;
}

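/*
 * Remember each multicast GID attached to a QP on its gid_list;
 * mlx4_ib_add_mc() reports whether a RoCE net_device was up at attach
 * time, in which case the entry is marked "added" with its port so the
 * reference can be found again at detach.
 */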
static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
        struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
        struct mlx4_ib_gid_entry *ge;

        ge = kzalloc(sizeof *ge, GFP_KERNEL);
        if (!ge)
                return -ENOMEM;

        ge->gid = *gid;
        if (mlx4_ib_add_mc(mdev, mqp, gid)) {
                ge->port = mqp->port;
                ge->added = 1;
        }

        mutex_lock(&mqp->mutex);
        list_add_tail(&ge->list, &mqp->gid_list);
        mutex_unlock(&mqp->mutex);

        return 0;
}

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
                   union ib_gid *gid)
{
        struct net_device *ndev;
        int ret = 0;

        if (!mqp->port)
                return 0;

        spin_lock_bh(&mdev->iboe.lock);
        ndev = mdev->iboe.netdevs[mqp->port - 1];
        if (ndev)
                dev_hold(ndev);
        spin_unlock_bh(&mdev->iboe.lock);

        if (ndev) {
                ret = 1;
                dev_put(ndev);
        }

        return ret;
}

struct mlx4_ib_steering {
        struct list_head list;
        u64 reg_id;
        union ib_gid gid;
};

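/*
 * Translate one ib_flow_spec into the hardware _rule_hw segment that
 * the FLOW_STEERING_ATTACH command expects.  Returns the number of
 * bytes the segment occupies in the mailbox (mlx4_spec->size itself
 * is in dwords).
 */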
static int parse_flow_attr(struct mlx4_dev *dev,
                           u32 qp_num,
                           union ib_flow_spec *ib_spec,
                           struct _rule_hw *mlx4_spec)
{
        enum mlx4_net_trans_rule_id type;

        switch (ib_spec->type) {
        case IB_FLOW_SPEC_ETH:
                type = MLX4_NET_TRANS_RULE_ID_ETH;
                memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
                       ETH_ALEN);
                memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
                       ETH_ALEN);
                mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
                mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
                break;

        case IB_FLOW_SPEC_IB:
                type = MLX4_NET_TRANS_RULE_ID_IB;
                mlx4_spec->ib.l3_qpn =
                        cpu_to_be32(qp_num);
                mlx4_spec->ib.qpn_mask =
                        cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
                break;

        case IB_FLOW_SPEC_IPV4:
                type = MLX4_NET_TRANS_RULE_ID_IPV4;
                mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
                mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
                mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
                mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
                break;

        case IB_FLOW_SPEC_TCP:
        case IB_FLOW_SPEC_UDP:
                type = ib_spec->type == IB_FLOW_SPEC_TCP ?
                                        MLX4_NET_TRANS_RULE_ID_TCP :
                                        MLX4_NET_TRANS_RULE_ID_UDP;
                mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
                mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
                mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
                mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
                break;

        default:
                return -EINVAL;
        }
        if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
            mlx4_hw_rule_sz(dev, type) < 0)
                return -EINVAL;
        mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
        mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
        return mlx4_hw_rule_sz(dev, type);
}

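/*
 * Some flows need specs that userspace does not pass explicitly: an
 * IPv4 spec arriving without an Ethernet spec on an InfiniBand link
 * (the IPoIB case) must get an IB spec created under the covers.  The
 * table below lists those per-link-layer defaults.
 */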
struct default_rules {
        __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
        __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
        __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
        __u8  link_layer;
};

static const struct default_rules default_table[] = {
        {
                .mandatory_fields = {IB_FLOW_SPEC_IPV4},
                .mandatory_not_fields = {IB_FLOW_SPEC_ETH},
                .rules_create_list = {IB_FLOW_SPEC_IB},
                .link_layer = IB_LINK_LAYER_INFINIBAND
        }
};

static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
                                         struct ib_flow_attr *flow_attr)
{
        int i, j, k;
        void *ib_flow;
        const struct default_rules *pdefault_rules = default_table;
        u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);

        for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
                __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];

                memset(&field_types, 0, sizeof(field_types));

                if (link_layer != pdefault_rules->link_layer)
                        continue;

                ib_flow = flow_attr + 1;
                /* we assume the specs are sorted */
                for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
                     j < flow_attr->num_of_specs; k++) {
                        union ib_flow_spec *current_flow =
                                (union ib_flow_spec *)ib_flow;

                        /* same layer but different type */
                        if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
                             (pdefault_rules->mandatory_fields[k] &
                              IB_FLOW_SPEC_LAYER_MASK)) &&
                            (current_flow->type !=
                             pdefault_rules->mandatory_fields[k]))
                                goto out;

                        /* same layer, try match next one */
                        if (current_flow->type ==
                            pdefault_rules->mandatory_fields[k]) {
                                j++;
                                ib_flow +=
                                        ((union ib_flow_spec *)ib_flow)->size;
                        }
                }

                ib_flow = flow_attr + 1;
                for (j = 0; j < flow_attr->num_of_specs;
                     j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
                        for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
                                /* same layer and same type */
                                if (((union ib_flow_spec *)ib_flow)->type ==
                                    pdefault_rules->mandatory_not_fields[k])
                                        goto out;

                return i;
        }
out:
        return -1;
}

static int __mlx4_ib_create_default_rules(
                struct mlx4_ib_dev *mdev,
                struct ib_qp *qp,
                const struct default_rules *pdefault_rules,
                struct _rule_hw *mlx4_spec)
{
        int size = 0;
        int i;

        for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
                int ret;
                union ib_flow_spec ib_spec;

                switch (pdefault_rules->rules_create_list[i]) {
                case 0:
                        /* no rule */
                        continue;
                case IB_FLOW_SPEC_IB:
                        ib_spec.type = IB_FLOW_SPEC_IB;
                        ib_spec.size = sizeof(struct ib_flow_spec_ib);
                        break;
                default:
                        /* invalid rule */
                        return -EINVAL;
                }
                /* We must put an empty rule; the qpn is being ignored */
                ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
                                      mlx4_spec);
                if (ret < 0) {
                        pr_info("invalid parsing\n");
                        return -EINVAL;
                }

                mlx4_spec = (void *)mlx4_spec + ret;
                size += ret;
        }
        return size;
}

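/*
 * Build the complete steering rule in a command mailbox: the control
 * segment (domain/priority, port, QPN) followed by any default specs
 * matched above and then the caller's specs, and attach it with
 * FLOW_STEERING_ATTACH.  The command takes the rule size in dwords and
 * returns the registration id used later for detach.
 */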
static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
                          int domain,
                          enum mlx4_net_trans_promisc_mode flow_type,
                          u64 *reg_id)
{
        int ret, i;
        int size = 0;
        void *ib_flow;
        struct mlx4_ib_dev *mdev = to_mdev(qp->device);
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_net_trans_rule_hw_ctrl *ctrl;
        int default_flow;

        static const u16 __mlx4_domain[] = {
                [IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
                [IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
                [IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
                [IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
        };

        if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
                pr_err("Invalid priority value %d\n", flow_attr->priority);
                return -EINVAL;
        }

        if (domain >= IB_FLOW_DOMAIN_NUM) {
                pr_err("Invalid domain value %d\n", domain);
                return -EINVAL;
        }

        if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
                return -EINVAL;

        mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        ctrl = mailbox->buf;

        ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
                                 flow_attr->priority);
        ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
        ctrl->port = flow_attr->port;
        ctrl->qpn = cpu_to_be32(qp->qp_num);

        ib_flow = flow_attr + 1;
        size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
        /* Add default flows */
        default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
        if (default_flow >= 0) {
                ret = __mlx4_ib_create_default_rules(
                                mdev, qp, default_table + default_flow,
                                mailbox->buf + size);
                if (ret < 0) {
                        mlx4_free_cmd_mailbox(mdev->dev, mailbox);
                        return -EINVAL;
                }
                size += ret;
        }
        for (i = 0; i < flow_attr->num_of_specs; i++) {
                ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
                                      mailbox->buf + size);
                if (ret < 0) {
                        mlx4_free_cmd_mailbox(mdev->dev, mailbox);
                        return -EINVAL;
                }
                ib_flow += ((union ib_flow_spec *) ib_flow)->size;
                size += ret;
        }

        ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
                           MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
                           MLX4_CMD_NATIVE);
        if (ret == -ENOMEM)
                pr_err("mcg table is full. Failed to register network rule.\n");
        else if (ret == -ENXIO)
                pr_err("Device managed flow steering is disabled. Failed to register network rule.\n");
        else if (ret)
                pr_err("Invalid argument. Failed to register network rule.\n");

        mlx4_free_cmd_mailbox(mdev->dev, mailbox);
        return ret;
}

static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
{
        int err;

        err = mlx4_cmd(dev, reg_id, 0, 0,
                       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
                       MLX4_CMD_NATIVE);
        if (err)
                pr_err("Failed to detach network rule: registration id = 0x%llx\n",
                       reg_id);
        return err;
}

static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
                                    u64 *reg_id)
{
        void *ib_flow;
        union ib_flow_spec *ib_spec;
        struct mlx4_dev *dev = to_mdev(qp->device)->dev;
        int err = 0;

        if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
            dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
                return 0; /* do nothing */

        ib_flow = flow_attr + 1;
        ib_spec = (union ib_flow_spec *)ib_flow;

        if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
                return 0; /* do nothing */

        err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
                                    flow_attr->port, qp->qp_num,
                                    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
                                    reg_id);
        return err;
}

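/*
 * One ib_flow may expand to several hardware rules: sniffer flows need
 * both a unicast and a multicast rule, and on VXLAN-offload devices a
 * NORMAL flow gets an extra tunnel steering rule.  Every registration
 * id is kept in mflow->reg_id[] so destroy can detach them all.
 */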
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
                                    struct ib_flow_attr *flow_attr,
                                    int domain)
{
        int err = 0, i = 0;
        struct mlx4_ib_flow *mflow;
        enum mlx4_net_trans_promisc_mode type[2];

        memset(type, 0, sizeof(type));

        mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
        if (!mflow) {
                err = -ENOMEM;
                goto err_free;
        }

        switch (flow_attr->type) {
        case IB_FLOW_ATTR_NORMAL:
                type[0] = MLX4_FS_REGULAR;
                break;

        case IB_FLOW_ATTR_ALL_DEFAULT:
                type[0] = MLX4_FS_ALL_DEFAULT;
                break;

        case IB_FLOW_ATTR_MC_DEFAULT:
                type[0] = MLX4_FS_MC_DEFAULT;
                break;

        case IB_FLOW_ATTR_SNIFFER:
                type[0] = MLX4_FS_UC_SNIFFER;
                type[1] = MLX4_FS_MC_SNIFFER;
                break;

        default:
                err = -EINVAL;
                goto err_free;
        }

        while (i < ARRAY_SIZE(type) && type[i]) {
                err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
                                            &mflow->reg_id[i]);
                if (err)
                        goto err_create_flow;
                i++;
        }

        if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
                err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
                if (err)
                        goto err_create_flow;
                i++;
        }

        return &mflow->ibflow;

err_create_flow:
        /* Unwind only the rules that were actually attached: reg_id[i]
         * for the failed step was never set, so decrement before
         * destroying.
         */
        while (i) {
                i--;
                (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
                                             mflow->reg_id[i]);
        }
err_free:
        kfree(mflow);
        return ERR_PTR(err);
}

static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
{
        int err, ret = 0;
        int i = 0;
        struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
        struct mlx4_ib_flow *mflow = to_mflow(flow_id);

        while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i]) {
                err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i]);
                if (err)
                        ret = err;
                i++;
        }

        kfree(mflow);
        return ret;
}

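/*
 * Multicast attach: the protocol is picked from byte 1 of the MGID
 * (0x0e selects MLX4_PROT_IB_IPV4, anything else MLX4_PROT_IB_IPV6).
 * With device-managed steering the firmware hands back a registration
 * id, which is stashed on the QP so that detach can find it.
 */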
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        int err;
        struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
        u64 reg_id;
        struct mlx4_ib_steering *ib_steering = NULL;
        enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
                MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;

        if (mdev->dev->caps.steering_mode ==
            MLX4_STEERING_MODE_DEVICE_MANAGED) {
                ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
                if (!ib_steering)
                        return -ENOMEM;
        }

        err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
                                    !!(mqp->flags &
                                       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
                                    prot, &reg_id);
        if (err)
                goto err_malloc;

        err = add_gid_entry(ibqp, gid);
        if (err)
                goto err_add;

        if (ib_steering) {
                memcpy(ib_steering->gid.raw, gid->raw, 16);
                ib_steering->reg_id = reg_id;
                mutex_lock(&mqp->mutex);
                list_add(&ib_steering->list, &mqp->steering_rules);
                mutex_unlock(&mqp->mutex);
        }
        return 0;

err_add:
        mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
                              prot, reg_id);
err_malloc:
        kfree(ib_steering);

        return err;
}

static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
        struct mlx4_ib_gid_entry *ge;
        struct mlx4_ib_gid_entry *tmp;
        struct mlx4_ib_gid_entry *ret = NULL;

        list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
                if (!memcmp(raw, ge->gid.raw, 16)) {
                        ret = ge;
                        break;
                }
        }

        return ret;
}

static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        int err;
        struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
        struct net_device *ndev;
        struct mlx4_ib_gid_entry *ge;
        u64 reg_id = 0;
        enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
                MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;

        if (mdev->dev->caps.steering_mode ==
            MLX4_STEERING_MODE_DEVICE_MANAGED) {
                struct mlx4_ib_steering *ib_steering;

                mutex_lock(&mqp->mutex);
                list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
                        if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
                                list_del(&ib_steering->list);
                                break;
                        }
                }
                mutex_unlock(&mqp->mutex);
                if (&ib_steering->list == &mqp->steering_rules) {
                        pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
                        return -EINVAL;
                }
                reg_id = ib_steering->reg_id;
                kfree(ib_steering);
        }

        err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
                                    prot, reg_id);
        if (err)
                return err;

        mutex_lock(&mqp->mutex);
        ge = find_gid_entry(mqp, gid->raw);
        if (ge) {
                spin_lock_bh(&mdev->iboe.lock);
                ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
                if (ndev)
                        dev_hold(ndev);
                spin_unlock_bh(&mdev->iboe.lock);
                if (ndev)
                        dev_put(ndev);
                list_del(&ge->list);
                kfree(ge);
        } else
                pr_warn("could not find mgid entry\n");

        mutex_unlock(&mqp->mutex);

        return 0;
}

static int init_node_data(struct mlx4_ib_dev *dev)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
        if (mlx4_is_master(dev->dev))
                mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

        err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct mlx4_ib_dev *dev =
                container_of(device, struct mlx4_ib_dev, ib_dev.dev);

        return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
                           char *buf)
{
        struct mlx4_ib_dev *dev =
                container_of(device, struct mlx4_ib_dev, ib_dev.dev);

        return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
                       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
                       (int) dev->dev->caps.fw_ver & 0xffff);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct mlx4_ib_dev *dev =
                container_of(device, struct mlx4_ib_dev, ib_dev.dev);

        return sprintf(buf, "%x\n", dev->dev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
                          char *buf)
{
        struct mlx4_ib_dev *dev =
                container_of(device, struct mlx4_ib_dev, ib_dev.dev);

        return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
                       dev->dev->board_id);
}

static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);

static struct device_attribute *mlx4_class_attributes[] = {
        &dev_attr_hw_rev,
        &dev_attr_fw_ver,
        &dev_attr_hca_type,
        &dev_attr_board_id
};

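/*
 * Derive a modified EUI-64 interface id from the netdev MAC address:
 * splice in 0xff-0xfe (or the VLAN id when one is set) between the OUI
 * and NIC halves, and flip the universal/local bit, as done for IPv6
 * link-local addresses.
 */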
1421 static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
1422                                      struct net_device *dev)
1423 {
1424         memcpy(eui, dev->dev_addr, 3);
1425         memcpy(eui + 5, dev->dev_addr + 3, 3);
1426         if (vlan_id < 0x1000) {
1427                 eui[3] = vlan_id >> 8;
1428                 eui[4] = vlan_id & 0xff;
1429         } else {
1430                 eui[3] = 0xff;
1431                 eui[4] = 0xfe;
1432         }
1433         eui[0] ^= 2;
1434 }
1435
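/*
 * Deferred work: push the cached copy of the port's GID table to the
 * device via a SET_PORT mailbox command and, on success, dispatch
 * IB_EVENT_GID_CHANGE so consumers re-read the table.
 */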
1436 static void update_gids_task(struct work_struct *work)
1437 {
1438         struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
1439         struct mlx4_cmd_mailbox *mailbox;
1440         union ib_gid *gids;
1441         int err;
1442         struct mlx4_dev *dev = gw->dev->dev;
1443
1444         if (!gw->dev->ib_active)
1445                 return;
1446
1447         mailbox = mlx4_alloc_cmd_mailbox(dev);
1448         if (IS_ERR(mailbox)) {
1449                 pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
1450                 return;
1451         }
1452
1453         gids = mailbox->buf;
1454         memcpy(gids, gw->gids, sizeof gw->gids);
1455
1456         err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
1457                        1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1458                        MLX4_CMD_WRAPPED);
1459         if (err)
1460                 pr_warn("set port command failed\n");
1461         else
1462                 mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
1463
1464         mlx4_free_cmd_mailbox(dev, mailbox);
1465         kfree(gw);
1466 }
1467
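/*
 * Deferred work: write a zeroed GID table to the device for Ethernet
 * (RoCE) ports; unlike update_gids_task(), no GID change event is
 * dispatched.
 */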
1468 static void reset_gids_task(struct work_struct *work)
1469 {
1470         struct update_gid_work *gw =
1471                         container_of(work, struct update_gid_work, work);
1472         struct mlx4_cmd_mailbox *mailbox;
1473         union ib_gid *gids;
1474         int err;
1475         struct mlx4_dev *dev = gw->dev->dev;
1476
1477         if (!gw->dev->ib_active)
1478                 return;
1479
1480         mailbox = mlx4_alloc_cmd_mailbox(dev);
1481         if (IS_ERR(mailbox)) {
1482                 pr_warn("reset gid table failed\n");
1483                 goto free;
1484         }
1485
1486         gids = mailbox->buf;
1487         memcpy(gids, gw->gids, sizeof(gw->gids));
1488
1489         if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
1490                                     IB_LINK_LAYER_ETHERNET) {
1491                 err = mlx4_cmd(dev, mailbox->dma,
1492                                MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
1493                                1, MLX4_CMD_SET_PORT,
1494                                MLX4_CMD_TIME_CLASS_B,
1495                                MLX4_CMD_WRAPPED);
1496                 if (err)
1497                         pr_warn("set port %d command failed\n",
1498                                 gw->port);
1499         }
1500
1501         mlx4_free_cmd_mailbox(dev, mailbox);
1502 free:
1503         kfree(gw);
1504 }
1505
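/*
 * Add or clear (per @clear) a GID in the software copy of the port's GID
 * table.  Slot 0 is reserved for the default GID; other entries are
 * deduplicated and go into the first free slot.  If the table actually
 * changed, queue an update_gids_task to sync the hardware copy.
 */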
1506 static int update_gid_table(struct mlx4_ib_dev *dev, int port,
1507                             union ib_gid *gid, int clear,
1508                             int default_gid)
1509 {
1510         struct update_gid_work *work;
1511         int i;
1512         int need_update = 0;
1513         int free = -1;
1514         int found = -1;
1515         int max_gids;
1516
1517         if (default_gid) {
1518                 free = 0;
1519         } else {
1520                 max_gids = dev->dev->caps.gid_table_len[port];
1521                 for (i = 1; i < max_gids; ++i) {
1522                         if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
1523                                     sizeof(*gid)))
1524                                 found = i;
1525
1526                         if (clear) {
1527                                 if (found >= 0) {
1528                                         need_update = 1;
1529                                         dev->iboe.gid_table[port - 1][found] =
1530                                                 zgid;
1531                                         break;
1532                                 }
1533                         } else {
1534                                 if (found >= 0)
1535                                         break;
1536
1537                                 if (free < 0 &&
1538                                     !memcmp(&dev->iboe.gid_table[port - 1][i],
1539                                             &zgid, sizeof(*gid)))
1540                                         free = i;
1541                         }
1542                 }
1543         }
1544
1545         if (found == -1 && !clear && free >= 0) {
1546                 dev->iboe.gid_table[port - 1][free] = *gid;
1547                 need_update = 1;
1548         }
1549
1550         if (!need_update)
1551                 return 0;
1552
1553         work = kzalloc(sizeof(*work), GFP_ATOMIC);
1554         if (!work)
1555                 return -ENOMEM;
1556
1557         memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof(work->gids));
1558         INIT_WORK(&work->work, update_gids_task);
1559         work->port = port;
1560         work->dev = dev;
1561         queue_work(wq, &work->work);
1562
1563         return 0;
1564 }
1565
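/* Default RoCE GID: fe80::/64 link-local prefix plus EUI-64, no VLAN. */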
1566 static void mlx4_make_default_gid(struct net_device *dev, union ib_gid *gid)
1567 {
1568         gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
1569         mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
1570 }
1571
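/*
 * Zero the software copy of the port's GID table and queue a
 * reset_gids_task to clear the hardware table as well.
 */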
1573 static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port)
1574 {
1575         struct update_gid_work *work;
1576
1577         work = kzalloc(sizeof(*work), GFP_ATOMIC);
1578         if (!work)
1579                 return -ENOMEM;
1580
1581         memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids));
1582         memset(work->gids, 0, sizeof(work->gids));
1583         INIT_WORK(&work->work, reset_gids_task);
1584         work->dev = dev;
1585         work->port = port;
1586         queue_work(wq, &work->work);
1587         return 0;
1588 }
1589
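/*
 * Common inet/inet6 notifier logic: on an address UP/DOWN event for a
 * netdev (or a VLAN upper of one) that backs a RoCE port, add or remove
 * the matching GID.  The default GID is managed separately and skipped.
 */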
1590 static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
1591                               struct mlx4_ib_dev *ibdev, union ib_gid *gid)
1592 {
1593         struct mlx4_ib_iboe *iboe;
1594         int port = 0;
1595         struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
1596                                 rdma_vlan_dev_real_dev(event_netdev) :
1597                                 event_netdev;
1598         union ib_gid default_gid;
1599
1600         mlx4_make_default_gid(real_dev, &default_gid);
1601
1602         if (!memcmp(gid, &default_gid, sizeof(*gid)))
1603                 return 0;
1604
1605         if (event != NETDEV_DOWN && event != NETDEV_UP)
1606                 return 0;
1607
1608         if ((real_dev != event_netdev) &&
1609             (event == NETDEV_DOWN) &&
1610             rdma_link_local_addr((struct in6_addr *)gid))
1611                 return 0;
1612
1613         iboe = &ibdev->iboe;
1614         spin_lock_bh(&iboe->lock);
1615
1616         for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
1617                 if ((netif_is_bond_master(real_dev) &&
1618                      (real_dev == iboe->masters[port - 1])) ||
1619                      (!netif_is_bond_master(real_dev) &&
1620                      (real_dev == iboe->netdevs[port - 1])))
1621                         update_gid_table(ibdev, port, gid,
1622                                          event == NETDEV_DOWN, 0);
1623
1624         spin_unlock_bh(&iboe->lock);
1625         return 0;
1627 }
1628
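/*
 * Map a netdev (resolving a VLAN upper to its real device) to the mlx4
 * port it backs; returns 0 if the netdev does not belong to this HCA.
 */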
1629 static u8 mlx4_ib_get_dev_port(struct net_device *dev,
1630                                struct mlx4_ib_dev *ibdev)
1631 {
1632         u8 port = 0;
1633         struct mlx4_ib_iboe *iboe;
1634         struct net_device *real_dev = rdma_vlan_dev_real_dev(dev) ?
1635                                 rdma_vlan_dev_real_dev(dev) : dev;
1636
1637         iboe = &ibdev->iboe;
1638
1639         for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
1640                 if ((netif_is_bond_master(real_dev) &&
1641                      (real_dev == iboe->masters[port - 1])) ||
1642                      (!netif_is_bond_master(real_dev) &&
1643                      (real_dev == iboe->netdevs[port - 1])))
1644                         break;
1645
1646         if ((port == 0) || (port > ibdev->dev->caps.num_ports))
1647                 return 0;
1648         else
1649                 return port;
1650 }
1651
1652 static int mlx4_ib_inet_event(struct notifier_block *this, unsigned long event,
1653                                 void *ptr)
1654 {
1655         struct mlx4_ib_dev *ibdev;
1656         struct in_ifaddr *ifa = ptr;
1657         union ib_gid gid;
1658         struct net_device *event_netdev = ifa->ifa_dev->dev;
1659
1660         ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
1661
1662         ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet);
1663
1664         mlx4_ib_addr_event(event, event_netdev, ibdev, &gid);
1665         return NOTIFY_DONE;
1666 }
1667
1668 #if IS_ENABLED(CONFIG_IPV6)
1669 static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
1670                                 void *ptr)
1671 {
1672         struct mlx4_ib_dev *ibdev;
1673         struct inet6_ifaddr *ifa = ptr;
1674         union ib_gid *gid = (union ib_gid *)&ifa->addr;
1675         struct net_device *event_netdev = ifa->idev->dev;
1676
1677         ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet6);
1678
1679         mlx4_ib_addr_event(event, event_netdev, ibdev, gid);
1680         return NOTIFY_DONE;
1681 }
1682 #endif
1683
1684 #define MLX4_IB_INVALID_MAC     ((u64)-1)
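/*
 * Record the port's current MAC and, under SR-IOV, re-register the new
 * source MAC for the para-virtualized QP1 proxy so outgoing MADs keep a
 * valid smac index; the replaced MAC is released once the QP update
 * succeeds.
 */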
1685 static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
1686                                struct net_device *dev,
1687                                int port)
1688 {
1689         u64 new_smac = 0;
1690         u64 release_mac = MLX4_IB_INVALID_MAC;
1691         struct mlx4_ib_qp *qp;
1692
1693         read_lock(&dev_base_lock);
1694         new_smac = mlx4_mac_to_u64(dev->dev_addr);
1695         read_unlock(&dev_base_lock);
1696
1697         atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
1698
1699         /* no need to update QP1 or register a MAC in non-SR-IOV mode */
1700         if (!mlx4_is_mfunc(ibdev->dev))
1701                 return;
1702
1703         mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
1704         qp = ibdev->qp1_proxy[port - 1];
1705         if (qp) {
1706                 int new_smac_index;
1707                 u64 old_smac;
1708                 struct mlx4_update_qp_params update_params;
1709
1710                 mutex_lock(&qp->mutex);
1711                 old_smac = qp->pri.smac;
1712                 if (new_smac == old_smac)
1713                         goto unlock;
1714
1715                 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
1716
1717                 if (new_smac_index < 0)
1718                         goto unlock;
1719
1720                 update_params.smac_index = new_smac_index;
1721                 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
1722                                    &update_params)) {
1723                         release_mac = new_smac;
1724                         goto unlock;
1725                 }
1726                 /* if old port was zero, no mac was yet registered for this QP */
1727                 if (qp->pri.smac_port)
1728                         release_mac = old_smac;
1729                 qp->pri.smac = new_smac;
1730                 qp->pri.smac_port = port;
1731                 qp->pri.smac_index = new_smac_index;
1732         }
1733
1734 unlock:
1735         if (release_mac != MLX4_IB_INVALID_MAC)
1736                 mlx4_unregister_mac(ibdev->dev, port, release_mac);
1737         if (qp)
1738                 mutex_unlock(&qp->mutex);
1739         mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
1740 }
1741
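/*
 * Seed the port's GID table from every address currently assigned to
 * @dev: IPv4 addresses as v4-mapped IPv6 GIDs, plus native IPv6
 * addresses (skipping the link-local default, which slot 0 holds).
 */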
1742 static void mlx4_ib_get_dev_addr(struct net_device *dev,
1743                                  struct mlx4_ib_dev *ibdev, u8 port)
1744 {
1745         struct in_device *in_dev;
1746 #if IS_ENABLED(CONFIG_IPV6)
1747         struct inet6_dev *in6_dev;
1748         union ib_gid  *pgid;
1749         struct inet6_ifaddr *ifp;
1750         union ib_gid default_gid;
1751 #endif
1752         union ib_gid gid;
1753
1755         if ((port == 0) || (port > ibdev->dev->caps.num_ports))
1756                 return;
1757
1758         /* IPv4 gids */
1759         in_dev = in_dev_get(dev);
1760         if (in_dev) {
1761                 for_ifa(in_dev) {
1762                         /* add a v4-mapped IPv6 GID per IPv4 address */
1763                         ipv6_addr_set_v4mapped(ifa->ifa_address,
1764                                                (struct in6_addr *)&gid);
1765                         update_gid_table(ibdev, port, &gid, 0, 0);
1766                 }
1767                 endfor_ifa(in_dev);
1768                 in_dev_put(in_dev);
1769         }
1770 #if IS_ENABLED(CONFIG_IPV6)
1771         mlx4_make_default_gid(dev, &default_gid);
1772         /* IPv6 gids */
1773         in6_dev = in6_dev_get(dev);
1774         if (in6_dev) {
1775                 read_lock_bh(&in6_dev->lock);
1776                 list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
1777                         pgid = (union ib_gid *)&ifp->addr;
1778                         if (!memcmp(pgid, &default_gid, sizeof(*pgid)))
1779                                 continue;
1780                         update_gid_table(ibdev, port, pgid, 0, 0);
1781                 }
1782                 read_unlock_bh(&in6_dev->lock);
1783                 in6_dev_put(in6_dev);
1784         }
1785 #endif
1786 }
1787
1788 static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev,
1789                                  struct net_device *dev, u8 port)
1790 {
1791         union ib_gid gid;
1792         mlx4_make_default_gid(dev, &gid);
1793         update_gid_table(ibdev, port, &gid, 0, 1);
1794 }
1795
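/*
 * One-time GID table bring-up: reset the table of every RoCE port, then
 * walk all netdevs in init_net and seed the default and address-derived
 * GIDs for the ports they back.
 */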
1796 static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
1797 {
1798         struct net_device *dev;
1799         struct mlx4_ib_iboe *iboe = &ibdev->iboe;
1800         int i;
1801         int err = 0;
1802
1803         for (i = 1; i <= ibdev->num_ports; ++i) {
1804                 if (rdma_port_get_link_layer(&ibdev->ib_dev, i) ==
1805                     IB_LINK_LAYER_ETHERNET) {
1806                         err = reset_gid_table(ibdev, i);
1807                         if (err)
1808                                 goto out;
1809                 }
1810         }
1811
1812         read_lock(&dev_base_lock);
1813         spin_lock_bh(&iboe->lock);
1814
1815         for_each_netdev(&init_net, dev) {
1816                 u8 port = mlx4_ib_get_dev_port(dev, ibdev);
1817                 /* port will be non-zero only for ETH ports */
1818                 if (port) {
1819                         mlx4_ib_set_default_gid(ibdev, dev, port);
1820                         mlx4_ib_get_dev_addr(dev, ibdev, port);
1821                 }
1822         }
1823
1824         spin_unlock_bh(&iboe->lock);
1825         read_unlock(&dev_base_lock);
1826 out:
1827         return err;
1828 }
1829
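/*
 * Netdev notifier worker: re-resolve the netdev and bonding master behind
 * each Ethernet port, rebuild GID tables when link state or bonding
 * topology changed, and kick a QP1 smac update when the event hit the
 * port's own netdev.
 */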
1830 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
1831                                  struct net_device *dev,
1832                                  unsigned long event)
1834 {
1835         struct mlx4_ib_iboe *iboe;
1836         int update_qps_port = -1;
1837         int port;
1838
1839         iboe = &ibdev->iboe;
1840
1841         spin_lock_bh(&iboe->lock);
1842         mlx4_foreach_ib_transport_port(port, ibdev->dev) {
1843                 enum ib_port_state      port_state = IB_PORT_NOP;
1844                 struct net_device *old_master = iboe->masters[port - 1];
1845                 struct net_device *curr_netdev;
1846                 struct net_device *curr_master;
1847
1848                 iboe->netdevs[port - 1] =
1849                         mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
1850                 if (iboe->netdevs[port - 1])
1851                         mlx4_ib_set_default_gid(ibdev,
1852                                                 iboe->netdevs[port - 1], port);
1853                 curr_netdev = iboe->netdevs[port - 1];
1854
1855                 if (iboe->netdevs[port - 1] &&
1856                     netif_is_bond_slave(iboe->netdevs[port - 1])) {
1857                         iboe->masters[port - 1] = netdev_master_upper_dev_get(
1858                                 iboe->netdevs[port - 1]);
1859                 } else {
1860                         iboe->masters[port - 1] = NULL;
1861                 }
1862                 curr_master = iboe->masters[port - 1];
1863
1864                 if (dev == iboe->netdevs[port - 1] &&
1865                     (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
1866                      event == NETDEV_UP || event == NETDEV_CHANGE))
1867                         update_qps_port = port;
1868
1869                 if (curr_netdev) {
1870                         port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
1871                                                 IB_PORT_ACTIVE : IB_PORT_DOWN;
1872                         mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
1873                         if (curr_master) {
1874                                 /* if using bonding/team and a slave port is down, we
1875                                  * don't want the bond IP based gids in the table since
1876                                  * flows that select port by gid may get the down port.
1877                                  */
1878                                 if (port_state == IB_PORT_DOWN) {
1879                                         reset_gid_table(ibdev, port);
1880                                         mlx4_ib_set_default_gid(ibdev,
1881                                                                 curr_netdev,
1882                                                                 port);
1883                                 } else {
1884                                         /* gids from the upper dev (bond/team)
1885                                          * should appear in port's gid table
1886                                          */
1887                                         mlx4_ib_get_dev_addr(curr_master,
1888                                                              ibdev, port);
1889                                 }
1890                         }
1891                         /* if bonding is used it is possible that we add it to
1892                          * masters only after IP address is assigned to the
1893                          * net bonding interface.
1894                          */
1895                         if (curr_master && (old_master != curr_master)) {
1896                                 reset_gid_table(ibdev, port);
1897                                 mlx4_ib_set_default_gid(ibdev,
1898                                                         curr_netdev, port);
1899                                 mlx4_ib_get_dev_addr(curr_master, ibdev, port);
1900                         }
1901
1902                         if (!curr_master && (old_master != curr_master)) {
1903                                 reset_gid_table(ibdev, port);
1904                                 mlx4_ib_set_default_gid(ibdev,
1905                                                         curr_netdev, port);
1906                                 mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
1907                         }
1908                 } else {
1909                         reset_gid_table(ibdev, port);
1910                 }
1911         }
1912
1913         spin_unlock_bh(&iboe->lock);
1914
1915         if (update_qps_port > 0)
1916                 mlx4_ib_update_qps(ibdev, dev, update_qps_port);
1917 }
1918
1919 static int mlx4_ib_netdev_event(struct notifier_block *this,
1920                                 unsigned long event, void *ptr)
1921 {
1922         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1923         struct mlx4_ib_dev *ibdev;
1924
1925         if (!net_eq(dev_net(dev), &init_net))
1926                 return NOTIFY_DONE;
1927
1928         ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
1929         mlx4_ib_scan_netdevs(ibdev, dev, event);
1930
1931         return NOTIFY_DONE;
1932 }
1933
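/*
 * On the SR-IOV master: program every slave's virtual-to-physical pkey
 * mapping (identity for the master and for index 0, the last table entry
 * otherwise) and seed the physical pkey cache.
 */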
1934 static void init_pkeys(struct mlx4_ib_dev *ibdev)
1935 {
1936         int port;
1937         int slave;
1938         int i;
1939
1940         if (mlx4_is_master(ibdev->dev)) {
1941                 for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) {
1942                         for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
1943                                 for (i = 0;
1944                                      i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
1945                                      ++i) {
1946                                         /* master has the identity virt2phys pkey mapping */
1947                                         ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
1948                                                 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
1949                                                         ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
1950                                         mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
1951                                                              ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
1952                                 }
1953                         }
1954                 }
1955                 /* initialize pkey cache */
1956                 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
1957                         for (i = 0;
1958                              i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
1959                              ++i)
1960                                 ibdev->pkeys.phys_pkey_cache[port-1][i] =
1961                                         (i) ? 0 : 0xFFFF;
1962                 }
1963         }
1964 }
1965
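/*
 * If the device exposes a pool of extra completion vectors, assign
 * eq_per_port dedicated EQs to each IB port (with per-ring IRQ names),
 * append the legacy vectors, and advertise the enlarged count to ULPs.
 */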
1966 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1967 {
1968         char name[80];
1969         int eq_per_port = 0;
1970         int added_eqs = 0;
1971         int total_eqs = 0;
1972         int i, j, eq;
1973
1974         /* Legacy mode or comp_pool is not large enough */
1975         if (dev->caps.comp_pool == 0 ||
1976             dev->caps.num_ports > dev->caps.comp_pool)
1977                 return;
1978
1979         eq_per_port = dev->caps.comp_pool / dev->caps.num_ports;
1980
1981         /* Init eq table */
1982         added_eqs = 0;
1983         mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
1984                 added_eqs += eq_per_port;
1985
1986         total_eqs = dev->caps.num_comp_vectors + added_eqs;
1987
1988         ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
1989         if (!ibdev->eq_table)
1990                 return;
1991
1992         ibdev->eq_added = added_eqs;
1993
1994         eq = 0;
1995         mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
1996                 for (j = 0; j < eq_per_port; j++) {
1997                         snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
1998                                  i, j, dev->pdev->bus->name);
1999                         /* Set IRQ for specific name (per ring) */
2000                         if (mlx4_assign_eq(dev, name, NULL,
2001                                            &ibdev->eq_table[eq])) {
2002                                 /* Use legacy (same as mlx4_en driver) */
2003                                 pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
2004                                 ibdev->eq_table[eq] =
2005                                         (eq % dev->caps.num_comp_vectors);
2006                         }
2007                         eq++;
2008                 }
2009         }
2010
2011         /* Fill the rest of the vector with legacy EQs */
2012         for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
2013                 ibdev->eq_table[eq++] = i;
2014
2015         /* Advertise the new number of EQs to clients */
2016         ibdev->ib_dev.num_comp_vectors = total_eqs;
2017 }
2018
2019 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2020 {
2021         int i;
2022
2023         /* no additional eqs were added */
2024         if (!ibdev->eq_table)
2025                 return;
2026
2027         /* Reset the advertised EQ number */
2028         ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
2029
2030         /* Free only the added eqs */
2031         for (i = 0; i < ibdev->eq_added; i++) {
2032                 /* Don't free legacy eqs if used */
2033                 if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
2034                         continue;
2035                 mlx4_release_eq(dev, ibdev->eq_table[i]);
2036         }
2037
2038         kfree(ibdev->eq_table);
2039 }
2040
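/*
 * Probe path: allocate the IB device, wire up the verbs entry points and
 * optional features (FMR, memory windows, XRC, flow steering), register
 * with the IB core, bring up SR-IOV support, and hook the netdev and
 * inet/inet6 notifiers that keep the RoCE GID tables current.
 */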
2041 static void *mlx4_ib_add(struct mlx4_dev *dev)
2042 {
2043         struct mlx4_ib_dev *ibdev;
2044         int num_ports = 0;
2045         int i, j;
2046         int err;
2047         struct mlx4_ib_iboe *iboe;
2048         int ib_num_ports = 0;
2049
2050         pr_info_once("%s", mlx4_ib_version);
2051
2052         num_ports = 0;
2053         mlx4_foreach_ib_transport_port(i, dev)
2054                 num_ports++;
2055
2056         /* No point in registering a device with no ports... */
2057         if (num_ports == 0)
2058                 return NULL;
2059
2060         ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
2061         if (!ibdev) {
2062                 dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
2063                 return NULL;
2064         }
2065
2066         iboe = &ibdev->iboe;
2067
2068         if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
2069                 goto err_dealloc;
2070
2071         if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
2072                 goto err_pd;
2073
2074         ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
2075                                  PAGE_SIZE);
2076         if (!ibdev->uar_map)
2077                 goto err_uar;
2078         MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
2079
2080         ibdev->dev = dev;
2081
2082         strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
2083         ibdev->ib_dev.owner             = THIS_MODULE;
2084         ibdev->ib_dev.node_type         = RDMA_NODE_IB_CA;
2085         ibdev->ib_dev.local_dma_lkey    = dev->caps.reserved_lkey;
2086         ibdev->num_ports                = num_ports;
2087         ibdev->ib_dev.phys_port_cnt     = ibdev->num_ports;
2088         ibdev->ib_dev.num_comp_vectors  = dev->caps.num_comp_vectors;
2089         ibdev->ib_dev.dma_device        = &dev->pdev->dev;
2090
2091         if (dev->caps.userspace_caps)
2092                 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
2093         else
2094                 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
2095
2096         ibdev->ib_dev.uverbs_cmd_mask   =
2097                 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
2098                 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
2099                 (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
2100                 (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
2101                 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
2102                 (1ull << IB_USER_VERBS_CMD_REG_MR)              |
2103                 (1ull << IB_USER_VERBS_CMD_REREG_MR)            |
2104                 (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
2105                 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2106                 (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
2107                 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
2108                 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
2109                 (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
2110                 (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
2111                 (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
2112                 (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
2113                 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
2114                 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
2115                 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
2116                 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
2117                 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
2118                 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
2119                 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)         |
2120                 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
2121
2122         ibdev->ib_dev.query_device      = mlx4_ib_query_device;
2123         ibdev->ib_dev.query_port        = mlx4_ib_query_port;
2124         ibdev->ib_dev.get_link_layer    = mlx4_ib_port_link_layer;
2125         ibdev->ib_dev.query_gid         = mlx4_ib_query_gid;
2126         ibdev->ib_dev.query_pkey        = mlx4_ib_query_pkey;
2127         ibdev->ib_dev.modify_device     = mlx4_ib_modify_device;
2128         ibdev->ib_dev.modify_port       = mlx4_ib_modify_port;
2129         ibdev->ib_dev.alloc_ucontext    = mlx4_ib_alloc_ucontext;
2130         ibdev->ib_dev.dealloc_ucontext  = mlx4_ib_dealloc_ucontext;
2131         ibdev->ib_dev.mmap              = mlx4_ib_mmap;
2132         ibdev->ib_dev.alloc_pd          = mlx4_ib_alloc_pd;
2133         ibdev->ib_dev.dealloc_pd        = mlx4_ib_dealloc_pd;
2134         ibdev->ib_dev.create_ah         = mlx4_ib_create_ah;
2135         ibdev->ib_dev.query_ah          = mlx4_ib_query_ah;
2136         ibdev->ib_dev.destroy_ah        = mlx4_ib_destroy_ah;
2137         ibdev->ib_dev.create_srq        = mlx4_ib_create_srq;
2138         ibdev->ib_dev.modify_srq        = mlx4_ib_modify_srq;
2139         ibdev->ib_dev.query_srq         = mlx4_ib_query_srq;
2140         ibdev->ib_dev.destroy_srq       = mlx4_ib_destroy_srq;
2141         ibdev->ib_dev.post_srq_recv     = mlx4_ib_post_srq_recv;
2142         ibdev->ib_dev.create_qp         = mlx4_ib_create_qp;
2143         ibdev->ib_dev.modify_qp         = mlx4_ib_modify_qp;
2144         ibdev->ib_dev.query_qp          = mlx4_ib_query_qp;
2145         ibdev->ib_dev.destroy_qp        = mlx4_ib_destroy_qp;
2146         ibdev->ib_dev.post_send         = mlx4_ib_post_send;
2147         ibdev->ib_dev.post_recv         = mlx4_ib_post_recv;
2148         ibdev->ib_dev.create_cq         = mlx4_ib_create_cq;
2149         ibdev->ib_dev.modify_cq         = mlx4_ib_modify_cq;
2150         ibdev->ib_dev.resize_cq         = mlx4_ib_resize_cq;
2151         ibdev->ib_dev.destroy_cq        = mlx4_ib_destroy_cq;
2152         ibdev->ib_dev.poll_cq           = mlx4_ib_poll_cq;
2153         ibdev->ib_dev.req_notify_cq     = mlx4_ib_arm_cq;
2154         ibdev->ib_dev.get_dma_mr        = mlx4_ib_get_dma_mr;
2155         ibdev->ib_dev.reg_user_mr       = mlx4_ib_reg_user_mr;
2156         ibdev->ib_dev.rereg_user_mr     = mlx4_ib_rereg_user_mr;
2157         ibdev->ib_dev.dereg_mr          = mlx4_ib_dereg_mr;
2158         ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
2159         ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
2160         ibdev->ib_dev.free_fast_reg_page_list  = mlx4_ib_free_fast_reg_page_list;
2161         ibdev->ib_dev.attach_mcast      = mlx4_ib_mcg_attach;
2162         ibdev->ib_dev.detach_mcast      = mlx4_ib_mcg_detach;
2163         ibdev->ib_dev.process_mad       = mlx4_ib_process_mad;
2164
2165         if (!mlx4_is_slave(ibdev->dev)) {
2166                 ibdev->ib_dev.alloc_fmr         = mlx4_ib_fmr_alloc;
2167                 ibdev->ib_dev.map_phys_fmr      = mlx4_ib_map_phys_fmr;
2168                 ibdev->ib_dev.unmap_fmr         = mlx4_ib_unmap_fmr;
2169                 ibdev->ib_dev.dealloc_fmr       = mlx4_ib_fmr_dealloc;
2170         }
2171
2172         if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2173             dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
2174                 ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
2175                 ibdev->ib_dev.bind_mw = mlx4_ib_bind_mw;
2176                 ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
2177
2178                 ibdev->ib_dev.uverbs_cmd_mask |=
2179                         (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
2180                         (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
2181         }
2182
2183         if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2184                 ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
2185                 ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
2186                 ibdev->ib_dev.uverbs_cmd_mask |=
2187                         (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
2188                         (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
2189         }
2190
2191         if (check_flow_steering_support(dev)) {
2192                 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
2193                 ibdev->ib_dev.create_flow       = mlx4_ib_create_flow;
2194                 ibdev->ib_dev.destroy_flow      = mlx4_ib_destroy_flow;
2195
2196                 ibdev->ib_dev.uverbs_ex_cmd_mask        |=
2197                         (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
2198                         (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
2199         }
2200
2201         mlx4_ib_alloc_eqs(dev, ibdev);
2202
2203         spin_lock_init(&iboe->lock);
2204
2205         if (init_node_data(ibdev))
2206                 goto err_map;
2207
2208         for (i = 0; i < ibdev->num_ports; ++i) {
2209                 mutex_init(&ibdev->qp1_proxy_lock[i]);
2210                 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2211                                                 IB_LINK_LAYER_ETHERNET) {
2212                         err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
2213                         if (err)
2214                                 ibdev->counters[i] = -1;
2215                 } else {
2216                         ibdev->counters[i] = -1;
2217                 }
2218         }
2219
2220         mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2221                 ib_num_ports++;
2222
2223         spin_lock_init(&ibdev->sm_lock);
2224         mutex_init(&ibdev->cap_mask_mutex);
2225
2226         if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2227             ib_num_ports) {
2228                 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2229                 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2230                                             MLX4_IB_UC_STEER_QPN_ALIGN,
2231                                             &ibdev->steer_qpn_base, 0);
2232                 if (err)
2233                         goto err_counter;
2234
2235                 ibdev->ib_uc_qpns_bitmap =
2236                         kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
2237                                 sizeof(long),
2238                                 GFP_KERNEL);
2239                 if (!ibdev->ib_uc_qpns_bitmap) {
2240                         dev_err(&dev->pdev->dev, "bit map alloc failed\n");
2241                         goto err_steer_qp_release;
2242                 }
2243
2244                 bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
2245
2246                 err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
2247                                 dev, ibdev->steer_qpn_base,
2248                                 ibdev->steer_qpn_base +
2249                                 ibdev->steer_qpn_count - 1);
2250                 if (err)
2251                         goto err_steer_free_bitmap;
2252         }
2253
2254         for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
2255                 atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
2256
2257         if (ib_register_device(&ibdev->ib_dev, NULL))
2258                 goto err_steer_free_bitmap;
2259
2260         if (mlx4_ib_mad_init(ibdev))
2261                 goto err_reg;
2262
2263         if (mlx4_ib_init_sriov(ibdev))
2264                 goto err_mad;
2265
2266         if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) {
2267                 if (!iboe->nb.notifier_call) {
2268                         iboe->nb.notifier_call = mlx4_ib_netdev_event;
2269                         err = register_netdevice_notifier(&iboe->nb);
2270                         if (err) {
2271                                 iboe->nb.notifier_call = NULL;
2272                                 goto err_notif;
2273                         }
2274                 }
2275                 if (!iboe->nb_inet.notifier_call) {
2276                         iboe->nb_inet.notifier_call = mlx4_ib_inet_event;
2277                         err = register_inetaddr_notifier(&iboe->nb_inet);
2278                         if (err) {
2279                                 iboe->nb_inet.notifier_call = NULL;
2280                                 goto err_notif;
2281                         }
2282                 }
2283 #if IS_ENABLED(CONFIG_IPV6)
2284                 if (!iboe->nb_inet6.notifier_call) {
2285                         iboe->nb_inet6.notifier_call = mlx4_ib_inet6_event;
2286                         err = register_inet6addr_notifier(&iboe->nb_inet6);
2287                         if (err) {
2288                                 iboe->nb_inet6.notifier_call = NULL;
2289                                 goto err_notif;
2290                         }
2291                 }
2292 #endif
2293                 if (mlx4_ib_init_gid_table(ibdev))
2294                         goto err_notif;
2295         }
2296
2297         for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
2298                 if (device_create_file(&ibdev->ib_dev.dev,
2299                                        mlx4_class_attributes[j]))
2300                         goto err_notif;
2301         }
2302
2303         ibdev->ib_active = true;
2304
2305         if (mlx4_is_mfunc(ibdev->dev))
2306                 init_pkeys(ibdev);
2307
2308         /* create paravirt contexts for any VFs which are active */
2309         if (mlx4_is_master(ibdev->dev)) {
2310                 for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2311                         if (j == mlx4_master_func_num(ibdev->dev))
2312                                 continue;
2313                         if (mlx4_is_slave_active(ibdev->dev, j))
2314                                 do_slave_init(ibdev, j, 1);
2315                 }
2316         }
2317         return ibdev;
2318
2319 err_notif:
2320         if (ibdev->iboe.nb.notifier_call) {
2321                 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2322                         pr_warn("failure unregistering notifier\n");
2323                 ibdev->iboe.nb.notifier_call = NULL;
2324         }
2325         if (ibdev->iboe.nb_inet.notifier_call) {
2326                 if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
2327                         pr_warn("failure unregistering notifier\n");
2328                 ibdev->iboe.nb_inet.notifier_call = NULL;
2329         }
2330 #if IS_ENABLED(CONFIG_IPV6)
2331         if (ibdev->iboe.nb_inet6.notifier_call) {
2332                 if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
2333                         pr_warn("failure unregistering notifier\n");
2334                 ibdev->iboe.nb_inet6.notifier_call = NULL;
2335         }
2336 #endif
2337         flush_workqueue(wq);
2338
2339         mlx4_ib_close_sriov(ibdev);
2340
2341 err_mad:
2342         mlx4_ib_mad_cleanup(ibdev);
2343
2344 err_reg:
2345         ib_unregister_device(&ibdev->ib_dev);
2346
2347 err_steer_free_bitmap:
2348         kfree(ibdev->ib_uc_qpns_bitmap);
2349
2350 err_steer_qp_release:
2351         if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
2352                 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2353                                       ibdev->steer_qpn_count);
2354 err_counter:
2355         for (; i; --i)
2356                 if (ibdev->counters[i - 1] != -1)
2357                         mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
2358
2359 err_map:
2360         iounmap(ibdev->uar_map);
2361
2362 err_uar:
2363         mlx4_uar_free(dev, &ibdev->priv_uar);
2364
2365 err_pd:
2366         mlx4_pd_free(dev, ibdev->priv_pdn);
2367
2368 err_dealloc:
2369         ib_dealloc_device(&ibdev->ib_dev);
2370
2371         return NULL;
2372 }
2373
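/*
 * Reserve a power-of-two-aligned block of @count QPNs from the range set
 * aside for device-managed flow steering of UC QPs.
 */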
2374 int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
2375 {
2376         int offset;
2377
2378         WARN_ON(!dev->ib_uc_qpns_bitmap);
2379
2380         offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
2381                                          dev->steer_qpn_count,
2382                                          get_count_order(count));
2383         if (offset < 0)
2384                 return offset;
2385
2386         *qpn = dev->steer_qpn_base + offset;
2387         return 0;
2388 }
2389
2390 void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
2391 {
2392         if (!qpn ||
2393             dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
2394                 return;
2395
2396         BUG_ON(qpn < dev->steer_qpn_base);
2397
2398         bitmap_release_region(dev->ib_uc_qpns_bitmap,
2399                               qpn - dev->steer_qpn_base,
2400                               get_count_order(count));
2401 }
2402
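/*
 * Attach (is_attach != 0) or detach a catch-all IB L2 steering rule for
 * @mqp, making the QP reachable under device-managed flow steering.
 */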
2403 int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
2404                          int is_attach)
2405 {
2406         int err;
2407         size_t flow_size;
2408         struct ib_flow_attr *flow = NULL;
2409         struct ib_flow_spec_ib *ib_spec;
2410
2411         if (is_attach) {
2412                 flow_size = sizeof(struct ib_flow_attr) +
2413                             sizeof(struct ib_flow_spec_ib);
2414                 flow = kzalloc(flow_size, GFP_KERNEL);
2415                 if (!flow)
2416                         return -ENOMEM;
2417                 flow->port = mqp->port;
2418                 flow->num_of_specs = 1;
2419                 flow->size = flow_size;
2420                 ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
2421                 ib_spec->type = IB_FLOW_SPEC_IB;
2422                 ib_spec->size = sizeof(struct ib_flow_spec_ib);
2423                 /* Add an empty rule for IB L2 */
2424                 memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
2425
2426                 err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
2427                                             IB_FLOW_DOMAIN_NIC,
2428                                             MLX4_FS_REGULAR,
2429                                             &mqp->reg_id);
2430         } else {
2431                 err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
2432         }
2433         kfree(flow);
2434         return err;
2435 }
2436
2437 static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
2438 {
2439         struct mlx4_ib_dev *ibdev = ibdev_ptr;
2440         int p;
2441
2442         ibdev->ib_active = false;
2443         flush_workqueue(wq);
2444
2445         mlx4_ib_close_sriov(ibdev);
2446         mlx4_ib_mad_cleanup(ibdev);
2447         ib_unregister_device(&ibdev->ib_dev);
2448         if (ibdev->iboe.nb.notifier_call) {
2449                 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2450                         pr_warn("failure unregistering notifier\n");
2451                 ibdev->iboe.nb.notifier_call = NULL;
2452         }
2453
2454         if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
2455                 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2456                                       ibdev->steer_qpn_count);
2457                 kfree(ibdev->ib_uc_qpns_bitmap);
2458         }
2459
2460         if (ibdev->iboe.nb_inet.notifier_call) {
2461                 if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
2462                         pr_warn("failure unregistering notifier\n");
2463                 ibdev->iboe.nb_inet.notifier_call = NULL;
2464         }
2465 #if IS_ENABLED(CONFIG_IPV6)
2466         if (ibdev->iboe.nb_inet6.notifier_call) {
2467                 if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
2468                         pr_warn("failure unregistering notifier\n");
2469                 ibdev->iboe.nb_inet6.notifier_call = NULL;
2470         }
2471 #endif
2472
2473         iounmap(ibdev->uar_map);
2474         for (p = 0; p < ibdev->num_ports; ++p)
2475                 if (ibdev->counters[p] != -1)
2476                         mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
2477         mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
2478                 mlx4_CLOSE_PORT(dev, p);
2479
2480         mlx4_ib_free_eqs(dev, ibdev);
2481
2482         mlx4_uar_free(dev, &ibdev->priv_uar);
2483         mlx4_pd_free(dev, ibdev->priv_pdn);
2484         ib_dealloc_device(&ibdev->ib_dev);
2485 }
2486
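/*
 * On the SR-IOV master, queue one work item per active port of @slave to
 * create (do_init) or tear down the tunnel QPs that proxy its MADs.
 */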
2487 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
2488 {
2489         struct mlx4_ib_demux_work **dm = NULL;
2490         struct mlx4_dev *dev = ibdev->dev;
2491         int i;
2492         unsigned long flags;
2493         struct mlx4_active_ports actv_ports;
2494         unsigned int ports;
2495         unsigned int first_port;
2496
2497         if (!mlx4_is_master(dev))
2498                 return;
2499
2500         actv_ports = mlx4_get_active_ports(dev, slave);
2501         ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2502         first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2503
2504         dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
2505         if (!dm) {
2506                 pr_err("failed to allocate memory for tunneling qp update\n");
2507                 goto out;
2508         }
2509
2510         for (i = 0; i < ports; i++) {
2511                 dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
2512                 if (!dm[i]) {
2513                         pr_err("failed to allocate memory for tunneling qp update work struct\n");
2514                         /* dm[] has only 'ports' entries; bounding the
2515                          * cleanup by num_ports could overrun it */
2516                         while (--i >= 0)
2517                                 kfree(dm[i]);
2518                         goto out;
2519                 }
2520         }
2521         /* initialize or tear down tunnel QPs for the slave */
2522         for (i = 0; i < ports; i++) {
2523                 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
2524                 dm[i]->port = first_port + i + 1;
2525                 dm[i]->slave = slave;
2526                 dm[i]->do_init = do_init;
2527                 dm[i]->dev = ibdev;
2528                 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
2529                 if (!ibdev->sriov.is_going_down)
2530                         queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
2531                 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
2532         }
2533 out:
2534         kfree(dm);
2535         return;
2536 }
2537
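/*
 * mlx4 core event handler: translate device events into IB events, hand
 * port management change EQEs to a work item (queued only on the master,
 * which uses GEN_EQE), and forward slave init/shutdown notifications.
 */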
2538 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
2539                           enum mlx4_dev_event event, unsigned long param)
2540 {
2541         struct ib_event ibev;
2542         struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
2543         struct mlx4_eqe *eqe = NULL;
2544         struct ib_event_work *ew;
2545         int p = 0;
2546
2547         if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
2548                 eqe = (struct mlx4_eqe *)param;
2549         else
2550                 p = (int) param;
2551
2552         switch (event) {
2553         case MLX4_DEV_EVENT_PORT_UP:
2554                 if (p > ibdev->num_ports)
2555                         return;
2556                 if (mlx4_is_master(dev) &&
2557                     rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
2558                         IB_LINK_LAYER_INFINIBAND) {
2559                         mlx4_ib_invalidate_all_guid_record(ibdev, p);
2560                 }
2561                 ibev.event = IB_EVENT_PORT_ACTIVE;
2562                 break;
2563
2564         case MLX4_DEV_EVENT_PORT_DOWN:
2565                 if (p > ibdev->num_ports)
2566                         return;
2567                 ibev.event = IB_EVENT_PORT_ERR;
2568                 break;
2569
2570         case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
2571                 ibdev->ib_active = false;
2572                 ibev.event = IB_EVENT_DEVICE_FATAL;
2573                 break;
2574
2575         case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
2576                 ew = kmalloc(sizeof *ew, GFP_ATOMIC);
2577                 if (!ew) {
2578                         pr_err("failed to allocate memory for events work\n");
2579                         break;
2580                 }
2581
2582                 INIT_WORK(&ew->work, handle_port_mgmt_change_event);
2583                 memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
2584                 ew->ib_dev = ibdev;
2585                 /* need to queue only for port owner, which uses GEN_EQE */
2586                 if (mlx4_is_master(dev))
2587                         queue_work(wq, &ew->work);
2588                 else
2589                         handle_port_mgmt_change_event(&ew->work);
2590                 return;
2591
2592         case MLX4_DEV_EVENT_SLAVE_INIT:
2593                 /* here, p is the slave id */
2594                 do_slave_init(ibdev, p, 1);
2595                 return;
2596
2597         case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
2598                 /* here, p is the slave id */
2599                 do_slave_init(ibdev, p, 0);
2600                 return;
2601
2602         default:
2603                 return;
2604         }
2605
2606         ibev.device           = ibdev_ptr;
2607         ibev.element.port_num = (u8) p;
2608
2609         ib_dispatch_event(&ibev);
2610 }
2611
2612 static struct mlx4_interface mlx4_ib_interface = {
2613         .add            = mlx4_ib_add,
2614         .remove         = mlx4_ib_remove,
2615         .event          = mlx4_ib_event,
2616         .protocol       = MLX4_PROT_IB_IPV6
2617 };
2618
2619 static int __init mlx4_ib_init(void)
2620 {
2621         int err;
2622
2623         wq = create_singlethread_workqueue("mlx4_ib");
2624         if (!wq)
2625                 return -ENOMEM;
2626
2627         err = mlx4_ib_mcg_init();
2628         if (err)
2629                 goto clean_wq;
2630
2631         err = mlx4_register_interface(&mlx4_ib_interface);
2632         if (err)
2633                 goto clean_mcg;
2634
2635         return 0;
2636
2637 clean_mcg:
2638         mlx4_ib_mcg_destroy();
2639
2640 clean_wq:
2641         destroy_workqueue(wq);
2642         return err;
2643 }
2644
2645 static void __exit mlx4_ib_cleanup(void)
2646 {
2647         mlx4_unregister_interface(&mlx4_ib_interface);
2648         mlx4_ib_mcg_destroy();
2649         destroy_workqueue(wq);
2650 }
2651
2652 module_init(mlx4_ib_init);
2653 module_exit(mlx4_ib_cleanup);