drivers/net/ethernet/mellanox/mlx4/eq.c
/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>
#include <linux/cpu_rmap.h>

#include "mlx4.h"
#include "fw.h"

enum {
	MLX4_IRQNAME_SIZE	= 32
};

enum {
	MLX4_NUM_ASYNC_EQE	= 0x100,
	MLX4_NUM_SPARE_EQE	= 0x80,
	MLX4_EQ_ENTRY_SIZE	= 0x20
};

#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED	   (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)     | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD)		    | \
			       (1ull << MLX4_EVENT_TYPE_OP_REQUIRED)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)	    | \
			       (1ull << MLX4_EVENT_TYPE_FLR_EVENT)	    | \
			       (1ull << MLX4_EVENT_TYPE_FATAL_WARNING))

static u64 get_async_ev_mask(struct mlx4_dev *dev)
{
	u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
		async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);

	return async_ev_mask;
}
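
/*
 * Write the current consumer index to the EQ doorbell.  Bit 31 (req_not)
 * asks the HCA to signal an interrupt when the next event is queued,
 * i.e. it re-arms the EQ.
 */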
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor)
{
	/* (entry & (eq->nent - 1)) gives us a cyclic array */
	unsigned long offset = (entry & (eq->nent - 1)) * (MLX4_EQ_ENTRY_SIZE << eqe_factor);

	/* CX3 is capable of extending the EQE from 32 to 64 bytes.
	 * When this feature is enabled, the first (in the lower addresses)
	 * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes
	 * contain the legacy EQE information.
	 */
	return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
}
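
/*
 * Return the EQE at the current consumer index if software owns it,
 * otherwise NULL.  The owner bit flips on every pass over the (power of
 * two sized) ring, so software ownership is the XOR of the EQE's owner
 * bit and the wrap bit of the consumer index.
 */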
static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor)
{
	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor);
	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}

static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
{
	struct mlx4_eqe *eqe =
		&slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
	return (!!(eqe->owner & 0x80) ^
		!!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
		eqe : NULL;
}
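
/*
 * Worker that drains the master's software slave event queue, forwarding
 * each queued EQE to its target slave (or to every active slave for
 * ALL_SLAVES events) via the GEN_EQE firmware command.
 */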
void mlx4_gen_slave_eqe(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
	struct mlx4_eqe *eqe;
	u8 slave;
	int i;

	for (eqe = next_slave_event_eqe(slave_eq); eqe;
	     eqe = next_slave_event_eqe(slave_eq)) {
		slave = eqe->slave_id;

		/* All active slaves need to receive the event */
		if (slave == ALL_SLAVES) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (i != dev->caps.function &&
				    master->slave_state[i].active)
					if (mlx4_GEN_EQE(dev, i, eqe))
						mlx4_warn(dev, "Failed to generate event for slave %d\n",
							  i);
			}
		} else {
			if (mlx4_GEN_EQE(dev, slave, eqe))
				mlx4_warn(dev, "Failed to generate event for slave %d\n",
					  slave);
		}
		++slave_eq->cons;
	}
}
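
/*
 * Queue an event on the master's software slave event queue.  The EQE is
 * copied in under event_lock, the owner bit is flipped only after a
 * write barrier so the consumer never sees a partially written entry,
 * and the slave_event_work worker is kicked to deliver it.
 */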
static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
	struct mlx4_eqe *s_eqe;
	unsigned long flags;

	spin_lock_irqsave(&slave_eq->event_lock, flags);
	s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
	if ((!!(s_eqe->owner & 0x80)) ^
	    (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
		mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
			  slave);
		spin_unlock_irqrestore(&slave_eq->event_lock, flags);
		return;
	}

	memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
	s_eqe->slave_id = slave;
	/* ensure all information is written before setting the ownership bit */
	wmb();
	s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
	++slave_eq->prod;

	queue_work(priv->mfunc.master.comm_wq,
		   &priv->mfunc.master.slave_event_work);
	spin_unlock_irqrestore(&slave_eq->event_lock, flags);
}

static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
			     struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave =
		&priv->mfunc.master.slave_state[slave];

	if (!s_slave->active) {
		/*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
		return;
	}

	slave_event(dev, slave, eqe);
}

int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_eqe eqe;

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave];

	if (!s_slave->active)
		return 0;

	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
	eqe.event.port_mgmt_change.port = port;

	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_pkey_eqe);

int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_eqe eqe;

	/* don't send if we don't have that slave */
	if (dev->num_vfs < slave)
		return 0;
	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
	eqe.event.port_mgmt_change.port = port;

	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_guid_change_eqe);

int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
				   u8 port_subtype_change)
{
	struct mlx4_eqe eqe;

	/* don't send if we don't have that slave */
	if (dev->num_vfs < slave)
		return 0;
	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
	eqe.subtype = port_subtype_change;
	eqe.event.port_change.port = cpu_to_be32(port << 28);

	mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
		 port_subtype_change, slave, port);
	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe);

enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
	if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return SLAVE_PORT_DOWN;
	}
	return s_state[slave].port_state[port];
}
EXPORT_SYMBOL(mlx4_get_slave_port_state);

static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
				     enum slave_port_state state)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;

	if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return -1;
	}
	s_state[slave].port_state[port] = state;

	return 0;
}

static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
{
	int i;
	enum slave_port_gen_event gen_event;

	for (i = 0; i < dev->num_slaves; i++)
		set_and_calc_slave_port_state(dev, i, port, event, &gen_event);
}
/**************************************************************************
	The function gets as input the new event for a port, and changes
	the slave's port state according to the previous state.
	The events are:
		MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
		MLX4_PORT_STATE_DEV_EVENT_PORT_UP
		MLX4_PORT_STATE_IB_EVENT_GID_VALID
		MLX4_PORT_STATE_IB_EVENT_GID_INVALID
***************************************************************************/
int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
				  u8 port, int event,
				  enum slave_port_gen_event *gen_event)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *ctx = NULL;
	unsigned long flags;
	int ret = -1;
	enum slave_port_state cur_state =
		mlx4_get_slave_port_state(dev, slave, port);

	*gen_event = SLAVE_PORT_GEN_EVENT_NONE;

	if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return ret;
	}

	ctx = &priv->mfunc.master.slave_state[slave];
	spin_lock_irqsave(&ctx->lock, flags);

	switch (cur_state) {
	case SLAVE_PORT_DOWN:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PENDING_UP);
		break;
	case SLAVE_PENDING_UP:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event)
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_DOWN);
		else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_UP);
			*gen_event = SLAVE_PORT_GEN_EVENT_UP;
		}
		break;
	case SLAVE_PORT_UP:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_DOWN);
			*gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
		} else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID == event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PENDING_UP);
			*gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
		}
		break;
	default:
		pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n",
		       __func__, slave, port);
		goto out;
	}
	ret = mlx4_get_slave_port_state(dev, slave, port);

out:
	spin_unlock_irqrestore(&ctx->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_and_calc_slave_port_state);

int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr)
{
	struct mlx4_eqe eqe;

	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO;
	eqe.event.port_mgmt_change.port = port;
	eqe.event.port_mgmt_change.params.port_info.changed_attr =
		cpu_to_be32((u32) attr);

	slave_event(dev, ALL_SLAVES, &eqe);
	return 0;
}
EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev);
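
/*
 * Worker invoked after an FLR event: for every slave whose last command
 * was FLR, clean up all of its resources, return it to reset state and
 * tell the firmware that FLR handling is done.
 */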
void mlx4_master_handle_slave_flr(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_flr_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv =
		container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int i;
	int err;
	unsigned long flags;

	mlx4_dbg(dev, "mlx4_handle_slave_flr\n");

	for (i = 0 ; i < dev->num_slaves; i++) {
		if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
			mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
				 i);

			mlx4_delete_all_resources_for_slave(dev, i);
			/* return the slave to running mode */
			spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
			slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
			slave_state[i].is_slave_going_down = 0;
			spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
			/* notify the FW: */
			err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
			if (err)
				mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
					  i);
		}
	}
}
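
/*
 * Main EQ polling loop: consume EQEs until the ownership bit says the
 * ring is empty, dispatching each event by type (on the master, events
 * that belong to a slave's resource are forwarded to that slave).  The
 * consumer index is written back at least every MLX4_NUM_SPARE_EQE
 * entries so the HCA never sees an overflow, and the EQ is re-armed on
 * exit.
 */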
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_eqe *eqe;
	int cqn;
	int eqes_found = 0;
	int set_ci = 0;
	int port;
	int slave = 0;
	int ret;
	u32 flr_slave;
	u8 update_slave_state;
	int i;
	enum slave_port_gen_event gen_event;
	unsigned long flags;
	struct mlx4_vport_state *s_info;

	while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MLX4_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			mlx4_cq_completion(dev, cqn);
			break;

		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_dbg(dev, "event %d arrived\n", eqe->type);
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the QP */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_QP,
						be32_to_cpu(eqe->event.qp.qpn)
						& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}

			}
			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
				      0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_SRQ_LIMIT:
			mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
				 __func__);
			/* fall through */
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the SRQ */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_SRQ,
						be32_to_cpu(eqe->event.srq.srqn)
						& 0xffffff,
						&slave);
				if (ret && ret != -ENOENT) {
					mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						  eqe->type, eqe->subtype,
						  eq->eqn, eq->cons_index, ret);
					break;
				}
				mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
					  __func__, slave,
					  be32_to_cpu(eqe->event.srq.srqn),
					  eqe->type, eqe->subtype);

				if (!ret && slave != dev->caps.function) {
					mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
						  __func__, eqe->type,
						  eqe->subtype, slave);
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
				       0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_CMD:
			mlx4_cmd_event(dev,
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MLX4_EVENT_TYPE_PORT_CHANGE:
			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
				if (!mlx4_is_master(dev))
					break;
				for (i = 0; i < dev->num_slaves; i++) {
					if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
						if (i == mlx4_master_func_num(dev))
							continue;
						mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
							 __func__, i, port);
						/* use the loop index i, not the stale "slave" variable */
						s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state)
							mlx4_slave_event(dev, i, eqe);
					} else {  /* IB port */
						set_and_calc_slave_port_state(dev, i, port,
									      MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
									      &gen_event);
						/* we can be in pending state, then do not send port_down event */
						if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) {
							if (i == mlx4_master_func_num(dev))
								continue;
							mlx4_slave_event(dev, i, eqe);
						}
					}
				}
			} else {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port);

				mlx4_priv(dev)->sense.do_sense_port[port] = 0;

				if (!mlx4_is_master(dev))
					break;
				if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
					for (i = 0; i < dev->num_slaves; i++) {
						if (i == mlx4_master_func_num(dev))
							continue;
						/* use the loop index i, not the stale "slave" variable */
						s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state)
							mlx4_slave_event(dev, i, eqe);
					}
				else /* IB port */
					/* port-up event will be sent to a slave when the
					 * slave's alias-guid is set. This is done in alias_GUID.c
					 */
					set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
			}
			break;

		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			if (mlx4_is_master(dev)) {
				ret = mlx4_get_slave_from_resource_id(dev,
					RES_CQ,
					be32_to_cpu(eqe->event.cq_err.cqn)
					& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_cq_event(dev,
				      be32_to_cpu(eqe->event.cq_err.cqn)
				      & 0xffffff,
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MLX4_EVENT_TYPE_OP_REQUIRED:
			atomic_inc(&priv->opreq_count);
			/* FW commands can't be executed from interrupt context;
			 * defer the work to a task.
			 */
			queue_work(mlx4_wq, &priv->opreq_task);
			break;

		case MLX4_EVENT_TYPE_COMM_CHANNEL:
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Received comm channel event for non master device\n");
				break;
			}
			memcpy(&priv->mfunc.master.comm_arm_bit_vector,
			       eqe->event.comm_channel_arm.bit_vec,
			       sizeof eqe->event.comm_channel_arm.bit_vec);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.comm_work);
			break;

		case MLX4_EVENT_TYPE_FLR_EVENT:
			flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Non-master function received FLR event\n");
				break;
			}

			mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);

			if (flr_slave >= dev->num_slaves) {
				mlx4_warn(dev,
					  "Got FLR for unknown function: %d\n",
					  flr_slave);
				update_slave_state = 0;
			} else
				update_slave_state = 1;

			spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
			if (update_slave_state) {
				priv->mfunc.master.slave_state[flr_slave].active = false;
				priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
				priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
			}
			spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.slave_flr_event_work);
			break;

		case MLX4_EVENT_TYPE_FATAL_WARNING:
			if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
				if (mlx4_is_master(dev))
					for (i = 0; i < dev->num_slaves; i++) {
						mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n",
							 __func__, i);
						if (i == dev->caps.function)
							continue;
						mlx4_slave_event(dev, i, eqe);
					}
				mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n",
					 be16_to_cpu(eqe->event.warming.warning_threshold),
					 be16_to_cpu(eqe->event.warming.current_temperature));
			} else
				mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
					  eqe->type, eqe->subtype, eq->eqn,
					  eq->cons_index, eqe->owner, eq->nent,
					  eqe->slave_id,
					  !!(eqe->owner & 0x80) ^
					  !!(eq->cons_index & eq->nent) ? "HW" : "SW");

			break;

		case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT:
			mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
					    (unsigned long) eqe);
			break;

		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
		default:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
				  eqe->type, eqe->subtype, eq->eqn,
				  eq->cons_index, eqe->owner, eq->nent,
				  eqe->slave_id,
				  !!(eqe->owner & 0x80) ^
				  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			eq_set_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_set_ci(eq, 1);

	return eqes_found;
}
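
/*
 * Legacy (INTx) interrupt handler: clear the interrupt, then poll every
 * EQ, since with a single shared line any of them may have fired.
 */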
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
	struct mlx4_dev *dev = dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int work = 0;
	int i;

	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

	return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mlx4_eq  *eq  = eq_ptr;
	struct mlx4_dev *dev = eq->dev;

	mlx4_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}
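
/*
 * Command wrapper run on the master for a slave's MAP_EQ request: the
 * real firmware command is issued only for the master's own function;
 * for every requested event type the slave's target EQ number is
 * recorded (or cleared when the unmap bit, bit 31, is set).
 */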
int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq =
		priv->mfunc.master.slave_state[slave].event_eq;
	u32 in_modifier = vhcr->in_modifier;
	u32 eqn = in_modifier & 0x3FF;
	u64 in_param = vhcr->in_param;
	int err = 0;
	int i;

	if (slave == dev->caps.function)
		err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
			       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			       MLX4_CMD_NATIVE);
	if (!err)
		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i)
			if (in_param & (1LL << i))
				event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;

	return err;
}

static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
			int eq_num)
{
	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd(dev, mailbox->dma, eq_num, 0,
			MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num,
			    0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
			    MLX4_CMD_WRAPPED);
}

static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
	/*
	 * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
	 * we need to map, take the difference of highest index and
	 * the lowest index we'll use and add 1.
	 */
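	/*
	 * For example (illustrative numbers only): with reserved_eqs = 8,
	 * num_comp_vectors = 3 and comp_pool = 0, this is
	 * (3 + 1 + 8)/4 - 8/4 + 1 = 3 - 2 + 1 = 2 UARs.
	 */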
	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
		dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
}

static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int index;

	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] =
			ioremap(pci_resource_start(dev->pdev, 2) +
				((eq->eqn / 4) << PAGE_SHIFT),
				PAGE_SIZE);
		if (!priv->eq_table.uar_map[index]) {
			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
				 eq->eqn);
			return NULL;
		}
	}

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}

static void mlx4_unmap_uar(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		if (priv->eq_table.uar_map[i]) {
			iounmap(priv->eq_table.uar_map[i]);
			priv->eq_table.uar_map[i] = NULL;
		}
}
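
/*
 * Allocate and initialize one EQ: DMA-coherent pages for the EQE ring,
 * an EQN from the bitmap, a doorbell in the matching UAR page and an
 * MTT for the ring, then hand the EQ to firmware with SW2HW_EQ.
 */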
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
			  u8 intr, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->dev   = dev;
	eq->nent  = roundup_pow_of_two(max(nent, 2));
	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
	npages = PAGE_ALIGN(eq->nent * (MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor)) / PAGE_SIZE;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t, GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(dev, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK   |
						MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size	  = ilog2(eq->nent);
	eq_context->intr	  = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	kfree(dma_list);
	mlx4_free_cmd_mailbox(dev, mailbox);

	eq->cons_index = 0;

	return err;

err_out_free_mtt:
	mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);

err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);

	mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

err_out:
	return err;
}

static void mlx4_free_eq(struct mlx4_dev *dev,
			 struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	int i;
	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
	int npages = PAGE_ALIGN((MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor) * eq->nent) / PAGE_SIZE;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return;

	err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
	if (err)
		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

	if (0) {
		/* dead code: flip to 1 to dump the EQ context for debugging */
		mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				pr_cont("[%02x] ", i * 4);
			pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)
				pr_cont("\n");
		}
	}

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
				  eq->page_list[i].buf,
				  eq->page_list[i].map);

	kfree(eq->page_list);
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
	mlx4_free_cmd_mailbox(dev, mailbox);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int	i, vec;

	if (eq_table->have_irq)
		free_irq(dev->pdev->irq, dev);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		if (eq_table->eq[i].have_irq) {
			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
			eq_table->eq[i].have_irq = 0;
		}

	for (i = 0; i < dev->caps.comp_pool; i++) {
		/*
		 * Free the IRQs assigned from the pool; the bitmap should
		 * already be all zeros, but validate anyway.
		 */
		if (priv->msix_ctl.pool_bm & 1ULL << i) {
			/* no need for locking here */
			vec = dev->caps.num_comp_vectors + 1 + i;
			free_irq(priv->eq_table.eq[vec].irq,
				 &priv->eq_table.eq[vec]);
		}
	}

	kfree(eq_table->irq_names);
}

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
	if (!priv->clr_base) {
		mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
		return -ENOMEM;
	}

	return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	iounmap(priv->clr_base);
}

int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
				    sizeof *priv->eq_table.eq, GFP_KERNEL);
	if (!priv->eq_table.eq)
		return -ENOMEM;

	return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
	kfree(mlx4_priv(dev)->eq_table.eq);
}
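
/*
 * Set up the whole EQ table: the EQ-doorbell UAR map, the EQN bitmap,
 * the interrupt-clear register (on the PF), one EQ per completion
 * vector followed by the async EQ at index num_comp_vectors and
 * comp_pool extra pool EQs, then request the MSI-X (or shared INTx)
 * interrupts, map the async events to the async EQ and arm everything.
 */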
int mlx4_init_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;

	priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
					 sizeof *priv->eq_table.uar_map,
					 GFP_KERNEL);
	if (!priv->eq_table.uar_map) {
		err = -ENOMEM;
		goto err_out_free;
	}

	err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
	if (err)
		goto err_out_free;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		priv->eq_table.uar_map[i] = NULL;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_map_clr_int(dev);
		if (err)
			goto err_out_bitmap;

		priv->eq_table.clr_mask =
			swab32(1 << (priv->eq_table.inta_pin & 31));
		priv->eq_table.clr_int  = priv->clr_base +
			(priv->eq_table.inta_pin < 32 ? 4 : 0);
	}

	priv->eq_table.irq_names =
		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
					     dev->caps.comp_pool),
			GFP_KERNEL);
	if (!priv->eq_table.irq_names) {
		err = -ENOMEM;
		goto err_out_bitmap;
	}

	for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
		err = mlx4_create_eq(dev, dev->caps.num_cqs -
					  dev->caps.reserved_cqs +
					  MLX4_NUM_SPARE_EQE,
				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
			     (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
			     &priv->eq_table.eq[dev->caps.num_comp_vectors]);
	if (err)
		goto err_out_comp;

	/* if the additional completion vector pool size is 0, this loop will not run */
	for (i = dev->caps.num_comp_vectors + 1;
	     i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
		err = mlx4_create_eq(dev, dev->caps.num_cqs -
					  dev->caps.reserved_cqs +
					  MLX4_NUM_SPARE_EQE,
				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	if (dev->flags & MLX4_FLAG_MSI_X) {
		const char *eq_name;

		for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
			if (i < dev->caps.num_comp_vectors) {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-comp-%d@pci:%s", i,
					 pci_name(dev->pdev));
			} else {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-async@pci:%s",
					 pci_name(dev->pdev));
			}

			eq_name = priv->eq_table.irq_names +
				  i * MLX4_IRQNAME_SIZE;
			err = request_irq(priv->eq_table.eq[i].irq,
					  mlx4_msi_x_interrupt, 0, eq_name,
					  priv->eq_table.eq + i);
			if (err)
				goto err_out_async;

			priv->eq_table.eq[i].have_irq = 1;
		}
	} else {
		snprintf(priv->eq_table.irq_names,
			 MLX4_IRQNAME_SIZE,
			 DRV_NAME "@pci:%s",
			 pci_name(dev->pdev));
		err = request_irq(dev->pdev->irq, mlx4_interrupt,
				  IRQF_SHARED, priv->eq_table.irq_names, dev);
		if (err)
			goto err_out_async;

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	if (err)
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		eq_set_ci(&priv->eq_table.eq[i], 1);

	return 0;

err_out_async:
	mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);

err_out_comp:
	i = dev->caps.num_comp_vectors - 1;

err_out_unmap:
	while (i >= 0) {
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
		--i;
	}
	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);
	mlx4_free_irqs(dev);

err_out_bitmap:
	mlx4_unmap_uar(dev);
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
	kfree(priv->eq_table.uar_map);

	return err;
}

void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);

	mlx4_free_irqs(dev);

	for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);

	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);

	mlx4_unmap_uar(dev);
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

	kfree(priv->eq_table.uar_map);
}

/* A test that verifies that we can accept interrupts on all
 * the irq vectors of the device.
 * Interrupts are checked using the NOP command.
 */
int mlx4_test_interrupts(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int err;

	err = mlx4_NOP(dev);
	/* When not in MSI_X, there is only one irq to check */
	if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
		return err;

	/* A loop over all completion vectors, for each vector we will check
	 * whether it works by mapping command completions to that vector
	 * and performing a NOP command
	 */
	for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
		/* Temporary use polling for command completions */
		mlx4_cmd_use_polling(dev);

		/* Map the new eq to handle all asynchronous events */
		err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
				  priv->eq_table.eq[i].eqn);
		if (err) {
			mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
			mlx4_cmd_use_events(dev);
			break;
		}

		/* Go back to using events */
		mlx4_cmd_use_events(dev);
		err = mlx4_NOP(dev);
	}

	/* Return to default */
	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	return err;
}
EXPORT_SYMBOL(mlx4_test_interrupts);
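
/*
 * Assign a dedicated completion EQ/IRQ from the MSI-X pool: find a free
 * bit in the pool bitmap, name and request the corresponding IRQ
 * (optionally adding it to the RFS cpu_rmap), arm the EQ and return the
 * vector number through *vector; -ENOSPC if the pool is exhausted.
 */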
int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
		   int *vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int vec = 0, err = 0, i;

	mutex_lock(&priv->msix_ctl.pool_lock);
	for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
		if (~priv->msix_ctl.pool_bm & 1ULL << i) {
			priv->msix_ctl.pool_bm |= 1ULL << i;
			vec = dev->caps.num_comp_vectors + 1 + i;
			snprintf(priv->eq_table.irq_names +
					vec * MLX4_IRQNAME_SIZE,
					MLX4_IRQNAME_SIZE, "%s", name);
#ifdef CONFIG_RFS_ACCEL
			if (rmap) {
				err = irq_cpu_rmap_add(rmap,
						       priv->eq_table.eq[vec].irq);
				if (err)
					mlx4_warn(dev, "Failed adding irq rmap\n");
			}
#endif
			/* vec << 5 == vec * MLX4_IRQNAME_SIZE */
			err = request_irq(priv->eq_table.eq[vec].irq,
					  mlx4_msi_x_interrupt, 0,
					  &priv->eq_table.irq_names[vec << 5],
					  priv->eq_table.eq + vec);
			if (err) {
				/* zero out the bit by flipping it */
				priv->msix_ctl.pool_bm ^= 1ULL << i;
				vec = 0;
				continue;
				/* we don't want to break here */
			}
			eq_set_ci(&priv->eq_table.eq[vec], 1);
		}
	}
	mutex_unlock(&priv->msix_ctl.pool_lock);

	if (vec) {
		*vector = vec;
	} else {
		*vector = 0;
		err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
	}
	return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);

void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	/* bitmap index */
	int i = vec - dev->caps.num_comp_vectors - 1;

	if (likely(i >= 0)) {
		/*
		 * Sanity check, making sure we're not trying to free IRQs
		 * belonging to a legacy EQ.
		 */
		mutex_lock(&priv->msix_ctl.pool_lock);
		if (priv->msix_ctl.pool_bm & 1ULL << i) {
			free_irq(priv->eq_table.eq[vec].irq,
				 &priv->eq_table.eq[vec]);
			priv->msix_ctl.pool_bm &= ~(1ULL << i);
		}
		mutex_unlock(&priv->msix_ctl.pool_lock);
	}
}
EXPORT_SYMBOL(mlx4_release_eq);