/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
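
/* Usage sketch (illustrative, not part of the driver): both parameters are
 * S_IRUGO (read-only via sysfs), so they can only be set at module load
 * time, e.g.:
 *
 *   # modprobe be2net num_vfs=4 rx_frag_size=4096
 */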

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                      GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}
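
/* Usage sketch (illustrative): be_queue_alloc() and be_queue_free() are
 * used as a pair around a queue's lifetime, e.g. to back a 256-entry ring
 * of 16-byte descriptors:
 *
 *   if (be_queue_alloc(adapter, q, 256, 16))    // 4KB coherent DMA buffer
 *           return -ENOMEM;
 *   ...
 *   be_queue_free(adapter, q);                  // no-op if va is NULL
 */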

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On Lancer, interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        /* Try the FW cmd first; fall back to the PCI config register */
        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;

        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;

        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
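
/* Usage sketch (illustrative): an EQ doorbell write serves two purposes --
 * acking events that were consumed and optionally re-arming the EQ for the
 * next interrupt. A NAPI-style handler would typically do:
 *
 *   be_eq_notify(adapter, eqo->q.id, false, true, num_evts);  // ack only
 *   ...process completions...
 *   be_eq_notify(adapter, eqo->q.id, true, false, 0);         // re-arm
 */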

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
        struct sockaddr *addr = p;
        int status;
        u8 mac[ETH_ALEN];
        u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
         * privilege or if the PF did not provision the new MAC address.
         * On BE3, this cmd will always fail if the VF doesn't have the
         * FILTMGMT privilege. This failure is OK only if the PF programmed
         * the MAC for the VF.
         */
        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle, &adapter->pmac_id[0], 0);
        if (!status) {
                curr_pmac_id = adapter->pmac_id[0];

                /* Delete the old programmed MAC. This call may fail if the
                 * old MAC was already deleted by the PF driver.
                 */
                if (adapter->pmac_id[0] != old_pmac_id)
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        old_pmac_id, 0);
        }

        /* Decide if the new MAC is successfully activated only after
         * querying the FW
         */
        status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
        if (status)
                goto err;

        /* The MAC change did not happen, either due to lack of privilege
         * or because the PF didn't pre-provision it.
         */
        if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
                status = -EPERM;
                goto err;
        }

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        dev_info(dev, "MAC address changed to %pM\n", mac);
        return 0;
err:
        dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd; BE3 uses v1; everything newer uses v2 */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else if (BE3_chip(adapter)) {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd; BE3 uses v1; everything newer uses v2 */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else if (BE3_chip(adapter)) {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}
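
/* Chip-to-stats-version mapping implied by the two helpers above
 * (illustrative summary, derived from the code, not from a spec):
 *
 *   BE2        -> be_cmd_resp_get_stats_v0 / be_hw_stats_v0
 *   BE3        -> be_cmd_resp_get_stats_v1 / be_hw_stats_v1
 *   all others -> be_cmd_resp_get_stats_v2 / be_hw_stats_v2
 *
 * Lancer does not use these helpers at all; be_parse_stats() routes it to
 * populate_lancer_stats() instead.
 */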

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered =
                                        port_stats->rx_address_filtered +
                                        port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v2 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
        if (be_roce_supported(adapter)) {
                drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
                drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
                drvs->rx_roce_frames = port_stats->roce_frames_received;
                drvs->roce_drops_crc = port_stats->roce_drops_crc;
                drvs->roce_drops_payload_len =
                        port_stats->roce_drops_payload_len;
        }
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered =
                                        pport_stats->rx_address_filtered +
                                        pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}
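
/* Worked example (illustrative): the erx counter is only 16 bits wide in
 * hardware, so accumulate_16bit_val() keeps a 32-bit running value whose
 * low half mirrors the hardware counter. Say *acc == 0x0001FFFE and the
 * hardware now reports val == 0x0002: val < lo(*acc), so the counter must
 * have wrapped; newacc = hi(*acc) + val + 65536 = 0x00020002, i.e. the
 * accumulated count grows by 4 as expected (0xFFFE -> 0x10002).
 */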

static void populate_erx_stats(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        u32 erx_stat)
{
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
        else
                /* The erx HW counter wraps around after 65535; the driver
                 * accumulates it into a 32-bit value.
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else if (BE3_chip(adapter))
                        populate_be_v1_stats(adapter);
                else
                        populate_be_v2_stats(adapter);

                /* erx_v2 is longer than v0/v1; use v2 for v0/v1 access */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
                }
        }
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);

                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);

                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even number */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}
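
/* Worked example (illustrative): for an skb with a linear area and two
 * page frags, cnt = 1 (head) + 2 (frags) + 1 (hdr wrb) = 4, which is even,
 * so no dummy is needed. With only one frag, cnt would be 3 and a dummy
 * wrb is appended on BEx chips to keep the wrb count even; Lancer has no
 * such restriction.
 */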

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}
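
/* Example (illustrative): a DMA address of 0x1_2345_6789 is split as
 * frag_pa_hi = 0x00000001, frag_pa_lo = 0x23456789; the two halves are
 * recombined in unmap_tx_frag() below.
 */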

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If the vlan priority provided by the OS is not in the available
         * bmap, replace it with the recommended priority.
         */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}
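
/* Worked example (illustrative): VLAN_PRIO_SHIFT is 13, so a tag of
 * 0x6064 carries priority 3 (bits 15:13) and VID 100. If bit 3 is clear
 * in vlan_prio_bmap, the priority bits are cleared and replaced with
 * adapter->recommended_prio (assumed to be stored already shifted into
 * bits 15:13), leaving the VID untouched.
 */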

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);

                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];

                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);

        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
                if (!vlan_tag)
                        vlan_tag = adapter->pvid;
                /* F/W workaround: setting skip_hw_vlan = 1 informs the F/W
                 * to skip VLAN insertion
                 */
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                skb->vlan_tci = 0;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}
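
/* Resulting frame layouts (illustrative) for the cases handled above:
 *
 *   single tag:  [dmac|smac][8100|vlan_tag][payload]
 *   QnQ:         [dmac|smac][8100|qnq_vid][8100|inner tag][payload]
 *
 * The inner tag is inserted first and the outer (qnq_vid) tag second,
 * since __vlan_put_tag() pushes each new tag in front of the previous one.
 */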

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *) (skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
                                struct sk_buff *skb)
{
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           bool *skip_hw_vlan)
{
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
        struct iphdr *ip;

        /* The Lancer and SH-R ASICs have a bug wherein packets that are
         * 32 bytes or less may cause a transmit stall on that port. The
         * work-around is to pad such packets (<= 32 bytes) to a 36-byte
         * length.
         */
        if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
                if (skb_padto(skb, 36))
                        goto tx_drop;
                skb->len = 36;
        }

        /* For padded packets, BE HW modifies the tot_len field in the IP
         * header incorrectly when a VLAN tag is inserted by HW.
         * For padded packets, Lancer computes an incorrect checksum.
         */
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
            (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If the vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in UMC mode
         */
        if ((adapter->function_mode & UMC_ENABLED) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                *skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert the VLAN in the pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        /* HW may lockup when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
            (adapter->pvid || adapter->qnq_vid) &&
            !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts a VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in the driver,
         * and set the event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        return skb;
tx_drop:
        dev_kfree_skb_any(skb);
        return NULL;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        bool dummy_wrb, stopped = false;
        u32 wrb_cnt = 0, copied = 0;
        bool skip_hw_vlan = false;
        u32 start = txq->head;

        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
        if (!skb) {
                tx_stats(txo)->tx_drv_drops++;
                return NETDEV_TX_OK;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                tx_stats(txo)->tx_drv_drops++;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}
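
/* Example (illustrative; assuming BE_MAX_JUMBO_FRAME_SIZE is 9018 as
 * defined in be.h): the largest accepted MTU is 9018 - (14 + 4) = 9000
 * bytes, i.e. the jumbo frame size minus the Ethernet header and FCS.
 */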

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > be_max_vlans(adapter))
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 0);

        if (status) {
                /* Set to VLAN promisc mode as setting VLAN filter failed */
                if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
                        goto set_vlan_promisc;
                dev_err(&adapter->pdev->dev,
                        "Setting HW VLAN filtering failed.\n");
        } else {
                if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
                        /* hw VLAN filtering re-enabled. */
                        status = be_cmd_rx_filter(adapter,
                                                  BE_FLAGS_VLAN_PROMISC, OFF);
                        if (!status) {
                                dev_info(&adapter->pdev->dev,
                                         "Disabling VLAN Promiscuous mode.\n");
                                adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
                                dev_info(&adapter->pdev->dev,
                                         "Re-Enabling HW VLAN filtering\n");
                        }
                }
        }

        return status;

set_vlan_promisc:
        dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");

        status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
        if (!status) {
                dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
                adapter->flags |= BE_FLAGS_VLAN_PROMISC;
        } else {
                dev_err(&adapter->pdev->dev,
                        "Failed to enable VLAN Promiscuous mode.\n");
        }
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= be_max_vlans(adapter))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > be_max_mc(adapter)) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (BEx_chip(adapter)) {
                be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
                                vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        } else {
                status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
                                        vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
        vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
                return -EINVAL;

        if (vlan || qos) {
                vlan |= qos << VLAN_PRIO_SHIFT;
                if (vf_cfg->vlan_tag != vlan) {
                        /* If this is a new value, program it. Else skip. */
                        vf_cfg->vlan_tag = vlan;
                        status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                                                       vf_cfg->if_handle, 0);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                vf_cfg->vlan_tag = 0;
                vlan = vf_cfg->def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                                               vf_cfg->if_handle, 0);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}
1345
1346 static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1347                           ulong now)
1348 {
1349         aic->rx_pkts_prev = rx_pkts;
1350         aic->tx_reqs_prev = tx_pkts;
1351         aic->jiffies = now;
1352 }
1353
1354 static void be_eqd_update(struct be_adapter *adapter)
1355 {
1356         struct be_set_eqd set_eqd[MAX_EVT_QS];
1357         int eqd, i, num = 0, start;
1358         struct be_aic_obj *aic;
1359         struct be_eq_obj *eqo;
1360         struct be_rx_obj *rxo;
1361         struct be_tx_obj *txo;
1362         u64 rx_pkts, tx_pkts;
1363         ulong now;
1364         u32 pps, delta;
1365
1366         for_all_evt_queues(adapter, eqo, i) {
1367                 aic = &adapter->aic_obj[eqo->idx];
1368                 if (!aic->enable) {
1369                         if (aic->jiffies)
1370                                 aic->jiffies = 0;
1371                         eqd = aic->et_eqd;
1372                         goto modify_eqd;
1373                 }
1374
1375                 rxo = &adapter->rx_obj[eqo->idx];
1376                 do {
1377                         start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
1378                         rx_pkts = rxo->stats.rx_pkts;
1379                 } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
1380
1381                 txo = &adapter->tx_obj[eqo->idx];
1382                 do {
1383                         start = u64_stats_fetch_begin_bh(&txo->stats.sync);
1384                         tx_pkts = txo->stats.tx_reqs;
1385                 } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
1386
1388                 /* Skip if wrapped around or on the first calculation */
1389                 now = jiffies;
1390                 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1391                     rx_pkts < aic->rx_pkts_prev ||
1392                     tx_pkts < aic->tx_reqs_prev) {
1393                         be_aic_update(aic, rx_pkts, tx_pkts, now);
1394                         continue;
1395                 }
1396
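                /* eqd grows in steps of 4 per 15K pkts/sec; e.g. pps = 30000
                 * gives eqd = (30000 / 15000) << 2 = 8. Values below 8 are
                 * forced to 0 and the result is clamped to [min_eqd, max_eqd].
                 */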
1397                 delta = jiffies_to_msecs(now - aic->jiffies);
1398                 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1399                         (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1400                 eqd = (pps / 15000) << 2;
1401
1402                 if (eqd < 8)
1403                         eqd = 0;
1404                 eqd = min_t(u32, eqd, aic->max_eqd);
1405                 eqd = max_t(u32, eqd, aic->min_eqd);
1406
1407                 be_aic_update(aic, rx_pkts, tx_pkts, now);
1408 modify_eqd:
1409                 if (eqd != aic->prev_eqd) {
1410                         set_eqd[num].delay_multiplier = (eqd * 65)/100;
1411                         set_eqd[num].eq_id = eqo->q.id;
1412                         aic->prev_eqd = eqd;
1413                         num++;
1414                 }
1415         }
1416
1417         if (num)
1418                 be_cmd_modify_eqd(adapter, set_eqd, num);
1419 }
1420
1421 static void be_rx_stats_update(struct be_rx_obj *rxo,
1422                 struct be_rx_compl_info *rxcp)
1423 {
1424         struct be_rx_stats *stats = rx_stats(rxo);
1425
1426         u64_stats_update_begin(&stats->sync);
1427         stats->rx_compl++;
1428         stats->rx_bytes += rxcp->pkt_size;
1429         stats->rx_pkts++;
1430         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1431                 stats->rx_mcast_pkts++;
1432         if (rxcp->err)
1433                 stats->rx_compl_err++;
1434         u64_stats_update_end(&stats->sync);
1435 }
1436
1437 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1438 {
1439         /* L4 checksum is not reliable for non-TCP/UDP packets.
1440          * Also ignore ipcksm for ipv6 pkts */
1441         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1442                                 (rxcp->ip_csum || rxcp->ipv6);
1443 }
1444
1445 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1446                                                 u16 frag_idx)
1447 {
1448         struct be_adapter *adapter = rxo->adapter;
1449         struct be_rx_page_info *rx_page_info;
1450         struct be_queue_info *rxq = &rxo->q;
1451
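        /* Fragments carved out of one "big page" share a single DMA
         * mapping; it is released only when the fragment flagged as
         * last_page_user is consumed (see be_post_rx_frags()).
         */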
1452         rx_page_info = &rxo->page_info_tbl[frag_idx];
1453         BUG_ON(!rx_page_info->page);
1454
1455         if (rx_page_info->last_page_user) {
1456                 dma_unmap_page(&adapter->pdev->dev,
1457                                dma_unmap_addr(rx_page_info, bus),
1458                                adapter->big_page_size, DMA_FROM_DEVICE);
1459                 rx_page_info->last_page_user = false;
1460         }
1461
1462         atomic_dec(&rxq->used);
1463         return rx_page_info;
1464 }
1465
1466 /* Throw away the data in the Rx completion */
1467 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1468                                 struct be_rx_compl_info *rxcp)
1469 {
1470         struct be_queue_info *rxq = &rxo->q;
1471         struct be_rx_page_info *page_info;
1472         u16 i, num_rcvd = rxcp->num_rcvd;
1473
1474         for (i = 0; i < num_rcvd; i++) {
1475                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1476                 put_page(page_info->page);
1477                 memset(page_info, 0, sizeof(*page_info));
1478                 index_inc(&rxcp->rxq_idx, rxq->len);
1479         }
1480 }
1481
1482 /*
1483  * skb_fill_rx_data forms a complete skb for an ether frame
1484  * indicated by rxcp.
1485  */
1486 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1487                              struct be_rx_compl_info *rxcp)
1488 {
1489         struct be_queue_info *rxq = &rxo->q;
1490         struct be_rx_page_info *page_info;
1491         u16 i, j;
1492         u16 hdr_len, curr_frag_len, remaining;
1493         u8 *start;
1494
1495         page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1496         start = page_address(page_info->page) + page_info->page_offset;
1497         prefetch(start);
1498
1499         /* Copy data in the first descriptor of this completion */
1500         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1501
1502         skb->len = curr_frag_len;
1503         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1504                 memcpy(skb->data, start, curr_frag_len);
1505                 /* Complete packet has now been moved to data */
1506                 put_page(page_info->page);
1507                 skb->data_len = 0;
1508                 skb->tail += curr_frag_len;
1509         } else {
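                /* Pull only the Ethernet header into the linear area and
                 * attach the rest of this fragment as page data.
                 */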
1510                 hdr_len = ETH_HLEN;
1511                 memcpy(skb->data, start, hdr_len);
1512                 skb_shinfo(skb)->nr_frags = 1;
1513                 skb_frag_set_page(skb, 0, page_info->page);
1514                 skb_shinfo(skb)->frags[0].page_offset =
1515                                         page_info->page_offset + hdr_len;
1516                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1517                 skb->data_len = curr_frag_len - hdr_len;
1518                 skb->truesize += rx_frag_size;
1519                 skb->tail += hdr_len;
1520         }
1521         page_info->page = NULL;
1522
1523         if (rxcp->pkt_size <= rx_frag_size) {
1524                 BUG_ON(rxcp->num_rcvd != 1);
1525                 return;
1526         }
1527
1528         /* More frags present for this completion */
1529         index_inc(&rxcp->rxq_idx, rxq->len);
1530         remaining = rxcp->pkt_size - curr_frag_len;
1531         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1532                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1533                 curr_frag_len = min(remaining, rx_frag_size);
1534
1535                 /* Coalesce all frags from the same physical page in one slot */
1536                 if (page_info->page_offset == 0) {
1537                         /* Fresh page */
1538                         j++;
1539                         skb_frag_set_page(skb, j, page_info->page);
1540                         skb_shinfo(skb)->frags[j].page_offset =
1541                                                         page_info->page_offset;
1542                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1543                         skb_shinfo(skb)->nr_frags++;
1544                 } else {
1545                         put_page(page_info->page);
1546                 }
1547
1548                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1549                 skb->len += curr_frag_len;
1550                 skb->data_len += curr_frag_len;
1551                 skb->truesize += rx_frag_size;
1552                 remaining -= curr_frag_len;
1553                 index_inc(&rxcp->rxq_idx, rxq->len);
1554                 page_info->page = NULL;
1555         }
1556         BUG_ON(j > MAX_SKB_FRAGS);
1557 }
1558
1559 /* Process the RX completion indicated by rxcp when GRO is disabled */
1560 static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
1561                                 struct be_rx_compl_info *rxcp)
1562 {
1563         struct be_adapter *adapter = rxo->adapter;
1564         struct net_device *netdev = adapter->netdev;
1565         struct sk_buff *skb;
1566
1567         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1568         if (unlikely(!skb)) {
1569                 rx_stats(rxo)->rx_drops_no_skbs++;
1570                 be_rx_compl_discard(rxo, rxcp);
1571                 return;
1572         }
1573
1574         skb_fill_rx_data(rxo, skb, rxcp);
1575
1576         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1577                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1578         else
1579                 skb_checksum_none_assert(skb);
1580
1581         skb->protocol = eth_type_trans(skb, netdev);
1582         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1583         if (netdev->features & NETIF_F_RXHASH)
1584                 skb->rxhash = rxcp->rss_hash;
1585         skb_mark_napi_id(skb, napi);
1586
1587         if (rxcp->vlanf)
1588                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1589
1590         netif_receive_skb(skb);
1591 }
1592
1593 /* Process the RX completion indicated by rxcp when GRO is enabled */
1594 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1595                                     struct napi_struct *napi,
1596                                     struct be_rx_compl_info *rxcp)
1597 {
1598         struct be_adapter *adapter = rxo->adapter;
1599         struct be_rx_page_info *page_info;
1600         struct sk_buff *skb = NULL;
1601         struct be_queue_info *rxq = &rxo->q;
1602         u16 remaining, curr_frag_len;
1603         u16 i, j;
1604
1605         skb = napi_get_frags(napi);
1606         if (!skb) {
1607                 be_rx_compl_discard(rxo, rxcp);
1608                 return;
1609         }
1610
1611         remaining = rxcp->pkt_size;
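        /* j is u16: it starts at 0xffff and the i == 0 case below
         * increments it to 0 before first use.
         */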
1612         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1613                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1614
1615                 curr_frag_len = min(remaining, rx_frag_size);
1616
1617                 /* Coalesce all frags from the same physical page in one slot */
1618                 if (i == 0 || page_info->page_offset == 0) {
1619                         /* First frag or Fresh page */
1620                         j++;
1621                         skb_frag_set_page(skb, j, page_info->page);
1622                         skb_shinfo(skb)->frags[j].page_offset =
1623                                                         page_info->page_offset;
1624                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1625                 } else {
1626                         put_page(page_info->page);
1627                 }
1628                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1629                 skb->truesize += rx_frag_size;
1630                 remaining -= curr_frag_len;
1631                 index_inc(&rxcp->rxq_idx, rxq->len);
1632                 memset(page_info, 0, sizeof(*page_info));
1633         }
1634         BUG_ON(j > MAX_SKB_FRAGS);
1635
1636         skb_shinfo(skb)->nr_frags = j + 1;
1637         skb->len = rxcp->pkt_size;
1638         skb->data_len = rxcp->pkt_size;
1639         skb->ip_summed = CHECKSUM_UNNECESSARY;
1640         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1641         if (adapter->netdev->features & NETIF_F_RXHASH)
1642                 skb->rxhash = rxcp->rss_hash;
1643         skb_mark_napi_id(skb, napi);
1644
1645         if (rxcp->vlanf)
1646                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1647
1648         napi_gro_frags(napi);
1649 }
1650
1651 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1652                                  struct be_rx_compl_info *rxcp)
1653 {
1654         rxcp->pkt_size =
1655                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1656         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1657         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1658         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1659         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1660         rxcp->ip_csum =
1661                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1662         rxcp->l4_csum =
1663                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1664         rxcp->ipv6 =
1665                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1666         rxcp->rxq_idx =
1667                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1668         rxcp->num_rcvd =
1669                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1670         rxcp->pkt_type =
1671                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1672         rxcp->rss_hash =
1673                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1674         if (rxcp->vlanf) {
1675                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1676                                           compl);
1677                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1678                                                compl);
1679         }
1680         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1681 }
1682
1683 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1684                                  struct be_rx_compl_info *rxcp)
1685 {
1686         rxcp->pkt_size =
1687                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1688         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1689         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1690         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1691         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1692         rxcp->ip_csum =
1693                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1694         rxcp->l4_csum =
1695                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1696         rxcp->ipv6 =
1697                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1698         rxcp->rxq_idx =
1699                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1700         rxcp->num_rcvd =
1701                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1702         rxcp->pkt_type =
1703                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1704         rxcp->rss_hash =
1705                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1706         if (rxcp->vlanf) {
1707                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1708                                           compl);
1709                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1710                                                compl);
1711         }
1712         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1713         rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1714                                       ip_frag, compl);
1715 }
1716
1717 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1718 {
1719         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1720         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1721         struct be_adapter *adapter = rxo->adapter;
1722
1723         /* For checking the valid bit it is OK to use either definition as the
1724          * valid bit is at the same position in both v0 and v1 Rx compl */
1725         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1726                 return NULL;
1727
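        /* Read the rest of the compl only after the valid bit is seen */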
1728         rmb();
1729         be_dws_le_to_cpu(compl, sizeof(*compl));
1730
1731         if (adapter->be3_native)
1732                 be_parse_rx_compl_v1(compl, rxcp);
1733         else
1734                 be_parse_rx_compl_v0(compl, rxcp);
1735
1736         if (rxcp->ip_frag)
1737                 rxcp->l4_csum = 0;
1738
1739         if (rxcp->vlanf) {
1740                 /* vlanf could be wrongly set in some cards.
1741                  * Ignore it if vtm is not set */
1742                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1743                         rxcp->vlanf = 0;
1744
1745                 if (!lancer_chip(adapter))
1746                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1747
1748                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1749                     !adapter->vlan_tag[rxcp->vlan_tag])
1750                         rxcp->vlanf = 0;
1751         }
1752
1753         /* As the compl has been parsed, reset it; we won't touch it again */
1754         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1755
1756         queue_tail_inc(&rxo->cq);
1757         return rxcp;
1758 }
1759
1760 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1761 {
1762         u32 order = get_order(size);
1763
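        /* Higher-order allocations must be compound pages so that
         * get_page()/put_page() on individual fragments pin the whole
         * allocation.
         */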
1764         if (order > 0)
1765                 gfp |= __GFP_COMP;
1766         return  alloc_pages(gfp, order);
1767 }
1768
1769 /*
1770  * Allocate a page, split it to fragments of size rx_frag_size and post as
1771  * receive buffers to BE
1772  */
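/* e.g. with 4K pages and the default rx_frag_size of 2048, big_page_size
 * is a single 4K page and each page yields two receive fragments.
 */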
1773 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1774 {
1775         struct be_adapter *adapter = rxo->adapter;
1776         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1777         struct be_queue_info *rxq = &rxo->q;
1778         struct page *pagep = NULL;
1779         struct be_eth_rx_d *rxd;
1780         u64 page_dmaaddr = 0, frag_dmaaddr;
1781         u32 posted, page_offset = 0;
1782
1783         page_info = &rxo->page_info_tbl[rxq->head];
1784         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1785                 if (!pagep) {
1786                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1787                         if (unlikely(!pagep)) {
1788                                 rx_stats(rxo)->rx_post_fail++;
1789                                 break;
1790                         }
1791                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1792                                                     0, adapter->big_page_size,
1793                                                     DMA_FROM_DEVICE);
1794                         page_info->page_offset = 0;
1795                 } else {
1796                         get_page(pagep);
1797                         page_info->page_offset = page_offset + rx_frag_size;
1798                 }
1799                 page_offset = page_info->page_offset;
1800                 page_info->page = pagep;
1801                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1802                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1803
1804                 rxd = queue_head_node(rxq);
1805                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1806                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1807
1808                 /* Any space left in the current big page for another frag? */
1809                 if ((page_offset + rx_frag_size + rx_frag_size) >
1810                                         adapter->big_page_size) {
1811                         pagep = NULL;
1812                         page_info->last_page_user = true;
1813                 }
1814
1815                 prev_page_info = page_info;
1816                 queue_head_inc(rxq);
1817                 page_info = &rxo->page_info_tbl[rxq->head];
1818         }
1819         if (pagep)
1820                 prev_page_info->last_page_user = true;
1821
1822         if (posted) {
1823                 atomic_add(posted, &rxq->used);
1824                 if (rxo->rx_post_starved)
1825                         rxo->rx_post_starved = false;
1826                 be_rxq_notify(adapter, rxq->id, posted);
1827         } else if (atomic_read(&rxq->used) == 0) {
1828                 /* Let be_worker replenish when memory is available */
1829                 rxo->rx_post_starved = true;
1830         }
1831 }
1832
1833 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1834 {
1835         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1836
1837         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1838                 return NULL;
1839
1840         rmb();
1841         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1842
1843         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1844
1845         queue_tail_inc(tx_cq);
1846         return txcp;
1847 }
1848
1849 static u16 be_tx_compl_process(struct be_adapter *adapter,
1850                 struct be_tx_obj *txo, u16 last_index)
1851 {
1852         struct be_queue_info *txq = &txo->q;
1853         struct be_eth_wrb *wrb;
1854         struct sk_buff **sent_skbs = txo->sent_skb_list;
1855         struct sk_buff *sent_skb;
1856         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1857         bool unmap_skb_hdr = true;
1858
1859         sent_skb = sent_skbs[txq->tail];
1860         BUG_ON(!sent_skb);
1861         sent_skbs[txq->tail] = NULL;
1862
1863         /* skip header wrb */
1864         queue_tail_inc(txq);
1865
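        /* Walk the data WRBs up to last_index, unmapping the skb head on
         * the first data WRB and each page fragment thereafter.
         */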
1866         do {
1867                 cur_index = txq->tail;
1868                 wrb = queue_tail_node(txq);
1869                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1870                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1871                 unmap_skb_hdr = false;
1872
1873                 num_wrbs++;
1874                 queue_tail_inc(txq);
1875         } while (cur_index != last_index);
1876
1877         kfree_skb(sent_skb);
1878         return num_wrbs;
1879 }
1880
1881 /* Return the number of events in the event queue */
1882 static inline int events_get(struct be_eq_obj *eqo)
1883 {
1884         struct be_eq_entry *eqe;
1885         int num = 0;
1886
1887         do {
1888                 eqe = queue_tail_node(&eqo->q);
1889                 if (eqe->evt == 0)
1890                         break;
1891
1892                 rmb();
1893                 eqe->evt = 0;
1894                 num++;
1895                 queue_tail_inc(&eqo->q);
1896         } while (true);
1897
1898         return num;
1899 }
1900
1901 /* Leaves the EQ in disarmed state */
1902 static void be_eq_clean(struct be_eq_obj *eqo)
1903 {
1904         int num = events_get(eqo);
1905
1906         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1907 }
1908
1909 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1910 {
1911         struct be_rx_page_info *page_info;
1912         struct be_queue_info *rxq = &rxo->q;
1913         struct be_queue_info *rx_cq = &rxo->cq;
1914         struct be_rx_compl_info *rxcp;
1915         struct be_adapter *adapter = rxo->adapter;
1916         int flush_wait = 0;
1917         u16 tail;
1918
1919         /* Consume pending rx completions.
1920          * Wait for the flush completion (identified by zero num_rcvd)
1921          * to arrive. Notify CQ even when there are no more CQ entries
1922          * for HW to flush partially coalesced CQ entries.
1923          * In Lancer, there is no need to wait for flush compl.
1924          */
1925         for (;;) {
1926                 rxcp = be_rx_compl_get(rxo);
1927                 if (rxcp == NULL) {
1928                         if (lancer_chip(adapter))
1929                                 break;
1930
1931                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1932                                 dev_warn(&adapter->pdev->dev,
1933                                          "did not receive flush compl\n");
1934                                 break;
1935                         }
1936                         be_cq_notify(adapter, rx_cq->id, true, 0);
1937                         mdelay(1);
1938                 } else {
1939                         be_rx_compl_discard(rxo, rxcp);
1940                         be_cq_notify(adapter, rx_cq->id, false, 1);
1941                         if (rxcp->num_rcvd == 0)
1942                                 break;
1943                 }
1944         }
1945
1946         /* After cleanup, leave the CQ in unarmed state */
1947         be_cq_notify(adapter, rx_cq->id, false, 0);
1948
1949         /* Then free posted rx buffers that were not used */
1950         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1951         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1952                 page_info = get_rx_page_info(rxo, tail);
1953                 put_page(page_info->page);
1954                 memset(page_info, 0, sizeof(*page_info));
1955         }
1956         BUG_ON(atomic_read(&rxq->used));
1957         rxq->tail = rxq->head = 0;
1958 }
1959
1960 static void be_tx_compl_clean(struct be_adapter *adapter)
1961 {
1962         struct be_tx_obj *txo;
1963         struct be_queue_info *txq;
1964         struct be_eth_tx_compl *txcp;
1965         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1966         struct sk_buff *sent_skb;
1967         bool dummy_wrb;
1968         int i, pending_txqs;
1969
1970         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1971         do {
1972                 pending_txqs = adapter->num_tx_qs;
1973
1974                 for_all_tx_queues(adapter, txo, i) {
1975                         txq = &txo->q;
1976                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1977                                 end_idx =
1978                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1979                                                       wrb_index, txcp);
1980                                 num_wrbs += be_tx_compl_process(adapter, txo,
1981                                                                 end_idx);
1982                                 cmpl++;
1983                         }
1984                         if (cmpl) {
1985                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1986                                 atomic_sub(num_wrbs, &txq->used);
1987                                 cmpl = 0;
1988                                 num_wrbs = 0;
1989                         }
1990                         if (atomic_read(&txq->used) == 0)
1991                                 pending_txqs--;
1992                 }
1993
1994                 if (pending_txqs == 0 || ++timeo > 200)
1995                         break;
1996
1997                 mdelay(1);
1998         } while (true);
1999
2000         for_all_tx_queues(adapter, txo, i) {
2001                 txq = &txo->q;
2002                 if (atomic_read(&txq->used))
2003                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2004                                 atomic_read(&txq->used));
2005
2006                 /* free posted tx for which compls will never arrive */
2007                 while (atomic_read(&txq->used)) {
2008                         sent_skb = txo->sent_skb_list[txq->tail];
2009                         end_idx = txq->tail;
2010                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2011                                                    &dummy_wrb);
2012                         index_adv(&end_idx, num_wrbs - 1, txq->len);
2013                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2014                         atomic_sub(num_wrbs, &txq->used);
2015                 }
2016         }
2017 }
2018
2019 static void be_evt_queues_destroy(struct be_adapter *adapter)
2020 {
2021         struct be_eq_obj *eqo;
2022         int i;
2023
2024         for_all_evt_queues(adapter, eqo, i) {
2025                 if (eqo->q.created) {
2026                         be_eq_clean(eqo);
2027                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2028                         napi_hash_del(&eqo->napi);
2029                         netif_napi_del(&eqo->napi);
2030                 }
2031                 be_queue_free(adapter, &eqo->q);
2032         }
2033 }
2034
2035 static int be_evt_queues_create(struct be_adapter *adapter)
2036 {
2037         struct be_queue_info *eq;
2038         struct be_eq_obj *eqo;
2039         struct be_aic_obj *aic;
2040         int i, rc;
2041
2042         adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2043                                     adapter->cfg_num_qs);
2044
2045         for_all_evt_queues(adapter, eqo, i) {
2046                 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2047                                BE_NAPI_WEIGHT);
2048                 napi_hash_add(&eqo->napi);
2049                 aic = &adapter->aic_obj[i];
2050                 eqo->adapter = adapter;
2051                 eqo->tx_budget = BE_TX_BUDGET;
2052                 eqo->idx = i;
2053                 aic->max_eqd = BE_MAX_EQD;
2054                 aic->enable = true;
2055
2056                 eq = &eqo->q;
2057                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2058                                         sizeof(struct be_eq_entry));
2059                 if (rc)
2060                         return rc;
2061
2062                 rc = be_cmd_eq_create(adapter, eqo);
2063                 if (rc)
2064                         return rc;
2065         }
2066         return 0;
2067 }
2068
2069 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2070 {
2071         struct be_queue_info *q;
2072
2073         q = &adapter->mcc_obj.q;
2074         if (q->created)
2075                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2076         be_queue_free(adapter, q);
2077
2078         q = &adapter->mcc_obj.cq;
2079         if (q->created)
2080                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2081         be_queue_free(adapter, q);
2082 }
2083
2084 /* Must be called only after TX qs are created as MCC shares TX EQ */
2085 static int be_mcc_queues_create(struct be_adapter *adapter)
2086 {
2087         struct be_queue_info *q, *cq;
2088
2089         cq = &adapter->mcc_obj.cq;
2090         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2091                         sizeof(struct be_mcc_compl)))
2092                 goto err;
2093
2094         /* Use the default EQ for MCC completions */
2095         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2096                 goto mcc_cq_free;
2097
2098         q = &adapter->mcc_obj.q;
2099         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2100                 goto mcc_cq_destroy;
2101
2102         if (be_cmd_mccq_create(adapter, q, cq))
2103                 goto mcc_q_free;
2104
2105         return 0;
2106
2107 mcc_q_free:
2108         be_queue_free(adapter, q);
2109 mcc_cq_destroy:
2110         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2111 mcc_cq_free:
2112         be_queue_free(adapter, cq);
2113 err:
2114         return -1;
2115 }
2116
2117 static void be_tx_queues_destroy(struct be_adapter *adapter)
2118 {
2119         struct be_queue_info *q;
2120         struct be_tx_obj *txo;
2121         u8 i;
2122
2123         for_all_tx_queues(adapter, txo, i) {
2124                 q = &txo->q;
2125                 if (q->created)
2126                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2127                 be_queue_free(adapter, q);
2128
2129                 q = &txo->cq;
2130                 if (q->created)
2131                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2132                 be_queue_free(adapter, q);
2133         }
2134 }
2135
2136 static int be_tx_qs_create(struct be_adapter *adapter)
2137 {
2138         struct be_queue_info *cq, *eq;
2139         struct be_tx_obj *txo;
2140         int status, i;
2141
2142         adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2143
2144         for_all_tx_queues(adapter, txo, i) {
2145                 cq = &txo->cq;
2146                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2147                                         sizeof(struct be_eth_tx_compl));
2148                 if (status)
2149                         return status;
2150
2151                 u64_stats_init(&txo->stats.sync);
2152                 u64_stats_init(&txo->stats.sync_compl);
2153
2154                 /* If num_evt_qs is less than num_tx_qs, then more than
2155                  * one txq shares an eq
2156                  */
2157                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2158                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2159                 if (status)
2160                         return status;
2161
2162                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2163                                         sizeof(struct be_eth_wrb));
2164                 if (status)
2165                         return status;
2166
2167                 status = be_cmd_txq_create(adapter, txo);
2168                 if (status)
2169                         return status;
2170         }
2171
2172         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2173                  adapter->num_tx_qs);
2174         return 0;
2175 }
2176
2177 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2178 {
2179         struct be_queue_info *q;
2180         struct be_rx_obj *rxo;
2181         int i;
2182
2183         for_all_rx_queues(adapter, rxo, i) {
2184                 q = &rxo->cq;
2185                 if (q->created)
2186                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2187                 be_queue_free(adapter, q);
2188         }
2189 }
2190
2191 static int be_rx_cqs_create(struct be_adapter *adapter)
2192 {
2193         struct be_queue_info *eq, *cq;
2194         struct be_rx_obj *rxo;
2195         int rc, i;
2196
2197         /* We can create as many RSS rings as there are EQs. */
2198         adapter->num_rx_qs = adapter->num_evt_qs;
2199
2200         /* We'll use RSS only if at least 2 RSS rings are supported.
2201          * When RSS is used, we'll need a default RXQ for non-IP traffic.
2202          */
2203         if (adapter->num_rx_qs > 1)
2204                 adapter->num_rx_qs++;
2205
2206         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2207         for_all_rx_queues(adapter, rxo, i) {
2208                 rxo->adapter = adapter;
2209                 cq = &rxo->cq;
2210                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2211                                 sizeof(struct be_eth_rx_compl));
2212                 if (rc)
2213                         return rc;
2214
2215                 u64_stats_init(&rxo->stats.sync);
2216                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2217                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2218                 if (rc)
2219                         return rc;
2220         }
2221
2222         dev_info(&adapter->pdev->dev,
2223                  "created %d RSS queue(s) and 1 default RX queue\n",
2224                  adapter->num_rx_qs - 1);
2225         return 0;
2226 }
2227
2228 static irqreturn_t be_intx(int irq, void *dev)
2229 {
2230         struct be_eq_obj *eqo = dev;
2231         struct be_adapter *adapter = eqo->adapter;
2232         int num_evts = 0;
2233
2234         /* IRQ is not expected when NAPI is scheduled as the EQ
2235          * will not be armed.
2236          * But, this can happen on Lancer INTx where it takes
2237          * a while to de-assert INTx or in BE2 where occasionally
2238          * an interrupt may be raised even when EQ is unarmed.
2239          * If NAPI is already scheduled, then counting & notifying
2240          * events will orphan them.
2241          */
2242         if (napi_schedule_prep(&eqo->napi)) {
2243                 num_evts = events_get(eqo);
2244                 __napi_schedule(&eqo->napi);
2245                 if (num_evts)
2246                         eqo->spurious_intr = 0;
2247         }
2248         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2249
2250         /* Return IRQ_HANDLED only for the first spurious intr
2251          * after a valid intr to stop the kernel from branding
2252          * this irq as a bad one!
2253          */
2254         if (num_evts || eqo->spurious_intr++ == 0)
2255                 return IRQ_HANDLED;
2256         else
2257                 return IRQ_NONE;
2258 }
2259
2260 static irqreturn_t be_msix(int irq, void *dev)
2261 {
2262         struct be_eq_obj *eqo = dev;
2263
2264         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2265         napi_schedule(&eqo->napi);
2266         return IRQ_HANDLED;
2267 }
2268
2269 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2270 {
2271         return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
2272 }
2273
2274 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2275                         int budget, int polling)
2276 {
2277         struct be_adapter *adapter = rxo->adapter;
2278         struct be_queue_info *rx_cq = &rxo->cq;
2279         struct be_rx_compl_info *rxcp;
2280         u32 work_done;
2281
2282         for (work_done = 0; work_done < budget; work_done++) {
2283                 rxcp = be_rx_compl_get(rxo);
2284                 if (!rxcp)
2285                         break;
2286
2287                 /* Is it a flush compl that has no data? */
2288                 if (unlikely(rxcp->num_rcvd == 0))
2289                         goto loop_continue;
2290
2291                 /* Discard compl with partial DMA Lancer B0 */
2292                 if (unlikely(!rxcp->pkt_size)) {
2293                         be_rx_compl_discard(rxo, rxcp);
2294                         goto loop_continue;
2295                 }
2296
2297                 /* On BE drop pkts that arrive due to imperfect filtering in
2298                  * promiscuous mode on some SKUs
2299                  */
2300                 if (unlikely(rxcp->port != adapter->port_num &&
2301                                 !lancer_chip(adapter))) {
2302                         be_rx_compl_discard(rxo, rxcp);
2303                         goto loop_continue;
2304                 }
2305
2306                 /* Don't do gro when we're busy_polling */
2307                 if (do_gro(rxcp) && polling != BUSY_POLLING)
2308                         be_rx_compl_process_gro(rxo, napi, rxcp);
2309                 else
2310                         be_rx_compl_process(rxo, napi, rxcp);
2311
2312 loop_continue:
2313                 be_rx_stats_update(rxo, rxcp);
2314         }
2315
2316         if (work_done) {
2317                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2318
2319                 /* When an rx-obj gets into post_starved state, just
2320                  * let be_worker do the posting.
2321                  */
2322                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2323                     !rxo->rx_post_starved)
2324                         be_post_rx_frags(rxo, GFP_ATOMIC);
2325         }
2326
2327         return work_done;
2328 }
2329
2330 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2331                           int budget, int idx)
2332 {
2333         struct be_eth_tx_compl *txcp;
2334         int num_wrbs = 0, work_done;
2335
2336         for (work_done = 0; work_done < budget; work_done++) {
2337                 txcp = be_tx_compl_get(&txo->cq);
2338                 if (!txcp)
2339                         break;
2340                 num_wrbs += be_tx_compl_process(adapter, txo,
2341                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2342                                         wrb_index, txcp));
2343         }
2344
2345         if (work_done) {
2346                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2347                 atomic_sub(num_wrbs, &txo->q.used);
2348
2349                 /* As Tx wrbs have been freed up, wake up netdev queue
2350                  * if it was stopped due to lack of tx wrbs.  */
2351                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2352                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2353                         netif_wake_subqueue(adapter->netdev, idx);
2354                 }
2355
2356                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2357                 tx_stats(txo)->tx_compl += work_done;
2358                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2359         }
2360         return (work_done < budget); /* Done */
2361 }
2362
2363 int be_poll(struct napi_struct *napi, int budget)
2364 {
2365         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2366         struct be_adapter *adapter = eqo->adapter;
2367         int max_work = 0, work, i, num_evts;
2368         struct be_rx_obj *rxo;
2369         bool tx_done;
2370
2371         num_evts = events_get(eqo);
2372
2373         /* Process all TXQs serviced by this EQ */
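        /* TXQs are striped across EQs: EQn services TXQn, TXQ(n+num_evt_qs),
         * and so on; e.g. with 8 TXQs and 4 EQs, EQ0 handles TXQ0 and TXQ4.
         */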
2374         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2375                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2376                                         eqo->tx_budget, i);
2377                 if (!tx_done)
2378                         max_work = budget;
2379         }
2380
2381         if (be_lock_napi(eqo)) {
2382                 /* This loop will iterate twice for EQ0 in which
2383                  * completions of the last RXQ (default one) are also processed
2384                  * For other EQs the loop iterates only once
2385                  */
2386                 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2387                         work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2388                         max_work = max(work, max_work);
2389                 }
2390                 be_unlock_napi(eqo);
2391         } else {
2392                 max_work = budget;
2393         }
2394
2395         if (is_mcc_eqo(eqo))
2396                 be_process_mcc(adapter);
2397
2398         if (max_work < budget) {
2399                 napi_complete(napi);
2400                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2401         } else {
2402                 /* As we'll continue in polling mode, count and clear events */
2403                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2404         }
2405         return max_work;
2406 }
2407
2408 #ifdef CONFIG_NET_RX_BUSY_POLL
2409 static int be_busy_poll(struct napi_struct *napi)
2410 {
2411         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2412         struct be_adapter *adapter = eqo->adapter;
2413         struct be_rx_obj *rxo;
2414         int i, work = 0;
2415
2416         if (!be_lock_busy_poll(eqo))
2417                 return LL_FLUSH_BUSY;
2418
2419         for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2420                 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2421                 if (work)
2422                         break;
2423         }
2424
2425         be_unlock_busy_poll(eqo);
2426         return work;
2427 }
2428 #endif
2429
2430 void be_detect_error(struct be_adapter *adapter)
2431 {
2432         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2433         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2434         u32 i;
2435
2436         if (be_hw_error(adapter))
2437                 return;
2438
2439         if (lancer_chip(adapter)) {
2440                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2441                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2442                         sliport_err1 = ioread32(adapter->db +
2443                                         SLIPORT_ERROR1_OFFSET);
2444                         sliport_err2 = ioread32(adapter->db +
2445                                         SLIPORT_ERROR2_OFFSET);
2446                 }
2447         } else {
2448                 pci_read_config_dword(adapter->pdev,
2449                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2450                 pci_read_config_dword(adapter->pdev,
2451                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2452                 pci_read_config_dword(adapter->pdev,
2453                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2454                 pci_read_config_dword(adapter->pdev,
2455                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2456
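                /* Clear the bits FW masks off as don't-care; any bit still
                 * set names an unrecoverable error from ue_status_low_desc[]
                 * or ue_status_hi_desc[].
                 */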
2457                 ue_lo = (ue_lo & ~ue_lo_mask);
2458                 ue_hi = (ue_hi & ~ue_hi_mask);
2459         }
2460
2461         /* On certain platforms BE hardware can indicate spurious UEs.
2462          * Allow the h/w to stop working completely in case of a real UE.
2463          * Hence, hw_error is not set on UE detection, only on SLIPORT errors.
2464          */
2465         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2466                 adapter->hw_error = true;
2467                 /* Do not log error messages if it's a FW reset */
2468                 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2469                     sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2470                         dev_info(&adapter->pdev->dev,
2471                                  "Firmware update in progress\n");
2472                         return;
2473                 } else {
2474                         dev_err(&adapter->pdev->dev,
2475                                 "Error detected in the card\n");
2476                 }
2477         }
2478
2479         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2480                 dev_err(&adapter->pdev->dev,
2481                         "ERR: sliport status 0x%x\n", sliport_status);
2482                 dev_err(&adapter->pdev->dev,
2483                         "ERR: sliport error1 0x%x\n", sliport_err1);
2484                 dev_err(&adapter->pdev->dev,
2485                         "ERR: sliport error2 0x%x\n", sliport_err2);
2486         }
2487
2488         if (ue_lo) {
2489                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2490                         if (ue_lo & 1)
2491                                 dev_err(&adapter->pdev->dev,
2492                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2493                 }
2494         }
2495
2496         if (ue_hi) {
2497                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2498                         if (ue_hi & 1)
2499                                 dev_err(&adapter->pdev->dev,
2500                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2501                 }
2502         }
2504 }
2505
2506 static void be_msix_disable(struct be_adapter *adapter)
2507 {
2508         if (msix_enabled(adapter)) {
2509                 pci_disable_msix(adapter->pdev);
2510                 adapter->num_msix_vec = 0;
2511                 adapter->num_msix_roce_vec = 0;
2512         }
2513 }
2514
2515 static int be_msix_enable(struct be_adapter *adapter)
2516 {
2517         int i, status, num_vec;
2518         struct device *dev = &adapter->pdev->dev;
2519
2520         /* If RoCE is supported, program the max number of NIC vectors that
2521          * may be configured via set-channels, along with vectors needed for
2522          * RoCe. Else, just program the number we'll use initially.
2523          * RoCE. Else, just program the number we'll use initially.
2524         if (be_roce_supported(adapter))
2525                 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2526                                 2 * num_online_cpus());
2527         else
2528                 num_vec = adapter->cfg_num_qs;
2529
2530         for (i = 0; i < num_vec; i++)
2531                 adapter->msix_entries[i].entry = i;
2532
2533         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2534         if (status == 0) {
2535                 goto done;
2536         } else if (status >= MIN_MSIX_VECTORS) {
2537                 num_vec = status;
2538                 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2539                                          num_vec);
2540                 if (!status)
2541                         goto done;
2542         }
2543
2544         dev_warn(dev, "MSI-x enable failed\n");
2545
2546         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2547         if (!be_physfn(adapter))
2548                 return status;
2549         return 0;
2550 done:
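        /* Split the granted vectors between NIC and RoCE when RoCE is
         * supported and enough vectors were granted; e.g. 8 vectors ->
         * 4 for RoCE, 4 for the NIC.
         */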
2551         if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2552                 adapter->num_msix_roce_vec = num_vec / 2;
2553                 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2554                          adapter->num_msix_roce_vec);
2555         }
2556
2557         adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2558
2559         dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2560                  adapter->num_msix_vec);
2561         return 0;
2562 }
2563
2564 static inline int be_msix_vec_get(struct be_adapter *adapter,
2565                                 struct be_eq_obj *eqo)
2566 {
2567         return adapter->msix_entries[eqo->msix_idx].vector;
2568 }
2569
2570 static int be_msix_register(struct be_adapter *adapter)
2571 {
2572         struct net_device *netdev = adapter->netdev;
2573         struct be_eq_obj *eqo;
2574         int status, i, vec;
2575
2576         for_all_evt_queues(adapter, eqo, i) {
2577                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2578                 vec = be_msix_vec_get(adapter, eqo);
2579                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2580                 if (status)
2581                         goto err_msix;
2582         }
2583
2584         return 0;
2585 err_msix:
2586         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2587                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2588         dev_warn(&adapter->pdev->dev, "MSI-x request IRQ failed - err %d\n",
2589                 status);
2590         be_msix_disable(adapter);
2591         return status;
2592 }
2593
2594 static int be_irq_register(struct be_adapter *adapter)
2595 {
2596         struct net_device *netdev = adapter->netdev;
2597         int status;
2598
2599         if (msix_enabled(adapter)) {
2600                 status = be_msix_register(adapter);
2601                 if (status == 0)
2602                         goto done;
2603                 /* INTx is not supported for VF */
2604                 if (!be_physfn(adapter))
2605                         return status;
2606         }
2607
2608         /* INTx: only the first EQ is used */
2609         netdev->irq = adapter->pdev->irq;
2610         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2611                              &adapter->eq_obj[0]);
2612         if (status) {
2613                 dev_err(&adapter->pdev->dev,
2614                         "INTx request IRQ failed - err %d\n", status);
2615                 return status;
2616         }
2617 done:
2618         adapter->isr_registered = true;
2619         return 0;
2620 }
2621
2622 static void be_irq_unregister(struct be_adapter *adapter)
2623 {
2624         struct net_device *netdev = adapter->netdev;
2625         struct be_eq_obj *eqo;
2626         int i;
2627
2628         if (!adapter->isr_registered)
2629                 return;
2630
2631         /* INTx */
2632         if (!msix_enabled(adapter)) {
2633                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2634                 goto done;
2635         }
2636
2637         /* MSIx */
2638         for_all_evt_queues(adapter, eqo, i)
2639                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2640
2641 done:
2642         adapter->isr_registered = false;
2643 }
2644
2645 static void be_rx_qs_destroy(struct be_adapter *adapter)
2646 {
2647         struct be_queue_info *q;
2648         struct be_rx_obj *rxo;
2649         int i;
2650
2651         for_all_rx_queues(adapter, rxo, i) {
2652                 q = &rxo->q;
2653                 if (q->created) {
2654                         be_cmd_rxq_destroy(adapter, q);
2655                         be_rx_cq_clean(rxo);
2656                 }
2657                 be_queue_free(adapter, q);
2658         }
2659 }
2660
2661 static int be_close(struct net_device *netdev)
2662 {
2663         struct be_adapter *adapter = netdev_priv(netdev);
2664         struct be_eq_obj *eqo;
2665         int i;
2666
2667         be_roce_dev_close(adapter);
2668
2669         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2670                 for_all_evt_queues(adapter, eqo, i) {
2671                         napi_disable(&eqo->napi);
2672                         be_disable_busy_poll(eqo);
2673                 }
2674                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2675         }
2676
2677         be_async_mcc_disable(adapter);
2678
2679         /* Wait for all pending tx completions to arrive so that
2680          * all tx skbs are freed.
2681          */
2682         netif_tx_disable(netdev);
2683         be_tx_compl_clean(adapter);
2684
2685         be_rx_qs_destroy(adapter);
2686
2687         for (i = 1; i < (adapter->uc_macs + 1); i++)
2688                 be_cmd_pmac_del(adapter, adapter->if_handle,
2689                                 adapter->pmac_id[i], 0);
2690         adapter->uc_macs = 0;
2691
2692         for_all_evt_queues(adapter, eqo, i) {
2693                 if (msix_enabled(adapter))
2694                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2695                 else
2696                         synchronize_irq(netdev->irq);
2697                 be_eq_clean(eqo);
2698         }
2699
2700         be_irq_unregister(adapter);
2701
2702         return 0;
2703 }
2704
2705 static int be_rx_qs_create(struct be_adapter *adapter)
2706 {
2707         struct be_rx_obj *rxo;
2708         int rc, i, j;
2709         u8 rsstable[128];
2710
2711         for_all_rx_queues(adapter, rxo, i) {
2712                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2713                                     sizeof(struct be_eth_rx_d));
2714                 if (rc)
2715                         return rc;
2716         }
2717
2718         /* The FW would like the default RXQ to be created first */
2719         rxo = default_rxo(adapter);
2720         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2721                                adapter->if_handle, false, &rxo->rss_id);
2722         if (rc)
2723                 return rc;
2724
2725         for_all_rss_queues(adapter, rxo, i) {
2726                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2727                                        rx_frag_size, adapter->if_handle,
2728                                        true, &rxo->rss_id);
2729                 if (rc)
2730                         return rc;
2731         }
2732
2733         if (be_multi_rxq(adapter)) {
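                /* Stripe the RSS ring ids round-robin across the 128-entry
                 * indirection table; e.g. with 4 RSS rings the sequence
                 * rss_id[0..3] repeats 32 times.
                 */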
2734                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2735                         for_all_rss_queues(adapter, rxo, i) {
2736                                 if ((j + i) >= 128)
2737                                         break;
2738                                 rsstable[j + i] = rxo->rss_id;
2739                         }
2740                 }
2741                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2742                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2743
2744                 if (!BEx_chip(adapter))
2745                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2746                                                 RSS_ENABLE_UDP_IPV6;
2747         } else {
2748                 /* Disable RSS, if only default RX Q is created */
2749                 adapter->rss_flags = RSS_ENABLE_NONE;
2750         }
2751
2752         rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2753                                128);
2754         if (rc) {
2755                 adapter->rss_flags = RSS_ENABLE_NONE;
2756                 return rc;
2757         }
2758
2759         /* First time posting */
2760         for_all_rx_queues(adapter, rxo, i)
2761                 be_post_rx_frags(rxo, GFP_KERNEL);
2762         return 0;
2763 }
2764
2765 static int be_open(struct net_device *netdev)
2766 {
2767         struct be_adapter *adapter = netdev_priv(netdev);
2768         struct be_eq_obj *eqo;
2769         struct be_rx_obj *rxo;
2770         struct be_tx_obj *txo;
2771         u8 link_status;
2772         int status, i;
2773
2774         status = be_rx_qs_create(adapter);
2775         if (status)
2776                 goto err;
2777
2778         status = be_irq_register(adapter);
2779         if (status)
2780                 goto err;
2781
2782         for_all_rx_queues(adapter, rxo, i)
2783                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2784
2785         for_all_tx_queues(adapter, txo, i)
2786                 be_cq_notify(adapter, txo->cq.id, true, 0);
2787
2788         be_async_mcc_enable(adapter);
2789
2790         for_all_evt_queues(adapter, eqo, i) {
2791                 napi_enable(&eqo->napi);
2792                 be_enable_busy_poll(eqo);
2793                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2794         }
2795         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2796
2797         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2798         if (!status)
2799                 be_link_status_update(adapter, link_status);
2800
2801         netif_tx_start_all_queues(netdev);
2802         be_roce_dev_open(adapter);
2803         return 0;
2804 err:
2805         be_close(adapter->netdev);
2806         return -EIO;
2807 }
2808
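/*
 * Configure Wake-on-LAN. On enable, the FW is given the netdev MAC to
 * match magic packets against and PME is armed for D3hot/D3cold; on
 * disable, a zeroed MAC is programmed and PME is disarmed.
 */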
2809 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2810 {
2811         struct be_dma_mem cmd;
2812         int status = 0;
2813         u8 mac[ETH_ALEN];
2814
2815         memset(mac, 0, ETH_ALEN);
2816
2817         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2818         cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2819                                      GFP_KERNEL);
2820         if (!cmd.va)
2821                 return -ENOMEM;
2822
2823         if (enable) {
2824                 status = pci_write_config_dword(adapter->pdev,
2825                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2826                 if (status) {
2827                         dev_err(&adapter->pdev->dev,
2828                                 "Could not enable Wake-on-LAN\n");
2829                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2830                                           cmd.dma);
2831                         return status;
2832                 }
2833                 status = be_cmd_enable_magic_wol(adapter,
2834                                 adapter->netdev->dev_addr, &cmd);
2835                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2836                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2837         } else {
2838                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2839                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2840                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2841         }
2842
2843         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2844         return status;
2845 }
2846
2847 /*
2848  * Generate a seed MAC address from the PF MAC address using jhash.
2849  * MAC addresses for the VFs are assigned incrementally starting from the seed.
2850  * These addresses are programmed in the ASIC by the PF and the VF driver
2851  * queries for the MAC address during its probe.
2852  */
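/*
 * Note that only the last octet is bumped per VF (mac[5] += 1), so the
 * scheme implicitly assumes num_vfs is small enough not to wrap that
 * octet: a seed ending in ...:10 yields VF0 ...:10, VF1 ...:11, etc.
 */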
2853 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2854 {
2855         u32 vf;
2856         int status = 0;
2857         u8 mac[ETH_ALEN];
2858         struct be_vf_cfg *vf_cfg;
2859
2860         be_vf_eth_addr_generate(adapter, mac);
2861
2862         for_all_vfs(adapter, vf_cfg, vf) {
2863                 if (BEx_chip(adapter))
2864                         status = be_cmd_pmac_add(adapter, mac,
2865                                                  vf_cfg->if_handle,
2866                                                  &vf_cfg->pmac_id, vf + 1);
2867                 else
2868                         status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2869                                                 vf + 1);
2870
2871                 if (status)
2872                         dev_err(&adapter->pdev->dev,
2873                                 "MAC address assignment failed for VF %d\n", vf);
2874                 else
2875                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2876
2877                 mac[5] += 1;
2878         }
2879         return status;
2880 }
2881
2882 static int be_vfs_mac_query(struct be_adapter *adapter)
2883 {
2884         int status, vf;
2885         u8 mac[ETH_ALEN];
2886         struct be_vf_cfg *vf_cfg;
2887         bool active = false;
2888
2889         for_all_vfs(adapter, vf_cfg, vf) {
2890                 be_cmd_get_mac_from_list(adapter, mac, &active,
2891                                          &vf_cfg->pmac_id, 0);
2892
2893                 status = be_cmd_mac_addr_query(adapter, mac, false,
2894                                                vf_cfg->if_handle, 0);
2895                 if (status)
2896                         return status;
2897                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2898         }
2899         return 0;
2900 }
2901
2902 static void be_vf_clear(struct be_adapter *adapter)
2903 {
2904         struct be_vf_cfg *vf_cfg;
2905         u32 vf;
2906
2907         if (pci_vfs_assigned(adapter->pdev)) {
2908                 dev_warn(&adapter->pdev->dev,
2909                          "VFs are assigned to VMs: not disabling VFs\n");
2910                 goto done;
2911         }
2912
2913         pci_disable_sriov(adapter->pdev);
2914
2915         for_all_vfs(adapter, vf_cfg, vf) {
2916                 if (BEx_chip(adapter))
2917                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2918                                         vf_cfg->pmac_id, vf + 1);
2919                 else
2920                         be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2921                                        vf + 1);
2922
2923                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2924         }
2925 done:
2926         kfree(adapter->vf_cfg);
2927         adapter->num_vfs = 0;
2928 }
2929
2930 static void be_clear_queues(struct be_adapter *adapter)
2931 {
2932         be_mcc_queues_destroy(adapter);
2933         be_rx_cqs_destroy(adapter);
2934         be_tx_queues_destroy(adapter);
2935         be_evt_queues_destroy(adapter);
2936 }
2937
2938 static void be_cancel_worker(struct be_adapter *adapter)
2939 {
2940         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2941                 cancel_delayed_work_sync(&adapter->work);
2942                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2943         }
2944 }
2945
2946 static void be_mac_clear(struct be_adapter *adapter)
2947 {
2948         int i;
2949
2950         if (adapter->pmac_id) {
2951                 for (i = 0; i < (adapter->uc_macs + 1); i++)
2952                         be_cmd_pmac_del(adapter, adapter->if_handle,
2953                                         adapter->pmac_id[i], 0);
2954                 adapter->uc_macs = 0;
2955
2956                 kfree(adapter->pmac_id);
2957                 adapter->pmac_id = NULL;
2958         }
2959 }
2960
2961 static int be_clear(struct be_adapter *adapter)
2962 {
2963         be_cancel_worker(adapter);
2964
2965         if (sriov_enabled(adapter))
2966                 be_vf_clear(adapter);
2967
2968         /* delete the primary MAC along with the uc-mac list */
2969         be_mac_clear(adapter);
2970
2971         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2972
2973         be_clear_queues(adapter);
2974
2975         be_msix_disable(adapter);
2976         return 0;
2977 }
2978
2979 static int be_vfs_if_create(struct be_adapter *adapter)
2980 {
2981         struct be_resources res = {0};
2982         struct be_vf_cfg *vf_cfg;
2983         u32 cap_flags, en_flags, vf;
2984         int status = 0;
2985
2986         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2987                     BE_IF_FLAGS_MULTICAST;
2988
2989         for_all_vfs(adapter, vf_cfg, vf) {
2990                 if (!BE3_chip(adapter)) {
2991                         status = be_cmd_get_profile_config(adapter, &res,
2992                                                            vf + 1);
2993                         if (!status)
2994                                 cap_flags = res.if_cap_flags;
2995                 }
2996
2997                 /* If a FW profile exists, then cap_flags are updated */
2998                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2999                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
3000                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3001                                           &vf_cfg->if_handle, vf + 1);
3002                 if (status)
3003                         goto err;
3004         }
3005 err:
3006         return status;
3007 }
3008
3009 static int be_vf_setup_init(struct be_adapter *adapter)
3010 {
3011         struct be_vf_cfg *vf_cfg;
3012         int vf;
3013
3014         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3015                                   GFP_KERNEL);
3016         if (!adapter->vf_cfg)
3017                 return -ENOMEM;
3018
3019         for_all_vfs(adapter, vf_cfg, vf) {
3020                 vf_cfg->if_handle = -1;
3021                 vf_cfg->pmac_id = -1;
3022         }
3023         return 0;
3024 }
3025
3026 static int be_vf_setup(struct be_adapter *adapter)
3027 {
3028         struct be_vf_cfg *vf_cfg;
3029         u16 def_vlan, lnk_speed;
3030         int status, old_vfs, vf;
3031         struct device *dev = &adapter->pdev->dev;
3032         u32 privileges;
3033
3034         old_vfs = pci_num_vf(adapter->pdev);
3035         if (old_vfs) {
3036                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3037                 if (old_vfs != num_vfs)
3038                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3039                 adapter->num_vfs = old_vfs;
3040         } else {
3041                 if (num_vfs > be_max_vfs(adapter))
3042                         dev_info(dev, "Device supports %d VFs, not %d\n",
3043                                  be_max_vfs(adapter), num_vfs);
3044                 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3045                 if (!adapter->num_vfs)
3046                         return 0;
3047         }
3048
3049         status = be_vf_setup_init(adapter);
3050         if (status)
3051                 goto err;
3052
3053         if (old_vfs) {
3054                 for_all_vfs(adapter, vf_cfg, vf) {
3055                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3056                         if (status)
3057                                 goto err;
3058                 }
3059         } else {
3060                 status = be_vfs_if_create(adapter);
3061                 if (status)
3062                         goto err;
3063         }
3064
3065         if (old_vfs) {
3066                 status = be_vfs_mac_query(adapter);
3067                 if (status)
3068                         goto err;
3069         } else {
3070                 status = be_vf_eth_addr_config(adapter);
3071                 if (status)
3072                         goto err;
3073         }
3074
3075         for_all_vfs(adapter, vf_cfg, vf) {
3076                 /* Allow VFs to program MAC/VLAN filters */
3077                 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3078                 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3079                         status = be_cmd_set_fn_privileges(adapter,
3080                                                           privileges |
3081                                                           BE_PRIV_FILTMGMT,
3082                                                           vf + 1);
3083                         if (!status)
3084                                 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3085                                          vf);
3086                 }
3087
3088                 /* BE3 FW, by default, caps the VF TX-rate at 100 Mbps.
3089                  * Allow the full available bandwidth.
3090                  */
3091                 if (BE3_chip(adapter) && !old_vfs)
3092                         be_cmd_set_qos(adapter, 1000, vf + 1);
3093
3094                 status = be_cmd_link_status_query(adapter, &lnk_speed,
3095                                                   NULL, vf + 1);
3096                 if (!status)
3097                         vf_cfg->tx_rate = lnk_speed;
3098
3099                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
3100                                                vf + 1, vf_cfg->if_handle, NULL);
3101                 if (status)
3102                         goto err;
3103                 vf_cfg->def_vid = def_vlan;
3104
3105                 if (!old_vfs)
3106                         be_cmd_enable_vf(adapter, vf + 1);
3107         }
3108
3109         if (!old_vfs) {
3110                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3111                 if (status) {
3112                         dev_err(dev, "SRIOV enable failed\n");
3113                         adapter->num_vfs = 0;
3114                         goto err;
3115                 }
3116         }
3117         return 0;
3118 err:
3119         dev_err(dev, "VF setup failed\n");
3120         be_vf_clear(adapter);
3121         return status;
3122 }
3123
3124 /* On BE2/BE3, the FW does not advertise the supported resource limits */
3125 static void BEx_get_resources(struct be_adapter *adapter,
3126                               struct be_resources *res)
3127 {
3128         struct pci_dev *pdev = adapter->pdev;
3129         bool use_sriov = false;
3130         int max_vfs;
3131
3132         max_vfs = pci_sriov_get_totalvfs(pdev);
3133
3134         if (BE3_chip(adapter) && sriov_want(adapter)) {
3135                 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3136                 use_sriov = res->max_vfs;
3137         }
3138
3139         if (be_physfn(adapter))
3140                 res->max_uc_mac = BE_UC_PMAC_COUNT;
3141         else
3142                 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3143
3144         if (adapter->function_mode & FLEX10_MODE)
3145                 res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
3146         else if (adapter->function_mode & UMC_ENABLED)
3147                 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
3148         else
3149                 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3150         res->max_mcast_mac = BE_MAX_MC;
3151
3152         /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
3153         if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
3154             !be_physfn(adapter) || (adapter->port_num > 1))
3155                 res->max_tx_qs = 1;
3156         else
3157                 res->max_tx_qs = BE3_MAX_TX_QS;
3158
3159         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3160             !use_sriov && be_physfn(adapter))
3161                 res->max_rss_qs = (adapter->be3_native) ?
3162                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3163         res->max_rx_qs = res->max_rss_qs + 1;
3164
3165         if (be_physfn(adapter))
3166                 res->max_evt_qs = (max_vfs > 0) ?
3167                                         BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3168         else
3169                 res->max_evt_qs = 1;
3170
3171         res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3172         if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3173                 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3174 }
3175
3176 static void be_setup_init(struct be_adapter *adapter)
3177 {
3178         adapter->vlan_prio_bmap = 0xff;
3179         adapter->phy.link_speed = -1;
3180         adapter->if_handle = -1;
3181         adapter->be3_native = false;
3182         adapter->promiscuous = false;
3183         if (be_physfn(adapter))
3184                 adapter->cmd_privileges = MAX_PRIVILEGES;
3185         else
3186                 adapter->cmd_privileges = MIN_PRIVILEGES;
3187 }
3188
3189 static int be_get_resources(struct be_adapter *adapter)
3190 {
3191         struct device *dev = &adapter->pdev->dev;
3192         struct be_resources res = {0};
3193         int status;
3194
3195         if (BEx_chip(adapter)) {
3196                 BEx_get_resources(adapter, &res);
3197                 adapter->res = res;
3198         }
3199
3200         /* For Lancer, SH etc. read per-function resource limits from FW.
3201          * GET_FUNC_CONFIG returns per-function guaranteed limits.
3202          * GET_PROFILE_CONFIG returns the PCI-E related (PF-pool) limits.
3203          */
3204         if (!BEx_chip(adapter)) {
3205                 status = be_cmd_get_func_config(adapter, &res);
3206                 if (status)
3207                         return status;
3208
3209                 /* If RoCE may be enabled, stash away half the EQs for RoCE */
3210                 if (be_roce_supported(adapter))
3211                         res.max_evt_qs /= 2;
3212                 adapter->res = res;
3213
3214                 if (be_physfn(adapter)) {
3215                         status = be_cmd_get_profile_config(adapter, &res, 0);
3216                         if (status)
3217                                 return status;
3218                         adapter->res.max_vfs = res.max_vfs;
3219                 }
3220
3221                 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3222                          be_max_txqs(adapter), be_max_rxqs(adapter),
3223                          be_max_rss(adapter), be_max_eqs(adapter),
3224                          be_max_vfs(adapter));
3225                 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3226                          be_max_uc(adapter), be_max_mc(adapter),
3227                          be_max_vlans(adapter));
3228         }
3229
3230         return 0;
3231 }
3232
3233 /* Routine to query per function resource limits */
3234 static int be_get_config(struct be_adapter *adapter)
3235 {
3236         int status;
3237
3238         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3239                                      &adapter->function_mode,
3240                                      &adapter->function_caps,
3241                                      &adapter->asic_rev);
3242         if (status)
3243                 return status;
3244
3245         status = be_get_resources(adapter);
3246         if (status)
3247                 return status;
3248
3249         /* the primary MAC needs one pmac entry */
3250         adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3251                                    GFP_KERNEL);
3252         if (!adapter->pmac_id)
3253                 return -ENOMEM;
3254
3255         /* Sanitize cfg_num_qs based on HW and platform limits */
3256         adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3257
3258         return 0;
3259 }
3260
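/*
 * On the first call the permanent MAC is read from FW and becomes the
 * netdev address; on later calls (e.g. after a HW reset) the existing
 * dev_addr is re-programmed instead. pmac_id[0] holds the PMAC entry
 * used for this primary address.
 */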
3261 static int be_mac_setup(struct be_adapter *adapter)
3262 {
3263         u8 mac[ETH_ALEN];
3264         int status;
3265
3266         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3267                 status = be_cmd_get_perm_mac(adapter, mac);
3268                 if (status)
3269                         return status;
3270
3271                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3272                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3273         } else {
3274                 /* Maybe the HW was reset; dev_addr must be re-programmed */
3275                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3276         }
3277
3278         /* For BE3-R VFs, the PF programs the initial MAC address */
3279         if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3280                 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3281                                 &adapter->pmac_id[0], 0);
3282         return 0;
3283 }
3284
3285 static void be_schedule_worker(struct be_adapter *adapter)
3286 {
3287         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3288         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3289 }
3290
3291 static int be_setup_queues(struct be_adapter *adapter)
3292 {
3293         struct net_device *netdev = adapter->netdev;
3294         int status;
3295
3296         status = be_evt_queues_create(adapter);
3297         if (status)
3298                 goto err;
3299
3300         status = be_tx_qs_create(adapter);
3301         if (status)
3302                 goto err;
3303
3304         status = be_rx_cqs_create(adapter);
3305         if (status)
3306                 goto err;
3307
3308         status = be_mcc_queues_create(adapter);
3309         if (status)
3310                 goto err;
3311
3312         status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3313         if (status)
3314                 goto err;
3315
3316         status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3317         if (status)
3318                 goto err;
3319
3320         return 0;
3321 err:
3322         dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3323         return status;
3324 }
3325
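/* Recreate the queue set at runtime, e.g. when the channel count is
 * changed via ethtool: close the device if running, tear down all
 * queues, re-program MSI-X (unless vectors are shared with RoCE), then
 * rebuild and reopen. netif_set_real_num_*() in be_setup_queues()
 * relies on the caller holding rtnl here.
 */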
3326 int be_update_queues(struct be_adapter *adapter)
3327 {
3328         struct net_device *netdev = adapter->netdev;
3329         int status;
3330
3331         if (netif_running(netdev))
3332                 be_close(netdev);
3333
3334         be_cancel_worker(adapter);
3335
3336         /* If any vectors have been shared with RoCE we cannot re-program
3337          * the MSIx table.
3338          */
3339         if (!adapter->num_msix_roce_vec)
3340                 be_msix_disable(adapter);
3341
3342         be_clear_queues(adapter);
3343
3344         if (!msix_enabled(adapter)) {
3345                 status = be_msix_enable(adapter);
3346                 if (status)
3347                         return status;
3348         }
3349
3350         status = be_setup_queues(adapter);
3351         if (status)
3352                 return status;
3353
3354         be_schedule_worker(adapter);
3355
3356         if (netif_running(netdev))
3357                 status = be_open(netdev);
3358
3359         return status;
3360 }
3361
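/* Function-level init, also re-run after an error/FW reset: query FW
 * config and resource limits, enable MSI-x, create the interface and
 * queue set, program the MAC, apply VLAN/RX-mode/flow-control state
 * and, if requested, set up SR-IOV VFs. Failures unwind via be_clear().
 */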
3362 static int be_setup(struct be_adapter *adapter)
3363 {
3364         struct device *dev = &adapter->pdev->dev;
3365         u32 tx_fc, rx_fc, en_flags;
3366         int status;
3367
3368         be_setup_init(adapter);
3369
3370         if (!lancer_chip(adapter))
3371                 be_cmd_req_native_mode(adapter);
3372
3373         status = be_get_config(adapter);
3374         if (status)
3375                 goto err;
3376
3377         status = be_msix_enable(adapter);
3378         if (status)
3379                 goto err;
3380
3381         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3382                    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3383         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3384                 en_flags |= BE_IF_FLAGS_RSS;
3385         en_flags &= be_if_cap_flags(adapter);
3386         status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3387                                   &adapter->if_handle, 0);
3388         if (status)
3389                 goto err;
3390
3391         /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3392         rtnl_lock();
3393         status = be_setup_queues(adapter);
3394         rtnl_unlock();
3395         if (status)
3396                 goto err;
3397
3398         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3399         /* In UMC mode FW does not return right privileges.
3400          * Override with correct privilege equivalent to PF.
3401          */
3402         if (be_is_mc(adapter))
3403                 adapter->cmd_privileges = MAX_PRIVILEGES;
3404
3405         status = be_mac_setup(adapter);
3406         if (status)
3407                 goto err;
3408
3409         be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3410
3411         if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3412                 dev_err(dev, "Firmware on card is old (%s); IRQs may not work.\n",
3413                         adapter->fw_ver);
3414                 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3415         }
3416
3417         if (adapter->vlans_added)
3418                 be_vid_config(adapter);
3419
3420         be_set_rx_mode(adapter->netdev);
3421
3422         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3423
3424         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3425                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3426                                         adapter->rx_fc);
3427
3428         if (sriov_want(adapter)) {
3429                 if (be_max_vfs(adapter))
3430                         be_vf_setup(adapter);
3431                 else
3432                         dev_warn(dev, "device doesn't support SRIOV\n");
3433         }
3434
3435         status = be_cmd_get_phy_info(adapter);
3436         if (!status && be_pause_supported(adapter))
3437                 adapter->phy.fc_autoneg = 1;
3438
3439         be_schedule_worker(adapter);
3440         return 0;
3441 err:
3442         be_clear(adapter);
3443         return status;
3444 }
3445
3446 #ifdef CONFIG_NET_POLL_CONTROLLER
3447 static void be_netpoll(struct net_device *netdev)
3448 {
3449         struct be_adapter *adapter = netdev_priv(netdev);
3450         struct be_eq_obj *eqo;
3451         int i;
3452
3453         for_all_evt_queues(adapter, eqo, i) {
3454                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3455                 napi_schedule(&eqo->napi);
3456         }
3459 }
3460 #endif
3461
3462 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3463 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3464
3465 static bool be_flash_redboot(struct be_adapter *adapter,
3466                         const u8 *p, u32 img_start, int image_size,
3467                         int hdr_size)
3468 {
3469         u32 crc_offset;
3470         u8 flashed_crc[4];
3471         int status;
3472
3473         crc_offset = hdr_size + img_start + image_size - 4;
3474
3475         p += crc_offset;
3476
3477         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3478                         (image_size - 4));
3479         if (status) {
3480                 dev_err(&adapter->pdev->dev,
3481                         "could not get CRC from flash, not flashing redboot\n");
3482                 return false;
3483         }
3484
3485         /* update redboot only if the flashed CRC does not match the image's */
3486         return memcmp(flashed_crc, p, 4) != 0;
3490 }
3491
3492 static bool phy_flashing_required(struct be_adapter *adapter)
3493 {
3494         return (adapter->phy.phy_type == TN_8022 &&
3495                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3496 }
3497
3498 static bool is_comp_in_ufi(struct be_adapter *adapter,
3499                            struct flash_section_info *fsec, int type)
3500 {
3501         int i = 0, img_type = 0;
3502         struct flash_section_info_g2 *fsec_g2 = NULL;
3503
3504         if (BE2_chip(adapter))
3505                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3506
3507         for (i = 0; i < MAX_FLASH_COMP; i++) {
3508                 if (fsec_g2)
3509                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3510                 else
3511                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3512
3513                 if (img_type == type)
3514                         return true;
3515         }
3516         return false;
3518 }
3519
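/* Walk the UFI past the file/image headers in 32-byte steps looking
 * for the flash_cookie signature that marks the flash section
 * directory; returns NULL (corrupt UFI) if the cookie is never found.
 */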
3520 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3521                                          int header_size,
3522                                          const struct firmware *fw)
3523 {
3524         struct flash_section_info *fsec = NULL;
3525         const u8 *p = fw->data;
3526
3527         p += header_size;
3528         while (p < (fw->data + fw->size)) {
3529                 fsec = (struct flash_section_info *)p;
3530                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3531                         return fsec;
3532                 p += 32;
3533         }
3534         return NULL;
3535 }
3536
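/* Write one flash component to the device in 32KB chunks: every chunk
 * except the last is sent with a *_SAVE op and the final chunk with a
 * *_FLASH op, which presumably commits the staged data. For example, a
 * 100KB image goes down as three 32KB SAVE writes plus one 4KB FLASH
 * write.
 */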
3537 static int be_flash(struct be_adapter *adapter, const u8 *img,
3538                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3539 {
3540         u32 total_bytes = 0, flash_op, num_bytes = 0;
3541         int status = 0;
3542         struct be_cmd_write_flashrom *req = flash_cmd->va;
3543
3544         total_bytes = img_size;
3545         while (total_bytes) {
3546                 num_bytes = min_t(u32, 32*1024, total_bytes);
3547
3548                 total_bytes -= num_bytes;
3549
3550                 if (!total_bytes) {
3551                         if (optype == OPTYPE_PHY_FW)
3552                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3553                         else
3554                                 flash_op = FLASHROM_OPER_FLASH;
3555                 } else {
3556                         if (optype == OPTYPE_PHY_FW)
3557                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3558                         else
3559                                 flash_op = FLASHROM_OPER_SAVE;
3560                 }
3561
3562                 memcpy(req->data_buf, img, num_bytes);
3563                 img += num_bytes;
3564                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3565                                                 flash_op, num_bytes);
3566                 if (status) {
3567                         if (status == ILLEGAL_IOCTL_REQ &&
3568                             optype == OPTYPE_PHY_FW)
3569                                 break;
3570                         dev_err(&adapter->pdev->dev,
3571                                 "cmd to write to flash rom failed.\n");
3572                         return status;
3573                 }
3574         }
3575         return 0;
3576 }
3577
3578 /* For BE2, BE3 and BE3-R */
3579 static int be_flash_BEx(struct be_adapter *adapter,
3580                          const struct firmware *fw,
3581                          struct be_dma_mem *flash_cmd,
3582                          int num_of_images)
3584 {
3585         int status = 0, i, filehdr_size = 0;
3586         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3587         const u8 *p = fw->data;
3588         const struct flash_comp *pflashcomp;
3589         int num_comp, redboot;
3590         struct flash_section_info *fsec = NULL;
3591
3592         struct flash_comp gen3_flash_types[] = {
3593                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3594                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3595                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3596                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3597                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3598                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3599                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3600                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3601                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3602                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3603                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3604                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3605                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3606                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3607                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3608                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3609                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3610                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3611                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3612                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3613         };
3614
3615         struct flash_comp gen2_flash_types[] = {
3616                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3617                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3618                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3619                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3620                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3621                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3622                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3623                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3624                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3625                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3626                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3627                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3628                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3629                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3630                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3631                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3632         };
3633
3634         if (BE3_chip(adapter)) {
3635                 pflashcomp = gen3_flash_types;
3636                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3637                 num_comp = ARRAY_SIZE(gen3_flash_types);
3638         } else {
3639                 pflashcomp = gen2_flash_types;
3640                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3641                 num_comp = ARRAY_SIZE(gen2_flash_types);
3642         }
3643
3644         /* Get flash section info */
3645         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3646         if (!fsec) {
3647                 dev_err(&adapter->pdev->dev,
3648                         "Invalid cookie. UFI corrupted?\n");
3649                 return -1;
3650         }
3651         for (i = 0; i < num_comp; i++) {
3652                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3653                         continue;
3654
3655                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3656                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3657                         continue;
3658
3659                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3660                     !phy_flashing_required(adapter))
3661                         continue;
3662
3663                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3664                         redboot = be_flash_redboot(adapter, fw->data,
3665                                 pflashcomp[i].offset, pflashcomp[i].size,
3666                                 filehdr_size + img_hdrs_size);
3667                         if (!redboot)
3668                                 continue;
3669                 }
3670
3671                 p = fw->data;
3672                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3673                 if (p + pflashcomp[i].size > fw->data + fw->size)
3674                         return -1;
3675
3676                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3677                                         pflashcomp[i].size);
3678                 if (status) {
3679                         dev_err(&adapter->pdev->dev,
3680                                 "Flashing section type %d failed.\n",
3681                                 pflashcomp[i].img_type);
3682                         return status;
3683                 }
3684         }
3685         return 0;
3686 }
3687
3688 static int be_flash_skyhawk(struct be_adapter *adapter,
3689                 const struct firmware *fw,
3690                 struct be_dma_mem *flash_cmd, int num_of_images)
3691 {
3692         int status = 0, i, filehdr_size = 0;
3693         int img_offset, img_size, img_optype, redboot;
3694         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3695         const u8 *p = fw->data;
3696         struct flash_section_info *fsec = NULL;
3697
3698         filehdr_size = sizeof(struct flash_file_hdr_g3);
3699         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3700         if (!fsec) {
3701                 dev_err(&adapter->pdev->dev,
3702                         "Invalid cookie. UFI corrupted?\n");
3703                 return -1;
3704         }
3705
3706         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3707                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3708                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3709
3710                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3711                 case IMAGE_FIRMWARE_iSCSI:
3712                         img_optype = OPTYPE_ISCSI_ACTIVE;
3713                         break;
3714                 case IMAGE_BOOT_CODE:
3715                         img_optype = OPTYPE_REDBOOT;
3716                         break;
3717                 case IMAGE_OPTION_ROM_ISCSI:
3718                         img_optype = OPTYPE_BIOS;
3719                         break;
3720                 case IMAGE_OPTION_ROM_PXE:
3721                         img_optype = OPTYPE_PXE_BIOS;
3722                         break;
3723                 case IMAGE_OPTION_ROM_FCoE:
3724                         img_optype = OPTYPE_FCOE_BIOS;
3725                         break;
3726                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3727                         img_optype = OPTYPE_ISCSI_BACKUP;
3728                         break;
3729                 case IMAGE_NCSI:
3730                         img_optype = OPTYPE_NCSI_FW;
3731                         break;
3732                 default:
3733                         continue;
3734                 }
3735
3736                 if (img_optype == OPTYPE_REDBOOT) {
3737                         redboot = be_flash_redboot(adapter, fw->data,
3738                                         img_offset, img_size,
3739                                         filehdr_size + img_hdrs_size);
3740                         if (!redboot)
3741                                 continue;
3742                 }
3743
3744                 p = fw->data;
3745                 p += filehdr_size + img_offset + img_hdrs_size;
3746                 if (p + img_size > fw->data + fw->size)
3747                         return -1;
3748
3749                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3750                 if (status) {
3751                         dev_err(&adapter->pdev->dev,
3752                                 "Flashing section type %d failed.\n",
3753                                 le32_to_cpu(fsec->fsec_entry[i].type));
3754                         return status;
3755                 }
3756         }
3757         return 0;
3758 }
3759
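/* Lancer flashes via the WRITE_OBJECT command rather than raw flashrom
 * ops: the image is streamed in 32KB chunks to the "/prg" object and a
 * final zero-length write commits it. Depending on change_status the
 * FW is then reset in place, or a system reboot is required.
 */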
3760 static int lancer_fw_download(struct be_adapter *adapter,
3761                                 const struct firmware *fw)
3762 {
3763 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3764 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3765         struct be_dma_mem flash_cmd;
3766         const u8 *data_ptr = NULL;
3767         u8 *dest_image_ptr = NULL;
3768         size_t image_size = 0;
3769         u32 chunk_size = 0;
3770         u32 data_written = 0;
3771         u32 offset = 0;
3772         int status = 0;
3773         u8 add_status = 0;
3774         u8 change_status;
3775
3776         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3777                 dev_err(&adapter->pdev->dev,
3778                         "FW image not properly aligned. Length must be 4-byte aligned.\n");
3780                 status = -EINVAL;
3781                 goto lancer_fw_exit;
3782         }
3783
3784         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3785                                 + LANCER_FW_DOWNLOAD_CHUNK;
3786         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3787                                           &flash_cmd.dma, GFP_KERNEL);
3788         if (!flash_cmd.va) {
3789                 status = -ENOMEM;
3790                 goto lancer_fw_exit;
3791         }
3792
3793         dest_image_ptr = flash_cmd.va +
3794                                 sizeof(struct lancer_cmd_req_write_object);
3795         image_size = fw->size;
3796         data_ptr = fw->data;
3797
3798         while (image_size) {
3799                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3800
3801                 /* Copy the image chunk content. */
3802                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3803
3804                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3805                                                  chunk_size, offset,
3806                                                  LANCER_FW_DOWNLOAD_LOCATION,
3807                                                  &data_written, &change_status,
3808                                                  &add_status);
3809                 if (status)
3810                         break;
3811
3812                 offset += data_written;
3813                 data_ptr += data_written;
3814                 image_size -= data_written;
3815         }
3816
3817         if (!status) {
3818                 /* Commit the FW written */
3819                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3820                                                  0, offset,
3821                                                  LANCER_FW_DOWNLOAD_LOCATION,
3822                                                  &data_written, &change_status,
3823                                                  &add_status);
3824         }
3825
3826         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3827                                 flash_cmd.dma);
3828         if (status) {
3829                 dev_err(&adapter->pdev->dev,
3830                         "Firmware load error. Status code: 0x%x, additional status: 0x%x\n",
3832                         status, add_status);
3833                 goto lancer_fw_exit;
3834         }
3835
3836         if (change_status == LANCER_FW_RESET_NEEDED) {
3837                 dev_info(&adapter->pdev->dev,
3838                          "Resetting adapter to activate new FW\n");
3839                 status = lancer_physdev_ctrl(adapter,
3840                                              PHYSDEV_CONTROL_FW_RESET_MASK);
3841                 if (status) {
3842                         dev_err(&adapter->pdev->dev,
3843                                 "Adapter busy for FW reset.\n"
3844                                 "New FW will not be active.\n");
3845                         goto lancer_fw_exit;
3846                 }
3847         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3848                 dev_err(&adapter->pdev->dev,
3849                         "System reboot required for new FW to be active\n");
3851         }
3852
3853         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3854 lancer_fw_exit:
3855         return status;
3856 }
3857
3858 #define UFI_TYPE2               2
3859 #define UFI_TYPE3               3
3860 #define UFI_TYPE3R              10
3861 #define UFI_TYPE4               4
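/* Match a UFI image to the chip: build[0] encodes the FW generation
 * ('2' = BE2, '3' = BE3, '4' = Skyhawk) and, for gen-3 images,
 * asic_type_rev 0x10 marks a BE3-R image. Plain TYPE3 images are later
 * rejected on BE3-R cards in be_fw_download().
 */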
3862 static int be_get_ufi_type(struct be_adapter *adapter,
3863                            struct flash_file_hdr_g3 *fhdr)
3864 {
3865         if (fhdr == NULL)
3866                 goto be_get_ufi_exit;
3867
3868         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3869                 return UFI_TYPE4;
3870         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3871                 if (fhdr->asic_type_rev == 0x10)
3872                         return UFI_TYPE3R;
3873                 else
3874                         return UFI_TYPE3;
3875         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3876                 return UFI_TYPE2;
3877
3878 be_get_ufi_exit:
3879         dev_err(&adapter->pdev->dev,
3880                 "UFI and Interface are not compatible for flashing\n");
3881         return -1;
3882 }
3883
3884 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3885 {
3886         struct flash_file_hdr_g3 *fhdr3;
3887         struct image_hdr *img_hdr_ptr = NULL;
3888         struct be_dma_mem flash_cmd;
3889         const u8 *p;
3890         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3891
3892         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3893         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3894                                           &flash_cmd.dma, GFP_KERNEL);
3895         if (!flash_cmd.va) {
3896                 status = -ENOMEM;
3897                 goto be_fw_exit;
3898         }
3899
3900         p = fw->data;
3901         fhdr3 = (struct flash_file_hdr_g3 *)p;
3902
3903         ufi_type = be_get_ufi_type(adapter, fhdr3);
3904
3905         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3906         for (i = 0; i < num_imgs; i++) {
3907                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3908                                 (sizeof(struct flash_file_hdr_g3) +
3909                                  i * sizeof(struct image_hdr)));
3910                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3911                         switch (ufi_type) {
3912                         case UFI_TYPE4:
3913                                 status = be_flash_skyhawk(adapter, fw,
3914                                                         &flash_cmd, num_imgs);
3915                                 break;
3916                         case UFI_TYPE3R:
3917                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3918                                                       num_imgs);
3919                                 break;
3920                         case UFI_TYPE3:
3921                                 /* Do not flash this ufi on BE3-R cards */
3922                                 if (adapter->asic_rev < 0x10)
3923                                         status = be_flash_BEx(adapter, fw,
3924                                                               &flash_cmd,
3925                                                               num_imgs);
3926                                 else {
3927                                         status = -1;
3928                                         dev_err(&adapter->pdev->dev,
3929                                                 "Can't load BE3 UFI on BE3R\n");
3930                                 }
3931                         }
3932                 }
3933         }
3934
3935         if (ufi_type == UFI_TYPE2)
3936                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3937         else if (ufi_type == -1)
3938                 status = -1;
3939
3940         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3941                           flash_cmd.dma);
3942         if (status) {
3943                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3944                 goto be_fw_exit;
3945         }
3946
3947         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3948
3949 be_fw_exit:
3950         return status;
3951 }
3952
3953 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3954 {
3955         const struct firmware *fw;
3956         int status;
3957
3958         if (!netif_running(adapter->netdev)) {
3959                 dev_err(&adapter->pdev->dev,
3960                         "Firmware load not allowed (interface is down)\n");
3961                 return -ENETDOWN;
3962         }
3963
3964         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3965         if (status)
3966                 goto fw_exit;
3967
3968         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3969
3970         if (lancer_chip(adapter))
3971                 status = lancer_fw_download(adapter, fw);
3972         else
3973                 status = be_fw_download(adapter, fw);
3974
3975         if (!status)
3976                 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3977                                   adapter->fw_on_flash);
3978
3979 fw_exit:
3980         release_firmware(fw);
3981         return status;
3982 }
3983
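/* Toggle the embedded switch between VEB (inter-VF traffic is switched
 * locally by the adapter) and VEPA (all traffic is forwarded to the
 * external switch) via be_cmd_set_hsw_config(). Only meaningful when
 * SR-IOV is enabled.
 */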
3984 static int be_ndo_bridge_setlink(struct net_device *dev,
3985                                     struct nlmsghdr *nlh)
3986 {
3987         struct be_adapter *adapter = netdev_priv(dev);
3988         struct nlattr *attr, *br_spec;
3989         int rem;
3990         int status = 0;
3991         u16 mode = 0;
3992
3993         if (!sriov_enabled(adapter))
3994                 return -EOPNOTSUPP;
3995
3996         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
        /* Guard against a request without an AF_SPEC block;
         * nla_for_each_nested() below would otherwise dereference NULL.
         */
        if (!br_spec)
                return -EINVAL;
3997
3998         nla_for_each_nested(attr, br_spec, rem) {
3999                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4000                         continue;
4001
4002                 mode = nla_get_u16(attr);
4003                 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4004                         return -EINVAL;
4005
4006                 status = be_cmd_set_hsw_config(adapter, 0, 0,
4007                                                adapter->if_handle,
4008                                                mode == BRIDGE_MODE_VEPA ?
4009                                                PORT_FWD_TYPE_VEPA :
4010                                                PORT_FWD_TYPE_VEB);
4011                 if (status)
4012                         goto err;
4013
4014                 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4015                          mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4016
4017                 return status;
4018         }
4019 err:
4020         dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4021                 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4022
4023         return status;
4024 }
4025
4026 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4027                                     struct net_device *dev,
4028                                     u32 filter_mask)
4029 {
4030         struct be_adapter *adapter = netdev_priv(dev);
4031         int status = 0;
4032         u8 hsw_mode;
4033
4034         if (!sriov_enabled(adapter))
4035                 return 0;
4036
4037         /* BE and Lancer chips support VEB mode only */
4038         if (BEx_chip(adapter) || lancer_chip(adapter)) {
4039                 hsw_mode = PORT_FWD_TYPE_VEB;
4040         } else {
4041                 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4042                                                adapter->if_handle, &hsw_mode);
4043                 if (status)
4044                         return 0;
4045         }
4046
4047         return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4048                                        hsw_mode == PORT_FWD_TYPE_VEPA ?
4049                                        BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4050 }
4051
4052 static const struct net_device_ops be_netdev_ops = {
4053         .ndo_open               = be_open,
4054         .ndo_stop               = be_close,
4055         .ndo_start_xmit         = be_xmit,
4056         .ndo_set_rx_mode        = be_set_rx_mode,
4057         .ndo_set_mac_address    = be_mac_addr_set,
4058         .ndo_change_mtu         = be_change_mtu,
4059         .ndo_get_stats64        = be_get_stats64,
4060         .ndo_validate_addr      = eth_validate_addr,
4061         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
4062         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
4063         .ndo_set_vf_mac         = be_set_vf_mac,
4064         .ndo_set_vf_vlan        = be_set_vf_vlan,
4065         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
4066         .ndo_get_vf_config      = be_get_vf_config,
4067 #ifdef CONFIG_NET_POLL_CONTROLLER
4068         .ndo_poll_controller    = be_netpoll,
4069 #endif
4070         .ndo_bridge_setlink     = be_ndo_bridge_setlink,
4071         .ndo_bridge_getlink     = be_ndo_bridge_getlink,
4072 #ifdef CONFIG_NET_RX_BUSY_POLL
4073         .ndo_busy_poll          = be_busy_poll
4074 #endif
4075 };
4076
4077 static void be_netdev_init(struct net_device *netdev)
4078 {
4079         struct be_adapter *adapter = netdev_priv(netdev);
4080
4081         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4082                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
4083                 NETIF_F_HW_VLAN_CTAG_TX;
4084         if (be_multi_rxq(adapter))
4085                 netdev->hw_features |= NETIF_F_RXHASH;
4086
4087         netdev->features |= netdev->hw_features |
4088                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4089
4090         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4091                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4092
4093         netdev->priv_flags |= IFF_UNICAST_FLT;
4094
4095         netdev->flags |= IFF_MULTICAST;
4096
4097         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
4098
4099         netdev->netdev_ops = &be_netdev_ops;
4100
4101         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
4102 }
4103
4104 static void be_unmap_pci_bars(struct be_adapter *adapter)
4105 {
4106         if (adapter->csr)
4107                 pci_iounmap(adapter->pdev, adapter->csr);
4108         if (adapter->db)
4109                 pci_iounmap(adapter->pdev, adapter->db);
4110 }
4111
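/* Doorbell BAR selection: Lancer and all VFs expose doorbells on BAR 0,
 * while BE-x/Skyhawk PFs use BAR 4.
 */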
4112 static int db_bar(struct be_adapter *adapter)
4113 {
4114         if (lancer_chip(adapter) || !be_physfn(adapter))
4115                 return 0;
4116         else
4117                 return 4;
4118 }
4119
4120 static int be_roce_map_pci_bars(struct be_adapter *adapter)
4121 {
4122         if (skyhawk_chip(adapter)) {
4123                 adapter->roce_db.size = 4096;
4124                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4125                                                               db_bar(adapter));
4126                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4127                                                                db_bar(adapter));
4128         }
4129         return 0;
4130 }
4131
4132 static int be_map_pci_bars(struct be_adapter *adapter)
4133 {
4134         u8 __iomem *addr;
4135
4136         if (BEx_chip(adapter) && be_physfn(adapter)) {
4137                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4138                 if (adapter->csr == NULL)
4139                         return -ENOMEM;
4140         }
4141
4142         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
4143         if (addr == NULL)
4144                 goto pci_map_err;
4145         adapter->db = addr;
4146
4147         be_roce_map_pci_bars(adapter);
4148         return 0;
4149
4150 pci_map_err:
4151         be_unmap_pci_bars(adapter);
4152         return -ENOMEM;
4153 }
4154
4155 static void be_ctrl_cleanup(struct be_adapter *adapter)
4156 {
4157         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
4158
4159         be_unmap_pci_bars(adapter);
4160
4161         if (mem->va)
4162                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4163                                   mem->dma);
4164
4165         mem = &adapter->rx_filter;
4166         if (mem->va)
4167                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4168                                   mem->dma);
4169 }
4170
4171 static int be_ctrl_init(struct be_adapter *adapter)
4172 {
4173         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4174         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
4175         struct be_dma_mem *rx_filter = &adapter->rx_filter;
4176         u32 sli_intf;
4177         int status;
4178
4179         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4180         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4181                                  SLI_INTF_FAMILY_SHIFT;
4182         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4183
4184         status = be_map_pci_bars(adapter);
4185         if (status)
4186                 goto done;
4187
4188         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
4189         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4190                                                 mbox_mem_alloc->size,
4191                                                 &mbox_mem_alloc->dma,
4192                                                 GFP_KERNEL);
4193         if (!mbox_mem_alloc->va) {
4194                 status = -ENOMEM;
4195                 goto unmap_pci_bars;
4196         }
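        /* The MCC mailbox must be 16-byte aligned; the buffer is
         * over-allocated by 16 bytes so that an aligned view can be
         * carved out of it.
         */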
4197         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4198         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4199         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4200         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
4201
4202         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
4203         rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4204                                             rx_filter->size, &rx_filter->dma,
4205                                             GFP_KERNEL);
4206         if (rx_filter->va == NULL) {
4207                 status = -ENOMEM;
4208                 goto free_mbox;
4209         }
4210
4211         mutex_init(&adapter->mbox_lock);
4212         spin_lock_init(&adapter->mcc_lock);
4213         spin_lock_init(&adapter->mcc_cq_lock);
4214
4215         init_completion(&adapter->et_cmd_compl);
4216         pci_save_state(adapter->pdev);
4217         return 0;
4218
4219 free_mbox:
4220         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4221                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
4222
4223 unmap_pci_bars:
4224         be_unmap_pci_bars(adapter);
4225
4226 done:
4227         return status;
4228 }
4229
4230 static void be_stats_cleanup(struct be_adapter *adapter)
4231 {
4232         struct be_dma_mem *cmd = &adapter->stats_cmd;
4233
4234         if (cmd->va)
4235                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4236                                   cmd->va, cmd->dma);
4237 }
4238
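/* Size the stats DMA buffer for the command version the ASIC speaks:
 * pport stats on Lancer, v0 on BE2, v1 on BE3 and v2 on all other ASICs.
 */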
4239 static int be_stats_init(struct be_adapter *adapter)
4240 {
4241         struct be_dma_mem *cmd = &adapter->stats_cmd;
4242
4243         if (lancer_chip(adapter))
4244                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4245         else if (BE2_chip(adapter))
4246                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
4247         else if (BE3_chip(adapter))
4248                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4249         else
4250                 /* ALL non-BE ASICs */
4251                 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
4252
4253         cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4254                                       GFP_KERNEL);
4255         if (cmd->va == NULL)
4256                 return -ENOMEM;
4257         return 0;
4258 }
4259
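/* Tear down in the reverse order of be_probe(): detach RoCE, mask
 * interrupts, stop the recovery worker, unregister the netdev and
 * release all DMA and PCI resources.
 */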
4260 static void be_remove(struct pci_dev *pdev)
4261 {
4262         struct be_adapter *adapter = pci_get_drvdata(pdev);
4263
4264         if (!adapter)
4265                 return;
4266
4267         be_roce_dev_remove(adapter);
4268         be_intr_set(adapter, false);
4269
4270         cancel_delayed_work_sync(&adapter->func_recovery_work);
4271
4272         unregister_netdev(adapter->netdev);
4273
4274         be_clear(adapter);
4275
4276         /* tell fw we're done with firing cmds */
4277         be_cmd_fw_clean(adapter);
4278
4279         be_stats_cleanup(adapter);
4280
4281         be_ctrl_cleanup(adapter);
4282
4283         pci_disable_pcie_error_reporting(pdev);
4284
4285         pci_release_regions(pdev);
4286         pci_disable_device(pdev);
4287
4288         free_netdev(adapter->netdev);
4289 }
4290
4291 bool be_is_wol_supported(struct be_adapter *adapter)
4292 {
4293         return (adapter->wol_cap & BE_WOL_CAP) &&
4294                !be_is_wol_excluded(adapter);
4295 }
4296
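/* Query the FW's extended FAT capabilities and return the configured
 * UART trace level; Lancer does not support this query.
 */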
4297 u32 be_get_fw_log_level(struct be_adapter *adapter)
4298 {
4299         struct be_dma_mem extfat_cmd;
4300         struct be_fat_conf_params *cfgs;
4301         int status;
4302         u32 level = 0;
4303         int j;
4304
4305         if (lancer_chip(adapter))
4306                 return 0;
4307
4308         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4309         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4310         extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
4311                                             extfat_cmd.size, &extfat_cmd.dma,
4312                                             GFP_KERNEL);
4312
4313         if (!extfat_cmd.va) {
4314                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4315                         __func__);
4316                 goto err;
4317         }
4318
4319         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4320         if (!status) {
4321                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4322                                                 sizeof(struct be_cmd_resp_hdr));
4323                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4324                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4325                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4326                 }
4327         }
4328         dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size,
4329                           extfat_cmd.va, extfat_cmd.dma);
4330 err:
4331         return level;
4332 }
4333
4334 static int be_get_initial_config(struct be_adapter *adapter)
4335 {
4336         int status;
4337         u32 level;
4338
4339         status = be_cmd_get_cntl_attributes(adapter);
4340         if (status)
4341                 return status;
4342
4343         status = be_cmd_get_acpi_wol_cap(adapter);
4344         if (status) {
4345                 /* in case of a failure to get WOL capabilities
4346                  * check the exclusion list to determine WOL capability */
4347                 if (!be_is_wol_excluded(adapter))
4348                         adapter->wol_cap |= BE_WOL_CAP;
4349         }
4350
4351         if (be_is_wol_supported(adapter))
4352                 adapter->wol = true;
4353
4354         /* Must be a power of 2 or else MODULO will BUG_ON */
4355         adapter->be_get_temp_freq = 64;
4356
4357         level = be_get_fw_log_level(adapter);
4358         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4359
4360         adapter->cfg_num_qs = netif_get_num_default_rss_queues();
4361         return 0;
4362 }
4363
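/* Lancer error recovery: wait for the chip to become ready again, then
 * tear down the function (be_clear) and rebuild it (be_setup),
 * re-opening the interface if it was running.
 */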
4364 static int lancer_recover_func(struct be_adapter *adapter)
4365 {
4366         struct device *dev = &adapter->pdev->dev;
4367         int status;
4368
4369         status = lancer_test_and_set_rdy_state(adapter);
4370         if (status)
4371                 goto err;
4372
4373         if (netif_running(adapter->netdev))
4374                 be_close(adapter->netdev);
4375
4376         be_clear(adapter);
4377
4378         be_clear_all_error(adapter);
4379
4380         status = be_setup(adapter);
4381         if (status)
4382                 goto err;
4383
4384         if (netif_running(adapter->netdev)) {
4385                 status = be_open(adapter->netdev);
4386                 if (status)
4387                         goto err;
4388         }
4389
4390         dev_err(dev, "Adapter recovery successful\n");
4391         return 0;
4392 err:
4393         if (status == -EAGAIN)
4394                 dev_err(dev, "Waiting for resource provisioning\n");
4395         else
4396                 dev_err(dev, "Adapter recovery failed\n");
4397
4398         return status;
4399 }
4400
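/* Health-check worker, runs every second: detect HW/FW errors and, on
 * Lancer, attempt in-place function recovery with the netdev detached.
 */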
4401 static void be_func_recovery_task(struct work_struct *work)
4402 {
4403         struct be_adapter *adapter =
4404                 container_of(work, struct be_adapter, func_recovery_work.work);
4405         int status = 0;
4406
4407         be_detect_error(adapter);
4408
4409         if (adapter->hw_error && lancer_chip(adapter)) {
4411                 rtnl_lock();
4412                 netif_device_detach(adapter->netdev);
4413                 rtnl_unlock();
4414
4415                 status = lancer_recover_func(adapter);
4416                 if (!status)
4417                         netif_device_attach(adapter->netdev);
4418         }
4419
4420         /* Reschedule the health check unless a Lancer recovery attempt
4421          * failed with an error other than provisioning (-EAGAIN).
4422          */
4423         if (!status || status == -EAGAIN)
4424                 schedule_delayed_work(&adapter->func_recovery_work,
4425                                       msecs_to_jiffies(1000));
4426 }
4427
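/* Periodic housekeeping: reap MCC completions while interrupts are off,
 * refresh HW stats and die temperature, replenish RX queues starved by
 * allocation failures and re-tune EQ interrupt delays.
 */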
4428 static void be_worker(struct work_struct *work)
4429 {
4430         struct be_adapter *adapter =
4431                 container_of(work, struct be_adapter, work.work);
4432         struct be_rx_obj *rxo;
4433         int i;
4434
4435         /* when interrupts are not yet enabled, just reap any pending
4436          * mcc completions */
4437         if (!netif_running(adapter->netdev)) {
4438                 local_bh_disable();
4439                 be_process_mcc(adapter);
4440                 local_bh_enable();
4441                 goto reschedule;
4442         }
4443
4444         if (!adapter->stats_cmd_sent) {
4445                 if (lancer_chip(adapter))
4446                         lancer_cmd_get_pport_stats(adapter,
4447                                                 &adapter->stats_cmd);
4448                 else
4449                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4450         }
4451
4452         if (be_physfn(adapter) &&
4453             MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4454                 be_cmd_get_die_temperature(adapter);
4455
4456         for_all_rx_queues(adapter, rxo, i) {
4457                 /* Replenish RX-queues starved due to memory
4458                  * allocation failures.
4459                  */
4460                 if (rxo->rx_post_starved)
4461                         be_post_rx_frags(rxo, GFP_KERNEL);
4462         }
4463
4464         be_eqd_update(adapter);
4465
4466 reschedule:
4467         adapter->work_counter++;
4468         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4469 }
4470
4471 /* If any VFs are already enabled don't FLR the PF */
4472 static bool be_reset_required(struct be_adapter *adapter)
4473 {
4474         return pci_num_vf(adapter->pdev) == 0;
4475 }
4476
4477 static char *mc_name(struct be_adapter *adapter)
4478 {
4479         if (adapter->function_mode & FLEX10_MODE)
4480                 return "FLEX10";
4481         else if (adapter->function_mode & VNIC_MODE)
4482                 return "vNIC";
4483         else if (adapter->function_mode & UMC_ENABLED)
4484                 return "UMC";
4485         else
4486                 return "";
4487 }
4488
4489 static inline char *func_name(struct be_adapter *adapter)
4490 {
4491         return be_physfn(adapter) ? "PF" : "VF";
4492 }
4493
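/* Bring-up order: enable the PCI function, map BARs and set up the
 * mailbox (be_ctrl_init), sync with FW, allocate stats buffers, create
 * rings (be_setup), register the netdev, add the RoCE function and
 * start the health-check worker.
 */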
4494 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4495 {
4496         int status = 0;
4497         struct be_adapter *adapter;
4498         struct net_device *netdev;
4499         char port_name;
4500
4501         status = pci_enable_device(pdev);
4502         if (status)
4503                 goto do_none;
4504
4505         status = pci_request_regions(pdev, DRV_NAME);
4506         if (status)
4507                 goto disable_dev;
4508         pci_set_master(pdev);
4509
4510         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4511         if (netdev == NULL) {
4512                 status = -ENOMEM;
4513                 goto rel_reg;
4514         }
4515         adapter = netdev_priv(netdev);
4516         adapter->pdev = pdev;
4517         pci_set_drvdata(pdev, adapter);
4518         adapter->netdev = netdev;
4519         SET_NETDEV_DEV(netdev, &pdev->dev);
4520
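        /* Prefer a 64-bit DMA mask; fall back to 32-bit if the platform
         * cannot provide it.
         */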
4521         status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4522         if (!status) {
4523                 netdev->features |= NETIF_F_HIGHDMA;
4524         } else {
4525                 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4526                 if (status) {
4527                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4528                         goto free_netdev;
4529                 }
4530         }
4531
4532         if (be_physfn(adapter)) {
4533                 status = pci_enable_pcie_error_reporting(pdev);
4534                 if (!status)
4535                         dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4536         }
4537
4538         status = be_ctrl_init(adapter);
4539         if (status)
4540                 goto free_netdev;
4541
4542         /* sync up with fw's ready state */
4543         if (be_physfn(adapter)) {
4544                 status = be_fw_wait_ready(adapter);
4545                 if (status)
4546                         goto ctrl_clean;
4547         }
4548
4549         if (be_reset_required(adapter)) {
4550                 status = be_cmd_reset_function(adapter);
4551                 if (status)
4552                         goto ctrl_clean;
4553
4554                 /* Wait for interrupts to quiesce after an FLR */
4555                 msleep(100);
4556         }
4557
4558         /* Allow interrupts for other ULPs running on NIC function */
4559         be_intr_set(adapter, true);
4560
4561         /* tell fw we're ready to fire cmds */
4562         status = be_cmd_fw_init(adapter);
4563         if (status)
4564                 goto ctrl_clean;
4565
4566         status = be_stats_init(adapter);
4567         if (status)
4568                 goto ctrl_clean;
4569
4570         status = be_get_initial_config(adapter);
4571         if (status)
4572                 goto stats_clean;
4573
4574         INIT_DELAYED_WORK(&adapter->work, be_worker);
4575         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4576         adapter->rx_fc = adapter->tx_fc = true;
4577
4578         status = be_setup(adapter);
4579         if (status)
4580                 goto stats_clean;
4581
4582         be_netdev_init(netdev);
4583         status = register_netdev(netdev);
4584         if (status != 0)
4585                 goto unsetup;
4586
4587         be_roce_dev_add(adapter);
4588
4589         schedule_delayed_work(&adapter->func_recovery_work,
4590                               msecs_to_jiffies(1000));
4591
4592         be_cmd_query_port_name(adapter, &port_name);
4593
4594         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4595                  func_name(adapter), mc_name(adapter), port_name);
4596
4597         return 0;
4598
4599 unsetup:
4600         be_clear(adapter);
4601 stats_clean:
4602         be_stats_cleanup(adapter);
4603 ctrl_clean:
4604         be_ctrl_cleanup(adapter);
4605 free_netdev:
4606         free_netdev(netdev);
4607 rel_reg:
4608         pci_release_regions(pdev);
4609 disable_dev:
4610         pci_disable_device(pdev);
4611 do_none:
4612         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4613         return status;
4614 }
4615
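/* Legacy PM: arm WoL if enabled, quiesce and tear down the function and
 * drop to the requested power state; be_resume() reverses this and
 * restarts the health-check worker.
 */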
4616 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4617 {
4618         struct be_adapter *adapter = pci_get_drvdata(pdev);
4619         struct net_device *netdev = adapter->netdev;
4620
4621         if (adapter->wol)
4622                 be_setup_wol(adapter, true);
4623
4624         be_intr_set(adapter, false);
4625         cancel_delayed_work_sync(&adapter->func_recovery_work);
4626
4627         netif_device_detach(netdev);
4628         if (netif_running(netdev)) {
4629                 rtnl_lock();
4630                 be_close(netdev);
4631                 rtnl_unlock();
4632         }
4633         be_clear(adapter);
4634
4635         pci_save_state(pdev);
4636         pci_disable_device(pdev);
4637         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4638         return 0;
4639 }
4640
4641 static int be_resume(struct pci_dev *pdev)
4642 {
4643         int status = 0;
4644         struct be_adapter *adapter = pci_get_drvdata(pdev);
4645         struct net_device *netdev = adapter->netdev;
4646
4647         netif_device_detach(netdev);
4648
4649         status = pci_enable_device(pdev);
4650         if (status)
4651                 return status;
4652
4653         pci_set_power_state(pdev, PCI_D0);
4654         pci_restore_state(pdev);
4655
4656         status = be_fw_wait_ready(adapter);
4657         if (status)
4658                 return status;
4659
4660         be_intr_set(adapter, true);
4661         /* tell fw we're ready to fire cmds */
4662         status = be_cmd_fw_init(adapter);
4663         if (status)
4664                 return status;
4665
4666         be_setup(adapter);
4667         if (netif_running(netdev)) {
4668                 rtnl_lock();
4669                 be_open(netdev);
4670                 rtnl_unlock();
4671         }
4672
4673         schedule_delayed_work(&adapter->func_recovery_work,
4674                               msecs_to_jiffies(1000));
4675         netif_device_attach(netdev);
4676
4677         if (adapter->wol)
4678                 be_setup_wol(adapter, false);
4679
4680         return 0;
4681 }
4682
4683 /*
4684  * An FLR will stop BE from DMAing any data.
4685  */
4686 static void be_shutdown(struct pci_dev *pdev)
4687 {
4688         struct be_adapter *adapter = pci_get_drvdata(pdev);
4689
4690         if (!adapter)
4691                 return;
4692
4693         cancel_delayed_work_sync(&adapter->work);
4694         cancel_delayed_work_sync(&adapter->func_recovery_work);
4695
4696         netif_device_detach(adapter->netdev);
4697
4698         be_cmd_reset_function(adapter);
4699
4700         pci_disable_device(pdev);
4701 }
4702
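/* PCI EEH callbacks: error_detected quiesces and tears down the
 * function, slot_reset re-enables the device and waits for FW readiness,
 * and resume rebuilds the function and re-attaches the netdev.
 */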
4703 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4704                                 pci_channel_state_t state)
4705 {
4706         struct be_adapter *adapter = pci_get_drvdata(pdev);
4707         struct net_device *netdev = adapter->netdev;
4708
4709         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4710
4711         if (!adapter->eeh_error) {
4712                 adapter->eeh_error = true;
4713
4714                 cancel_delayed_work_sync(&adapter->func_recovery_work);
4715
4716                 rtnl_lock();
4717                 netif_device_detach(netdev);
4718                 if (netif_running(netdev))
4719                         be_close(netdev);
4720                 rtnl_unlock();
4721
4722                 be_clear(adapter);
4723         }
4724
4725         if (state == pci_channel_io_perm_failure)
4726                 return PCI_ERS_RESULT_DISCONNECT;
4727
4728         pci_disable_device(pdev);
4729
4730         /* The error could cause the FW to trigger a flash debug dump.
4731          * Resetting the card while flash dump is in progress
4732          * can cause it not to recover; wait for it to finish.
4733          * Wait only for first function as it is needed only once per
4734          * adapter.
4735          */
4736         if (pdev->devfn == 0)
4737                 ssleep(30);
4738
4739         return PCI_ERS_RESULT_NEED_RESET;
4740 }
4741
4742 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4743 {
4744         struct be_adapter *adapter = pci_get_drvdata(pdev);
4745         int status;
4746
4747         dev_info(&adapter->pdev->dev, "EEH reset\n");
4748
4749         status = pci_enable_device(pdev);
4750         if (status)
4751                 return PCI_ERS_RESULT_DISCONNECT;
4752
4753         pci_set_master(pdev);
4754         pci_set_power_state(pdev, PCI_D0);
4755         pci_restore_state(pdev);
4756
4757         /* Check if card is ok and fw is ready */
4758         dev_info(&adapter->pdev->dev,
4759                  "Waiting for FW to be ready after EEH reset\n");
4760         status = be_fw_wait_ready(adapter);
4761         if (status)
4762                 return PCI_ERS_RESULT_DISCONNECT;
4763
4764         pci_cleanup_aer_uncorrect_error_status(pdev);
4765         be_clear_all_error(adapter);
4766         return PCI_ERS_RESULT_RECOVERED;
4767 }
4768
4769 static void be_eeh_resume(struct pci_dev *pdev)
4770 {
4771         int status = 0;
4772         struct be_adapter *adapter = pci_get_drvdata(pdev);
4773         struct net_device *netdev = adapter->netdev;
4774
4775         dev_info(&adapter->pdev->dev, "EEH resume\n");
4776
4777         pci_save_state(pdev);
4778
4779         status = be_cmd_reset_function(adapter);
4780         if (status)
4781                 goto err;
4782
4783         /* tell fw we're ready to fire cmds */
4784         status = be_cmd_fw_init(adapter);
4785         if (status)
4786                 goto err;
4787
4788         status = be_setup(adapter);
4789         if (status)
4790                 goto err;
4791
4792         if (netif_running(netdev)) {
4793                 status = be_open(netdev);
4794                 if (status)
4795                         goto err;
4796         }
4797
4798         schedule_delayed_work(&adapter->func_recovery_work,
4799                               msecs_to_jiffies(1000));
4800         netif_device_attach(netdev);
4801         return;
4802 err:
4803         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4804 }
4805
4806 static const struct pci_error_handlers be_eeh_handlers = {
4807         .error_detected = be_eeh_err_detected,
4808         .slot_reset = be_eeh_reset,
4809         .resume = be_eeh_resume,
4810 };
4811
4812 static struct pci_driver be_driver = {
4813         .name = DRV_NAME,
4814         .id_table = be_dev_ids,
4815         .probe = be_probe,
4816         .remove = be_remove,
4817         .suspend = be_suspend,
4818         .resume = be_resume,
4819         .shutdown = be_shutdown,
4820         .err_handler = &be_eeh_handlers
4821 };
4822
4823 static int __init be_init_module(void)
4824 {
4825         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4826             rx_frag_size != 2048) {
4827                 pr_warn(DRV_NAME " : Module param rx_frag_size must be 2048/4096/8192. Using 2048\n");
4830                 rx_frag_size = 2048;
4831         }
4832
4833         return pci_register_driver(&be_driver);
4834 }
4835 module_init(be_init_module);
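/* Example (module name assumed from DRV_NAME, typically "be2net"):
 *
 *   modprobe be2net rx_frag_size=4096
 *
 * Any rx_frag_size other than 2048/4096/8192 falls back to 2048, as
 * warned above.
 */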
4836
4837 static void __exit be_exit_module(void)
4838 {
4839         pci_unregister_driver(&be_driver);
4840 }
4841 module_exit(be_exit_module);