drivers/net/ethernet/emulex/benet/be_main.c
1 /*
2  * Copyright (C) 2005 - 2013 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 #include <linux/aer.h>
24 #include <linux/if_bridge.h>
25 #include <net/busy_poll.h>
26
27 MODULE_VERSION(DRV_VER);
29 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
30 MODULE_AUTHOR("Emulex Corporation");
31 MODULE_LICENSE("GPL");
32
33 static unsigned int num_vfs;
34 module_param(num_vfs, uint, S_IRUGO);
35 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
36
37 static ushort rx_frag_size = 2048;
38 module_param(rx_frag_size, ushort, S_IRUGO);
39 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
40
41 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
42         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
43         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
44         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
45         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
46         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
47         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
48         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
49         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
50         { 0 }
51 };
52 MODULE_DEVICE_TABLE(pci, be_dev_ids);
53 /* UE Status Low CSR */
54 static const char * const ue_status_low_desc[] = {
55         "CEV",
56         "CTX",
57         "DBUF",
58         "ERX",
59         "Host",
60         "MPU",
61         "NDMA",
62         "PTC ",
63         "RDMA ",
64         "RXF ",
65         "RXIPS ",
66         "RXULP0 ",
67         "RXULP1 ",
68         "RXULP2 ",
69         "TIM ",
70         "TPOST ",
71         "TPRE ",
72         "TXIPS ",
73         "TXULP0 ",
74         "TXULP1 ",
75         "UC ",
76         "WDMA ",
77         "TXULP2 ",
78         "HOST1 ",
79         "P0_OB_LINK ",
80         "P1_OB_LINK ",
81         "HOST_GPIO ",
82         "MBOX ",
83         "AXGMAC0",
84         "AXGMAC1",
85         "JTAG",
86         "MPU_INTPEND"
87 };
88 /* UE Status High CSR */
89 static const char * const ue_status_hi_desc[] = {
90         "LPCMEMHOST",
91         "MGMT_MAC",
92         "PCS0ONLINE",
93         "MPU_IRAM",
94         "PCS1ONLINE",
95         "PCTL0",
96         "PCTL1",
97         "PMEM",
98         "RR",
99         "TXPB",
100         "RXPP",
101         "XAUI",
102         "TXP",
103         "ARM",
104         "IPC",
105         "HOST2",
106         "HOST3",
107         "HOST4",
108         "HOST5",
109         "HOST6",
110         "HOST7",
111         "HOST8",
112         "HOST9",
113         "NETC",
114         "Unknown",
115         "Unknown",
116         "Unknown",
117         "Unknown",
118         "Unknown",
119         "Unknown",
120         "Unknown",
121         "Unknown"
122 };
123
124 /* Is BE in a multi-channel mode */
125 static inline bool be_is_mc(struct be_adapter *adapter) {
126         return (adapter->function_mode & FLEX10_MODE ||
127                 adapter->function_mode & VNIC_MODE ||
128                 adapter->function_mode & UMC_ENABLED);
129 }
130
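/* be_queue_free()/be_queue_alloc() manage the DMA-coherent descriptor memory
 * backing a ring: alloc zeroes the ring and records its length and entry
 * size, free releases the memory and clears the VA pointer.
 */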
131 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
132 {
133         struct be_dma_mem *mem = &q->dma_mem;
134         if (mem->va) {
135                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
136                                   mem->dma);
137                 mem->va = NULL;
138         }
139 }
140
141 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
142                 u16 len, u16 entry_size)
143 {
144         struct be_dma_mem *mem = &q->dma_mem;
145
146         memset(q, 0, sizeof(*q));
147         q->len = len;
148         q->entry_size = entry_size;
149         mem->size = len * entry_size;
150         mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
151                                       GFP_KERNEL);
152         if (!mem->va)
153                 return -ENOMEM;
154         return 0;
155 }
156
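/* Host interrupt enable/disable: be_reg_intr_set() toggles the HOSTINTR bit
 * in the MEMBAR control register via PCI config space; be_intr_set() prefers
 * the be_cmd_intr_set() FW cmd and falls back to the register write only when
 * that cmd fails (neither is used on Lancer).
 */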
157 static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
158 {
159         u32 reg, enabled;
160
161         pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162                                 &reg);
163         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
165         if (!enabled && enable)
166                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167         else if (enabled && !enable)
168                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
169         else
170                 return;
171
172         pci_write_config_dword(adapter->pdev,
173                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
174 }
175
176 static void be_intr_set(struct be_adapter *adapter, bool enable)
177 {
178         int status = 0;
179
180         /* On lancer interrupts can't be controlled via this register */
181         if (lancer_chip(adapter))
182                 return;
183
184         if (adapter->eeh_error)
185                 return;
186
187         status = be_cmd_intr_set(adapter, enable);
188         if (status)
189                 be_reg_intr_set(adapter, enable);
190 }
191
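/* Doorbell helpers: the queue id and the count of posted buffers (or of
 * popped events/completions) are packed into a single 32-bit word and written
 * to the doorbell BAR.  The wmb() before the RQ/TXQ doorbells makes the new
 * ring entries visible to the adapter first; the EQ/CQ doorbells also carry
 * the re-arm/clear-interrupt bits and are skipped while an EEH error is
 * pending.
 */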
192 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
193 {
194         u32 val = 0;
195         val |= qid & DB_RQ_RING_ID_MASK;
196         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
197
198         wmb();
199         iowrite32(val, adapter->db + DB_RQ_OFFSET);
200 }
201
202 static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
203                           u16 posted)
204 {
205         u32 val = 0;
206         val |= txo->q.id & DB_TXULP_RING_ID_MASK;
207         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
208
209         wmb();
210         iowrite32(val, adapter->db + txo->db_offset);
211 }
212
213 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
214                 bool arm, bool clear_int, u16 num_popped)
215 {
216         u32 val = 0;
217         val |= qid & DB_EQ_RING_ID_MASK;
218         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
219                         DB_EQ_RING_ID_EXT_MASK_SHIFT);
220
221         if (adapter->eeh_error)
222                 return;
223
224         if (arm)
225                 val |= 1 << DB_EQ_REARM_SHIFT;
226         if (clear_int)
227                 val |= 1 << DB_EQ_CLR_SHIFT;
228         val |= 1 << DB_EQ_EVNT_SHIFT;
229         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
230         iowrite32(val, adapter->db + DB_EQ_OFFSET);
231 }
232
233 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
234 {
235         u32 val = 0;
236         val |= qid & DB_CQ_RING_ID_MASK;
237         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
238                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
239
240         if (adapter->eeh_error)
241                 return;
242
243         if (arm)
244                 val |= 1 << DB_CQ_REARM_SHIFT;
245         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
246         iowrite32(val, adapter->db + DB_CQ_OFFSET);
247 }
248
249 static int be_mac_addr_set(struct net_device *netdev, void *p)
250 {
251         struct be_adapter *adapter = netdev_priv(netdev);
252         struct device *dev = &adapter->pdev->dev;
253         struct sockaddr *addr = p;
254         int status;
255         u8 mac[ETH_ALEN];
256         u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
257
258         if (!is_valid_ether_addr(addr->sa_data))
259                 return -EADDRNOTAVAIL;
260
261         /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
262          * privilege or if PF did not provision the new MAC address.
263          * On BE3, this cmd will always fail if the VF doesn't have the
264          * FILTMGMT privilege. This failure is OK only if the PF programmed
265          * the MAC for the VF.
266          */
267         status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
268                                  adapter->if_handle, &adapter->pmac_id[0], 0);
269         if (!status) {
270                 curr_pmac_id = adapter->pmac_id[0];
271
272                 /* Delete the old programmed MAC. This call may fail if the
273                  * old MAC was already deleted by the PF driver.
274                  */
275                 if (adapter->pmac_id[0] != old_pmac_id)
276                         be_cmd_pmac_del(adapter, adapter->if_handle,
277                                         old_pmac_id, 0);
278         }
279
280         /* Decide if the new MAC is successfully activated only after
281          * querying the FW
282          */
283         status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
284         if (status)
285                 goto err;
286
287         /* The MAC change did not happen, either due to lack of privilege
288          * or PF didn't pre-provision.
289          */
290         if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
291                 status = -EPERM;
292                 goto err;
293         }
294
295         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
296         dev_info(dev, "MAC address changed to %pM\n", mac);
297         return 0;
298 err:
299         dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
300         return status;
301 }
302
303 /* BE2 supports only v0 cmd */
304 static void *hw_stats_from_cmd(struct be_adapter *adapter)
305 {
306         if (BE2_chip(adapter)) {
307                 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
308
309                 return &cmd->hw_stats;
310         } else if (BE3_chip(adapter)) {
311                 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
312
313                 return &cmd->hw_stats;
314         } else {
315                 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
316
317                 return &cmd->hw_stats;
318         }
319 }
320
321 /* BE2 supports only v0 cmd */
322 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
323 {
324         if (BE2_chip(adapter)) {
325                 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
326
327                 return &hw_stats->erx;
328         } else if (BE3_chip(adapter)) {
329                 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
330
331                 return &hw_stats->erx;
332         } else {
333                 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
334
335                 return &hw_stats->erx;
336         }
337 }
338
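/* populate_be_v[0|1|2]_stats() copy the per-port RXF, PMEM and (for v2) RoCE
 * counters from the FW GET_STATS response into adapter->drv_stats, after
 * converting the response from LE to CPU byte order.  populate_lancer_stats()
 * does the same from the Lancer pport stats.
 */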
339 static void populate_be_v0_stats(struct be_adapter *adapter)
340 {
341         struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
342         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
343         struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
344         struct be_port_rxf_stats_v0 *port_stats =
345                                         &rxf_stats->port[adapter->port_num];
346         struct be_drv_stats *drvs = &adapter->drv_stats;
347
348         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
349         drvs->rx_pause_frames = port_stats->rx_pause_frames;
350         drvs->rx_crc_errors = port_stats->rx_crc_errors;
351         drvs->rx_control_frames = port_stats->rx_control_frames;
352         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
353         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
354         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
355         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
356         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
357         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
358         drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
359         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
360         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
361         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
362         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
363         drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
364         drvs->rx_dropped_header_too_small =
365                 port_stats->rx_dropped_header_too_small;
366         drvs->rx_address_filtered =
367                                         port_stats->rx_address_filtered +
368                                         port_stats->rx_vlan_filtered;
369         drvs->rx_alignment_symbol_errors =
370                 port_stats->rx_alignment_symbol_errors;
371
372         drvs->tx_pauseframes = port_stats->tx_pauseframes;
373         drvs->tx_controlframes = port_stats->tx_controlframes;
374
375         if (adapter->port_num)
376                 drvs->jabber_events = rxf_stats->port1_jabber_events;
377         else
378                 drvs->jabber_events = rxf_stats->port0_jabber_events;
379         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
380         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
381         drvs->forwarded_packets = rxf_stats->forwarded_packets;
382         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
383         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
384         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
385         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
386 }
387
388 static void populate_be_v1_stats(struct be_adapter *adapter)
389 {
390         struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
391         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
392         struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
393         struct be_port_rxf_stats_v1 *port_stats =
394                                         &rxf_stats->port[adapter->port_num];
395         struct be_drv_stats *drvs = &adapter->drv_stats;
396
397         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
398         drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
399         drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
400         drvs->rx_pause_frames = port_stats->rx_pause_frames;
401         drvs->rx_crc_errors = port_stats->rx_crc_errors;
402         drvs->rx_control_frames = port_stats->rx_control_frames;
403         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
404         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
405         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
406         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
407         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
408         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
409         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
410         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
411         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
412         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
413         drvs->rx_dropped_header_too_small =
414                 port_stats->rx_dropped_header_too_small;
415         drvs->rx_input_fifo_overflow_drop =
416                 port_stats->rx_input_fifo_overflow_drop;
417         drvs->rx_address_filtered = port_stats->rx_address_filtered;
418         drvs->rx_alignment_symbol_errors =
419                 port_stats->rx_alignment_symbol_errors;
420         drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
421         drvs->tx_pauseframes = port_stats->tx_pauseframes;
422         drvs->tx_controlframes = port_stats->tx_controlframes;
423         drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
424         drvs->jabber_events = port_stats->jabber_events;
425         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
426         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
427         drvs->forwarded_packets = rxf_stats->forwarded_packets;
428         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
429         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
430         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
431         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
432 }
433
434 static void populate_be_v2_stats(struct be_adapter *adapter)
435 {
436         struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
437         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
438         struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
439         struct be_port_rxf_stats_v2 *port_stats =
440                                         &rxf_stats->port[adapter->port_num];
441         struct be_drv_stats *drvs = &adapter->drv_stats;
442
443         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
444         drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
445         drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
446         drvs->rx_pause_frames = port_stats->rx_pause_frames;
447         drvs->rx_crc_errors = port_stats->rx_crc_errors;
448         drvs->rx_control_frames = port_stats->rx_control_frames;
449         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
450         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
451         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
452         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
453         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
454         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
455         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
456         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
457         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
458         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
459         drvs->rx_dropped_header_too_small =
460                 port_stats->rx_dropped_header_too_small;
461         drvs->rx_input_fifo_overflow_drop =
462                 port_stats->rx_input_fifo_overflow_drop;
463         drvs->rx_address_filtered = port_stats->rx_address_filtered;
464         drvs->rx_alignment_symbol_errors =
465                 port_stats->rx_alignment_symbol_errors;
466         drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
467         drvs->tx_pauseframes = port_stats->tx_pauseframes;
468         drvs->tx_controlframes = port_stats->tx_controlframes;
469         drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
470         drvs->jabber_events = port_stats->jabber_events;
471         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
472         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
473         drvs->forwarded_packets = rxf_stats->forwarded_packets;
474         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
475         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
476         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
477         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
478         if (be_roce_supported(adapter))  {
479                 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
480                 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
481                 drvs->rx_roce_frames = port_stats->roce_frames_received;
482                 drvs->roce_drops_crc = port_stats->roce_drops_crc;
483                 drvs->roce_drops_payload_len =
484                         port_stats->roce_drops_payload_len;
485         }
486 }
487
488 static void populate_lancer_stats(struct be_adapter *adapter)
489 {
490
491         struct be_drv_stats *drvs = &adapter->drv_stats;
492         struct lancer_pport_stats *pport_stats =
493                                         pport_stats_from_cmd(adapter);
494
495         be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
496         drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
497         drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
498         drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
499         drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
500         drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
501         drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
502         drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
503         drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
504         drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
505         drvs->rx_dropped_tcp_length =
506                                 pport_stats->rx_dropped_invalid_tcp_length;
507         drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
508         drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
509         drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
510         drvs->rx_dropped_header_too_small =
511                                 pport_stats->rx_dropped_header_too_small;
512         drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
513         drvs->rx_address_filtered =
514                                         pport_stats->rx_address_filtered +
515                                         pport_stats->rx_vlan_filtered;
516         drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
517         drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
518         drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
519         drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
520         drvs->jabber_events = pport_stats->rx_jabbers;
521         drvs->forwarded_packets = pport_stats->num_forwards_lo;
522         drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
523         drvs->rx_drops_too_many_frags =
524                                 pport_stats->rx_drops_too_many_frags_lo;
525 }
526
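/* Accumulate a 16-bit HW counter (which wraps at 65535) into a 32-bit SW
 * accumulator: the low 16 bits of *acc track the current HW value and the
 * high 16 bits count the wrap-arounds, a wrap being detected when the new
 * sample is smaller than the previously stored low half.
 */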
527 static void accumulate_16bit_val(u32 *acc, u16 val)
528 {
529 #define lo(x)                   (x & 0xFFFF)
530 #define hi(x)                   (x & 0xFFFF0000)
531         bool wrapped = val < lo(*acc);
532         u32 newacc = hi(*acc) + val;
533
534         if (wrapped)
535                 newacc += 65536;
536         ACCESS_ONCE(*acc) = newacc;
537 }
538
539 static void populate_erx_stats(struct be_adapter *adapter,
540                         struct be_rx_obj *rxo,
541                         u32 erx_stat)
542 {
543         if (!BEx_chip(adapter))
544                 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
545         else
546                 /* below erx HW counter can actually wrap around after
547                  * 65535. Driver accumulates a 32-bit value
548                  */
549                 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
550                                      (u16)erx_stat);
551 }
552
553 void be_parse_stats(struct be_adapter *adapter)
554 {
555         struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
556         struct be_rx_obj *rxo;
557         int i;
558         u32 erx_stat;
559
560         if (lancer_chip(adapter)) {
561                 populate_lancer_stats(adapter);
562         } else {
563                 if (BE2_chip(adapter))
564                         populate_be_v0_stats(adapter);
565                 else if (BE3_chip(adapter))
566                         /* for BE3 */
567                         populate_be_v1_stats(adapter);
568                 else
569                         populate_be_v2_stats(adapter);
570
571                 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
572                 for_all_rx_queues(adapter, rxo, i) {
573                         erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
574                         populate_erx_stats(adapter, rxo, erx_stat);
575                 }
576         }
577 }
578
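/* ndo_get_stats64: sum the per-queue SW counters (read under the u64_stats
 * seqcount retry loops so 64-bit values stay consistent on 32-bit hosts) and
 * derive the error counters from the FW-populated drv_stats.
 */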
579 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
580                                         struct rtnl_link_stats64 *stats)
581 {
582         struct be_adapter *adapter = netdev_priv(netdev);
583         struct be_drv_stats *drvs = &adapter->drv_stats;
584         struct be_rx_obj *rxo;
585         struct be_tx_obj *txo;
586         u64 pkts, bytes;
587         unsigned int start;
588         int i;
589
590         for_all_rx_queues(adapter, rxo, i) {
591                 const struct be_rx_stats *rx_stats = rx_stats(rxo);
592                 do {
593                         start = u64_stats_fetch_begin_bh(&rx_stats->sync);
594                         pkts = rx_stats(rxo)->rx_pkts;
595                         bytes = rx_stats(rxo)->rx_bytes;
596                 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
597                 stats->rx_packets += pkts;
598                 stats->rx_bytes += bytes;
599                 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
600                 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
601                                         rx_stats(rxo)->rx_drops_no_frags;
602         }
603
604         for_all_tx_queues(adapter, txo, i) {
605                 const struct be_tx_stats *tx_stats = tx_stats(txo);
606                 do {
607                         start = u64_stats_fetch_begin_bh(&tx_stats->sync);
608                         pkts = tx_stats(txo)->tx_pkts;
609                         bytes = tx_stats(txo)->tx_bytes;
610                 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
611                 stats->tx_packets += pkts;
612                 stats->tx_bytes += bytes;
613         }
614
615         /* bad pkts received */
616         stats->rx_errors = drvs->rx_crc_errors +
617                 drvs->rx_alignment_symbol_errors +
618                 drvs->rx_in_range_errors +
619                 drvs->rx_out_range_errors +
620                 drvs->rx_frame_too_long +
621                 drvs->rx_dropped_too_small +
622                 drvs->rx_dropped_too_short +
623                 drvs->rx_dropped_header_too_small +
624                 drvs->rx_dropped_tcp_length +
625                 drvs->rx_dropped_runt;
626
627         /* detailed rx errors */
628         stats->rx_length_errors = drvs->rx_in_range_errors +
629                 drvs->rx_out_range_errors +
630                 drvs->rx_frame_too_long;
631
632         stats->rx_crc_errors = drvs->rx_crc_errors;
633
634         /* frame alignment errors */
635         stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
636
637         /* receiver fifo overrun */
638          * drops_no_pbuf is not per i/f, it's per BE card */
639         stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
640                                 drvs->rx_input_fifo_overflow_drop +
641                                 drvs->rx_drops_no_pbuf;
642         return stats;
643 }
644
645 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
646 {
647         struct net_device *netdev = adapter->netdev;
648
649         if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
650                 netif_carrier_off(netdev);
651                 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
652         }
653
654         if ((link_status & LINK_STATUS_MASK) == LINK_UP)
655                 netif_carrier_on(netdev);
656         else
657                 netif_carrier_off(netdev);
658 }
659
660 static void be_tx_stats_update(struct be_tx_obj *txo,
661                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
662 {
663         struct be_tx_stats *stats = tx_stats(txo);
664
665         u64_stats_update_begin(&stats->sync);
666         stats->tx_reqs++;
667         stats->tx_wrbs += wrb_cnt;
668         stats->tx_bytes += copied;
669         stats->tx_pkts += (gso_segs ? gso_segs : 1);
670         if (stopped)
671                 stats->tx_stops++;
672         u64_stats_update_end(&stats->sync);
673 }
674
675 /* Determine number of WRB entries needed to xmit data in an skb */
676 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
677                                                                 bool *dummy)
678 {
679         int cnt = (skb->len > skb->data_len);
680
681         cnt += skb_shinfo(skb)->nr_frags;
682
683         /* to account for hdr wrb */
684         cnt++;
685         if (lancer_chip(adapter) || !(cnt & 1)) {
686                 *dummy = false;
687         } else {
688                 /* add a dummy to make it an even num */
689                 cnt++;
690                 *dummy = true;
691         }
692         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
693         return cnt;
694 }
695
696 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
697 {
698         wrb->frag_pa_hi = upper_32_bits(addr);
699         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
700         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
701         wrb->rsvd0 = 0;
702 }
703
704 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
705                                         struct sk_buff *skb)
706 {
707         u8 vlan_prio;
708         u16 vlan_tag;
709
710         vlan_tag = vlan_tx_tag_get(skb);
711         vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
712         /* If vlan priority provided by OS is NOT in available bmap */
713         if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
714                 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
715                                 adapter->recommended_prio;
716
717         return vlan_tag;
718 }
719
720 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
721                 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
722 {
723         u16 vlan_tag;
724
725         memset(hdr, 0, sizeof(*hdr));
726
727         AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
728
729         if (skb_is_gso(skb)) {
730                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
731                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
732                         hdr, skb_shinfo(skb)->gso_size);
733                 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
734                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
735         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
736                 if (is_tcp_pkt(skb))
737                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
738                 else if (is_udp_pkt(skb))
739                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
740         }
741
742         if (vlan_tx_tag_present(skb)) {
743                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
744                 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
745                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
746         }
747
748         /* To skip HW VLAN tagging: evt = 1, compl = 0 */
749         AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
750         AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
751         AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
752         AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
753 }
754
755 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
756                 bool unmap_single)
757 {
758         dma_addr_t dma;
759
760         be_dws_le_to_cpu(wrb, sizeof(*wrb));
761
762         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
763         if (wrb->frag_len) {
764                 if (unmap_single)
765                         dma_unmap_single(dev, dma, wrb->frag_len,
766                                          DMA_TO_DEVICE);
767                 else
768                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
769         }
770 }
771
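/* Map the skb for DMA and fill one WRB per fragment: the linear part (if any)
 * is mapped with dma_map_single(), page frags with skb_frag_dma_map(), and an
 * optional dummy WRB (needed on non-Lancer chips) keeps the WRB count even.
 * The header WRB reserved up front is filled last.  Returns the number of
 * bytes mapped, or 0 after unwinding the mappings on a DMA mapping error.
 */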
772 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
773                 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
774                 bool skip_hw_vlan)
775 {
776         dma_addr_t busaddr;
777         int i, copied = 0;
778         struct device *dev = &adapter->pdev->dev;
779         struct sk_buff *first_skb = skb;
780         struct be_eth_wrb *wrb;
781         struct be_eth_hdr_wrb *hdr;
782         bool map_single = false;
783         u16 map_head;
784
785         hdr = queue_head_node(txq);
786         queue_head_inc(txq);
787         map_head = txq->head;
788
789         if (skb->len > skb->data_len) {
790                 int len = skb_headlen(skb);
791                 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
792                 if (dma_mapping_error(dev, busaddr))
793                         goto dma_err;
794                 map_single = true;
795                 wrb = queue_head_node(txq);
796                 wrb_fill(wrb, busaddr, len);
797                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
798                 queue_head_inc(txq);
799                 copied += len;
800         }
801
802         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
803                 const struct skb_frag_struct *frag =
804                         &skb_shinfo(skb)->frags[i];
805                 busaddr = skb_frag_dma_map(dev, frag, 0,
806                                            skb_frag_size(frag), DMA_TO_DEVICE);
807                 if (dma_mapping_error(dev, busaddr))
808                         goto dma_err;
809                 wrb = queue_head_node(txq);
810                 wrb_fill(wrb, busaddr, skb_frag_size(frag));
811                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
812                 queue_head_inc(txq);
813                 copied += skb_frag_size(frag);
814         }
815
816         if (dummy_wrb) {
817                 wrb = queue_head_node(txq);
818                 wrb_fill(wrb, 0, 0);
819                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
820                 queue_head_inc(txq);
821         }
822
823         wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
824         be_dws_cpu_to_le(hdr, sizeof(*hdr));
825
826         return copied;
827 dma_err:
828         txq->head = map_head;
829         while (copied) {
830                 wrb = queue_head_node(txq);
831                 unmap_tx_frag(dev, wrb, map_single);
832                 map_single = false;
833                 copied -= wrb->frag_len;
834                 queue_head_inc(txq);
835         }
836         return 0;
837 }
838
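/* Software VLAN insertion used by the TX workarounds: inserts the (possibly
 * priority-adjusted) VLAN tag directly into the packet with __vlan_put_tag(),
 * defaulting to the port's pvid when a QnQ async event has been received, and
 * also inserts the outer qnq_vid tag; in the pvid/QnQ cases *skip_hw_vlan is
 * set so that HW VLAN tagging is skipped for the packet.
 */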
839 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
840                                              struct sk_buff *skb,
841                                              bool *skip_hw_vlan)
842 {
843         u16 vlan_tag = 0;
844
845         skb = skb_share_check(skb, GFP_ATOMIC);
846         if (unlikely(!skb))
847                 return skb;
848
849         if (vlan_tx_tag_present(skb))
850                 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
851
852         if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
853                 if (!vlan_tag)
854                         vlan_tag = adapter->pvid;
855                 /* f/w workaround: setting skip_hw_vlan = 1 informs the F/W to
856                  * skip VLAN insertion
857                  */
858                 if (skip_hw_vlan)
859                         *skip_hw_vlan = true;
860         }
861
862         if (vlan_tag) {
863                 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
864                 if (unlikely(!skb))
865                         return skb;
866                 skb->vlan_tci = 0;
867         }
868
869         /* Insert the outer VLAN, if any */
870         if (adapter->qnq_vid) {
871                 vlan_tag = adapter->qnq_vid;
872                 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
873                 if (unlikely(!skb))
874                         return skb;
875                 if (skip_hw_vlan)
876                         *skip_hw_vlan = true;
877         }
878
879         return skb;
880 }
881
882 static bool be_ipv6_exthdr_check(struct sk_buff *skb)
883 {
884         struct ethhdr *eh = (struct ethhdr *)skb->data;
885         u16 offset = ETH_HLEN;
886
887         if (eh->h_proto == htons(ETH_P_IPV6)) {
888                 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
889
890                 offset += sizeof(struct ipv6hdr);
891                 if (ip6h->nexthdr != NEXTHDR_TCP &&
892                     ip6h->nexthdr != NEXTHDR_UDP) {
893                         struct ipv6_opt_hdr *ehdr =
894                                 (struct ipv6_opt_hdr *) (skb->data + offset);
895
896                         /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
897                         if (ehdr->hdrlen == 0xff)
898                                 return true;
899                 }
900         }
901         return false;
902 }
903
904 static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
905 {
906         return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
907 }
908
909 static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
910                                 struct sk_buff *skb)
911 {
912         return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
913 }
914
915 static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
916                                            struct sk_buff *skb,
917                                            bool *skip_hw_vlan)
918 {
919         struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
920         unsigned int eth_hdr_len;
921         struct iphdr *ip;
922
923         /* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or less
924          * may cause a transmit stall on that port. So the work-around is to
925          * pad short packets (<= 32 bytes) to a 36-byte length.
926          */
927         if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
928                 if (skb_padto(skb, 36))
929                         goto tx_drop;
930                 skb->len = 36;
931         }
932
933         /* For padded packets, BE HW modifies tot_len field in IP header
934          * incorrectly when VLAN tag is inserted by HW.
935          * For padded packets, Lancer computes incorrect checksum.
936          */
937         eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
938                                                 VLAN_ETH_HLEN : ETH_HLEN;
939         if (skb->len <= 60 &&
940             (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
941             is_ipv4_pkt(skb)) {
942                 ip = (struct iphdr *)ip_hdr(skb);
943                 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
944         }
945
946         /* If vlan tag is already inlined in the packet, skip HW VLAN
947          * tagging in UMC mode
948          */
949         if ((adapter->function_mode & UMC_ENABLED) &&
950             veh->h_vlan_proto == htons(ETH_P_8021Q))
951                         *skip_hw_vlan = true;
952
953         /* HW has a bug wherein it will calculate CSUM for VLAN
954          * pkts even though it is disabled.
955          * Manually insert VLAN in pkt.
956          */
957         if (skb->ip_summed != CHECKSUM_PARTIAL &&
958             vlan_tx_tag_present(skb)) {
959                 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
960                 if (unlikely(!skb))
961                         goto tx_drop;
962         }
963
964         /* HW may lockup when VLAN HW tagging is requested on
965          * certain ipv6 packets. Drop such pkts if the HW workaround to
966          * skip HW tagging is not enabled by FW.
967          */
968         if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
969             (adapter->pvid || adapter->qnq_vid) &&
970             !qnq_async_evt_rcvd(adapter)))
971                 goto tx_drop;
972
973         /* Manual VLAN tag insertion to prevent:
974          * ASIC lockup when the ASIC inserts VLAN tag into
975          * certain ipv6 packets. Insert VLAN tags in driver,
976          * and set event, completion, vlan bits accordingly
977          * in the Tx WRB.
978          */
979         if (be_ipv6_tx_stall_chk(adapter, skb) &&
980             be_vlan_tag_tx_chk(adapter, skb)) {
981                 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
982                 if (unlikely(!skb))
983                         goto tx_drop;
984         }
985
986         return skb;
987 tx_drop:
988         dev_kfree_skb_any(skb);
989         return NULL;
990 }
991
992 static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
993 {
994         struct be_adapter *adapter = netdev_priv(netdev);
995         struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
996         struct be_queue_info *txq = &txo->q;
997         bool dummy_wrb, stopped = false;
998         u32 wrb_cnt = 0, copied = 0;
999         bool skip_hw_vlan = false;
1000         u32 start = txq->head;
1001
1002         skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
1003         if (!skb) {
1004                 tx_stats(txo)->tx_drv_drops++;
1005                 return NETDEV_TX_OK;
1006         }
1007
1008         wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
1009
1010         copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
1011                               skip_hw_vlan);
1012         if (copied) {
1013                 int gso_segs = skb_shinfo(skb)->gso_segs;
1014
1015                 /* record the sent skb in the sent_skb table */
1016                 BUG_ON(txo->sent_skb_list[start]);
1017                 txo->sent_skb_list[start] = skb;
1018
1019                 /* Ensure txq has space for the next skb; else stop the queue
1020                  * *BEFORE* ringing the tx doorbell, so that we serialize the
1021                  * tx compls of the current transmit which'll wake up the queue
1022                  */
1023                 atomic_add(wrb_cnt, &txq->used);
1024                 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
1025                                                                 txq->len) {
1026                         netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
1027                         stopped = true;
1028                 }
1029
1030                 be_txq_notify(adapter, txo, wrb_cnt);
1031
1032                 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
1033         } else {
1034                 txq->head = start;
1035                 tx_stats(txo)->tx_drv_drops++;
1036                 dev_kfree_skb_any(skb);
1037         }
1038         return NETDEV_TX_OK;
1039 }
1040
1041 static int be_change_mtu(struct net_device *netdev, int new_mtu)
1042 {
1043         struct be_adapter *adapter = netdev_priv(netdev);
1044         if (new_mtu < BE_MIN_MTU ||
1045                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
1046                                         (ETH_HLEN + ETH_FCS_LEN))) {
1047                 dev_info(&adapter->pdev->dev,
1048                         "MTU must be between %d and %d bytes\n",
1049                         BE_MIN_MTU,
1050                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
1051                 return -EINVAL;
1052         }
1053         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1054                         netdev->mtu, new_mtu);
1055         netdev->mtu = new_mtu;
1056         return 0;
1057 }
1058
1059 /*
1060  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1061  * If the user configures more, place BE in vlan promiscuous mode.
1062  */
1063 static int be_vid_config(struct be_adapter *adapter)
1064 {
1065         u16 vids[BE_NUM_VLANS_SUPPORTED];
1066         u16 num = 0, i;
1067         int status = 0;
1068
1069         /* No need to further configure vids if in promiscuous mode */
1070         if (adapter->promiscuous)
1071                 return 0;
1072
1073         if (adapter->vlans_added > be_max_vlans(adapter))
1074                 goto set_vlan_promisc;
1075
1076         /* Construct VLAN Table to give to HW */
1077         for (i = 0; i < VLAN_N_VID; i++)
1078                 if (adapter->vlan_tag[i])
1079                         vids[num++] = cpu_to_le16(i);
1080
1081         status = be_cmd_vlan_config(adapter, adapter->if_handle,
1082                                     vids, num, 0);
1083
1084         if (status) {
1085                 /* Set to VLAN promisc mode as setting VLAN filter failed */
1086                 if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
1087                         goto set_vlan_promisc;
1088                 dev_err(&adapter->pdev->dev,
1089                         "Setting HW VLAN filtering failed.\n");
1090         } else {
1091                 if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1092                         /* hw VLAN filtering re-enabled. */
1093                         status = be_cmd_rx_filter(adapter,
1094                                                   BE_FLAGS_VLAN_PROMISC, OFF);
1095                         if (!status) {
1096                                 dev_info(&adapter->pdev->dev,
1097                                          "Disabling VLAN Promiscuous mode.\n");
1098                                 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1099                                 dev_info(&adapter->pdev->dev,
1100                                          "Re-Enabling HW VLAN filtering\n");
1101                         }
1102                 }
1103         }
1104
1105         return status;
1106
1107 set_vlan_promisc:
1108         dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
1109
1110         status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1111         if (!status) {
1112                 dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
1113                 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
1114                 adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1115         } else
1116                 dev_err(&adapter->pdev->dev,
1117                         "Failed to enable VLAN Promiscuous mode.\n");
1118         return status;
1119 }
1120
1121 static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1122 {
1123         struct be_adapter *adapter = netdev_priv(netdev);
1124         int status = 0;
1125
1126
1127         /* Packets with VID 0 are always received by Lancer by default */
1128         if (lancer_chip(adapter) && vid == 0)
1129                 goto ret;
1130
1131         adapter->vlan_tag[vid] = 1;
1132         if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
1133                 status = be_vid_config(adapter);
1134
1135         if (!status)
1136                 adapter->vlans_added++;
1137         else
1138                 adapter->vlan_tag[vid] = 0;
1139 ret:
1140         return status;
1141 }
1142
1143 static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1144 {
1145         struct be_adapter *adapter = netdev_priv(netdev);
1146         int status = 0;
1147
1148         /* Packets with VID 0 are always received by Lancer by default */
1149         if (lancer_chip(adapter) && vid == 0)
1150                 goto ret;
1151
1152         adapter->vlan_tag[vid] = 0;
1153         if (adapter->vlans_added <= be_max_vlans(adapter))
1154                 status = be_vid_config(adapter);
1155
1156         if (!status)
1157                 adapter->vlans_added--;
1158         else
1159                 adapter->vlan_tag[vid] = 1;
1160 ret:
1161         return status;
1162 }
1163
1164 static void be_set_rx_mode(struct net_device *netdev)
1165 {
1166         struct be_adapter *adapter = netdev_priv(netdev);
1167         int status;
1168
1169         if (netdev->flags & IFF_PROMISC) {
1170                 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1171                 adapter->promiscuous = true;
1172                 goto done;
1173         }
1174
1175         /* BE was previously in promiscuous mode; disable it */
1176         if (adapter->promiscuous) {
1177                 adapter->promiscuous = false;
1178                 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1179
1180                 if (adapter->vlans_added)
1181                         be_vid_config(adapter);
1182         }
1183
1184         /* Enable multicast promisc if num configured exceeds what we support */
1185         if (netdev->flags & IFF_ALLMULTI ||
1186             netdev_mc_count(netdev) > be_max_mc(adapter)) {
1187                 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1188                 goto done;
1189         }
1190
1191         if (netdev_uc_count(netdev) != adapter->uc_macs) {
1192                 struct netdev_hw_addr *ha;
1193                 int i = 1; /* First slot is claimed by the Primary MAC */
1194
1195                 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1196                         be_cmd_pmac_del(adapter, adapter->if_handle,
1197                                         adapter->pmac_id[i], 0);
1198                 }
1199
1200                 if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
1201                         be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1202                         adapter->promiscuous = true;
1203                         goto done;
1204                 }
1205
1206                 netdev_for_each_uc_addr(ha, adapter->netdev) {
1207                         adapter->uc_macs++; /* First slot is for Primary MAC */
1208                         be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1209                                         adapter->if_handle,
1210                                         &adapter->pmac_id[adapter->uc_macs], 0);
1211                 }
1212         }
1213
1214         status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1215
1216         /* Set to MCAST promisc mode if setting MULTICAST address fails */
1217         if (status) {
1218                 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1219                 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1220                 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1221         }
1222 done:
1223         return;
1224 }
1225
1226 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1227 {
1228         struct be_adapter *adapter = netdev_priv(netdev);
1229         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1230         int status;
1231
1232         if (!sriov_enabled(adapter))
1233                 return -EPERM;
1234
1235         if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1236                 return -EINVAL;
1237
1238         if (BEx_chip(adapter)) {
1239                 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1240                                 vf + 1);
1241
1242                 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1243                                          &vf_cfg->pmac_id, vf + 1);
1244         } else {
1245                 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1246                                         vf + 1);
1247         }
1248
1249         if (status)
1250                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1251                                 mac, vf);
1252         else
1253                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
1254
1255         return status;
1256 }
1257
1258 static int be_get_vf_config(struct net_device *netdev, int vf,
1259                         struct ifla_vf_info *vi)
1260 {
1261         struct be_adapter *adapter = netdev_priv(netdev);
1262         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1263
1264         if (!sriov_enabled(adapter))
1265                 return -EPERM;
1266
1267         if (vf >= adapter->num_vfs)
1268                 return -EINVAL;
1269
1270         vi->vf = vf;
1271         vi->tx_rate = vf_cfg->tx_rate;
1272         vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1273         vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1274         memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1275
1276         return 0;
1277 }
1278
1279 static int be_set_vf_vlan(struct net_device *netdev,
1280                         int vf, u16 vlan, u8 qos)
1281 {
1282         struct be_adapter *adapter = netdev_priv(netdev);
1283         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1284         int status = 0;
1285
1286         if (!sriov_enabled(adapter))
1287                 return -EPERM;
1288
1289         if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1290                 return -EINVAL;
1291
1292         if (vlan || qos) {
1293                 vlan |= qos << VLAN_PRIO_SHIFT;
1294                 if (vf_cfg->vlan_tag != vlan) {
1295                         /* If this is new value, program it. Else skip. */
1296                         vf_cfg->vlan_tag = vlan;
1297                         status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1298                                                        vf_cfg->if_handle, 0);
1299                 }
1300         } else {
1301                 /* Reset Transparent Vlan Tagging. */
1302                 vf_cfg->vlan_tag = 0;
1303                 vlan = vf_cfg->def_vid;
1304                 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1305                                                vf_cfg->if_handle, 0);
1306         }
1307
1308
1309         if (status)
1310                 dev_info(&adapter->pdev->dev,
1311                                 "VLAN %d config on VF %d failed\n", vlan, vf);
1312         return status;
1313 }
1314
1315 static int be_set_vf_tx_rate(struct net_device *netdev,
1316                         int vf, int rate)
1317 {
1318         struct be_adapter *adapter = netdev_priv(netdev);
1319         int status = 0;
1320
1321         if (!sriov_enabled(adapter))
1322                 return -EPERM;
1323
1324         if (vf >= adapter->num_vfs)
1325                 return -EINVAL;
1326
1327         if (rate < 100 || rate > 10000) {
1328                 dev_err(&adapter->pdev->dev,
1329                         "tx rate must be between 100 and 10000 Mbps\n");
1330                 return -EINVAL;
1331         }
1332
1333         if (lancer_chip(adapter))
1334                 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1335         else
1336                 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1337
1338         if (status)
1339                 dev_err(&adapter->pdev->dev,
1340                                 "tx rate %d on VF %d failed\n", rate, vf);
1341         else
1342                 adapter->vf_cfg[vf].tx_rate = rate;
1343         return status;
1344 }
1345
1346 static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1347                           ulong now)
1348 {
1349         aic->rx_pkts_prev = rx_pkts;
1350         aic->tx_reqs_prev = tx_pkts;
1351         aic->jiffies = now;
1352 }
1353
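/* Adaptive interrupt coalescing: for each EQ, compute the combined RX+TX
 * packet rate since the last sample and map it to a new EQ delay
 * (eqd = (pps / 15000) << 2, zeroed below 8 and clamped to the
 * [min_eqd, max_eqd] range).  When adaptive mode is off the static et_eqd
 * value is used.  Only EQs whose delay actually changed are re-programmed
 * via be_cmd_modify_eqd(), with delay_multiplier = eqd * 65 / 100.
 */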
1354 static void be_eqd_update(struct be_adapter *adapter)
1355 {
1356         struct be_set_eqd set_eqd[MAX_EVT_QS];
1357         int eqd, i, num = 0, start;
1358         struct be_aic_obj *aic;
1359         struct be_eq_obj *eqo;
1360         struct be_rx_obj *rxo;
1361         struct be_tx_obj *txo;
1362         u64 rx_pkts, tx_pkts;
1363         ulong now;
1364         u32 pps, delta;
1365
1366         for_all_evt_queues(adapter, eqo, i) {
1367                 aic = &adapter->aic_obj[eqo->idx];
1368                 if (!aic->enable) {
1369                         if (aic->jiffies)
1370                                 aic->jiffies = 0;
1371                         eqd = aic->et_eqd;
1372                         goto modify_eqd;
1373                 }
1374
1375                 rxo = &adapter->rx_obj[eqo->idx];
1376                 do {
1377                         start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
1378                         rx_pkts = rxo->stats.rx_pkts;
1379                 } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
1380
1381                 txo = &adapter->tx_obj[eqo->idx];
1382                 do {
1383                         start = u64_stats_fetch_begin_bh(&txo->stats.sync);
1384                         tx_pkts = txo->stats.tx_reqs;
1385                 } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
1386
1387
1388                 /* Skip if we wrapped around or this is the first calculation */
1389                 now = jiffies;
1390                 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1391                     rx_pkts < aic->rx_pkts_prev ||
1392                     tx_pkts < aic->tx_reqs_prev) {
1393                         be_aic_update(aic, rx_pkts, tx_pkts, now);
1394                         continue;
1395                 }
1396
1397                 delta = jiffies_to_msecs(now - aic->jiffies);
1398                 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1399                         (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
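                     /* Heuristic: add ~4 units of EQ delay per 15K pkts/sec;
                      * rates below ~30K pps get no delay; clamp the result to
                      * [min_eqd, max_eqd] */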
1400                 eqd = (pps / 15000) << 2;
1401
1402                 if (eqd < 8)
1403                         eqd = 0;
1404                 eqd = min_t(u32, eqd, aic->max_eqd);
1405                 eqd = max_t(u32, eqd, aic->min_eqd);
1406
1407                 be_aic_update(aic, rx_pkts, tx_pkts, now);
1408 modify_eqd:
1409                 if (eqd != aic->prev_eqd) {
1410                         set_eqd[num].delay_multiplier = (eqd * 65)/100;
1411                         set_eqd[num].eq_id = eqo->q.id;
1412                         aic->prev_eqd = eqd;
1413                         num++;
1414                 }
1415         }
1416
1417         if (num)
1418                 be_cmd_modify_eqd(adapter, set_eqd, num);
1419 }
1420
1421 static void be_rx_stats_update(struct be_rx_obj *rxo,
1422                 struct be_rx_compl_info *rxcp)
1423 {
1424         struct be_rx_stats *stats = rx_stats(rxo);
1425
1426         u64_stats_update_begin(&stats->sync);
1427         stats->rx_compl++;
1428         stats->rx_bytes += rxcp->pkt_size;
1429         stats->rx_pkts++;
1430         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1431                 stats->rx_mcast_pkts++;
1432         if (rxcp->err)
1433                 stats->rx_compl_err++;
1434         u64_stats_update_end(&stats->sync);
1435 }
1436
1437 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1438 {
1439         /* L4 checksum is not reliable for non-TCP/UDP packets.
1440          * Also ignore ipcksm for IPv6 pkts */
1441         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1442                                 (rxcp->ip_csum || rxcp->ipv6);
1443 }
1444
1445 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1446                                                 u16 frag_idx)
1447 {
1448         struct be_adapter *adapter = rxo->adapter;
1449         struct be_rx_page_info *rx_page_info;
1450         struct be_queue_info *rxq = &rxo->q;
1451
1452         rx_page_info = &rxo->page_info_tbl[frag_idx];
1453         BUG_ON(!rx_page_info->page);
1454
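             /* The big page was DMA-mapped once for all its fragments; unmap
              * it only when its last fragment (last_page_user) is consumed */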
1455         if (rx_page_info->last_page_user) {
1456                 dma_unmap_page(&adapter->pdev->dev,
1457                                dma_unmap_addr(rx_page_info, bus),
1458                                adapter->big_page_size, DMA_FROM_DEVICE);
1459                 rx_page_info->last_page_user = false;
1460         }
1461
1462         atomic_dec(&rxq->used);
1463         return rx_page_info;
1464 }
1465
1466 /* Throw away the data in the Rx completion */
1467 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1468                                 struct be_rx_compl_info *rxcp)
1469 {
1470         struct be_queue_info *rxq = &rxo->q;
1471         struct be_rx_page_info *page_info;
1472         u16 i, num_rcvd = rxcp->num_rcvd;
1473
1474         for (i = 0; i < num_rcvd; i++) {
1475                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1476                 put_page(page_info->page);
1477                 memset(page_info, 0, sizeof(*page_info));
1478                 index_inc(&rxcp->rxq_idx, rxq->len);
1479         }
1480 }
1481
1482 /*
1483  * skb_fill_rx_data forms a complete skb for an ether frame
1484  * indicated by rxcp.
1485  */
1486 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1487                              struct be_rx_compl_info *rxcp)
1488 {
1489         struct be_queue_info *rxq = &rxo->q;
1490         struct be_rx_page_info *page_info;
1491         u16 i, j;
1492         u16 hdr_len, curr_frag_len, remaining;
1493         u8 *start;
1494
1495         page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1496         start = page_address(page_info->page) + page_info->page_offset;
1497         prefetch(start);
1498
1499         /* Copy data from the first descriptor of this completion */
1500         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1501
1502         skb->len = curr_frag_len;
1503         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1504                 memcpy(skb->data, start, curr_frag_len);
1505                 /* Complete packet has now been moved to data */
1506                 put_page(page_info->page);
1507                 skb->data_len = 0;
1508                 skb->tail += curr_frag_len;
1509         } else {
1510                 hdr_len = ETH_HLEN;
1511                 memcpy(skb->data, start, hdr_len);
1512                 skb_shinfo(skb)->nr_frags = 1;
1513                 skb_frag_set_page(skb, 0, page_info->page);
1514                 skb_shinfo(skb)->frags[0].page_offset =
1515                                         page_info->page_offset + hdr_len;
1516                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1517                 skb->data_len = curr_frag_len - hdr_len;
1518                 skb->truesize += rx_frag_size;
1519                 skb->tail += hdr_len;
1520         }
1521         page_info->page = NULL;
1522
1523         if (rxcp->pkt_size <= rx_frag_size) {
1524                 BUG_ON(rxcp->num_rcvd != 1);
1525                 return;
1526         }
1527
1528         /* More frags present for this completion */
1529         index_inc(&rxcp->rxq_idx, rxq->len);
1530         remaining = rxcp->pkt_size - curr_frag_len;
1531         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1532                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1533                 curr_frag_len = min(remaining, rx_frag_size);
1534
1535                 /* Coalesce all frags from the same physical page in one slot */
1536                 if (page_info->page_offset == 0) {
1537                         /* Fresh page */
1538                         j++;
1539                         skb_frag_set_page(skb, j, page_info->page);
1540                         skb_shinfo(skb)->frags[j].page_offset =
1541                                                         page_info->page_offset;
1542                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1543                         skb_shinfo(skb)->nr_frags++;
1544                 } else {
1545                         put_page(page_info->page);
1546                 }
1547
1548                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1549                 skb->len += curr_frag_len;
1550                 skb->data_len += curr_frag_len;
1551                 skb->truesize += rx_frag_size;
1552                 remaining -= curr_frag_len;
1553                 index_inc(&rxcp->rxq_idx, rxq->len);
1554                 page_info->page = NULL;
1555         }
1556         BUG_ON(j > MAX_SKB_FRAGS);
1557 }
1558
1559 /* Process the RX completion indicated by rxcp when GRO is disabled */
1560 static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
1561                                 struct be_rx_compl_info *rxcp)
1562 {
1563         struct be_adapter *adapter = rxo->adapter;
1564         struct net_device *netdev = adapter->netdev;
1565         struct sk_buff *skb;
1566
1567         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1568         if (unlikely(!skb)) {
1569                 rx_stats(rxo)->rx_drops_no_skbs++;
1570                 be_rx_compl_discard(rxo, rxcp);
1571                 return;
1572         }
1573
1574         skb_fill_rx_data(rxo, skb, rxcp);
1575
1576         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1577                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1578         else
1579                 skb_checksum_none_assert(skb);
1580
1581         skb->protocol = eth_type_trans(skb, netdev);
1582         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1583         if (netdev->features & NETIF_F_RXHASH)
1584                 skb->rxhash = rxcp->rss_hash;
1585         skb_mark_napi_id(skb, napi);
1586
1587         if (rxcp->vlanf)
1588                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1589
1590         netif_receive_skb(skb);
1591 }
1592
1593 /* Process the RX completion indicated by rxcp when GRO is enabled */
1594 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1595                                     struct napi_struct *napi,
1596                                     struct be_rx_compl_info *rxcp)
1597 {
1598         struct be_adapter *adapter = rxo->adapter;
1599         struct be_rx_page_info *page_info;
1600         struct sk_buff *skb = NULL;
1601         struct be_queue_info *rxq = &rxo->q;
1602         u16 remaining, curr_frag_len;
1603         u16 i, j;
1604
1605         skb = napi_get_frags(napi);
1606         if (!skb) {
1607                 be_rx_compl_discard(rxo, rxcp);
1608                 return;
1609         }
1610
1611         remaining = rxcp->pkt_size;
1612         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1613                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1614
1615                 curr_frag_len = min(remaining, rx_frag_size);
1616
1617                 /* Coalesce all frags from the same physical page in one slot */
1618                 if (i == 0 || page_info->page_offset == 0) {
1619                         /* First frag or Fresh page */
1620                         j++;
1621                         skb_frag_set_page(skb, j, page_info->page);
1622                         skb_shinfo(skb)->frags[j].page_offset =
1623                                                         page_info->page_offset;
1624                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1625                 } else {
1626                         put_page(page_info->page);
1627                 }
1628                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1629                 skb->truesize += rx_frag_size;
1630                 remaining -= curr_frag_len;
1631                 index_inc(&rxcp->rxq_idx, rxq->len);
1632                 memset(page_info, 0, sizeof(*page_info));
1633         }
1634         BUG_ON(j > MAX_SKB_FRAGS);
1635
1636         skb_shinfo(skb)->nr_frags = j + 1;
1637         skb->len = rxcp->pkt_size;
1638         skb->data_len = rxcp->pkt_size;
1639         skb->ip_summed = CHECKSUM_UNNECESSARY;
1640         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1641         if (adapter->netdev->features & NETIF_F_RXHASH)
1642                 skb->rxhash = rxcp->rss_hash;
1643         skb_mark_napi_id(skb, napi);
1644
1645         if (rxcp->vlanf)
1646                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1647
1648         napi_gro_frags(napi);
1649 }
1650
1651 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1652                                  struct be_rx_compl_info *rxcp)
1653 {
1654         rxcp->pkt_size =
1655                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1656         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1657         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1658         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1659         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1660         rxcp->ip_csum =
1661                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1662         rxcp->l4_csum =
1663                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1664         rxcp->ipv6 =
1665                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1666         rxcp->rxq_idx =
1667                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1668         rxcp->num_rcvd =
1669                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1670         rxcp->pkt_type =
1671                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1672         rxcp->rss_hash =
1673                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1674         if (rxcp->vlanf) {
1675                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1676                                           compl);
1677                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1678                                                compl);
1679         }
1680         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1681 }
1682
1683 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1684                                  struct be_rx_compl_info *rxcp)
1685 {
1686         rxcp->pkt_size =
1687                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1688         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1689         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1690         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1691         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1692         rxcp->ip_csum =
1693                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1694         rxcp->l4_csum =
1695                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1696         rxcp->ipv6 =
1697                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1698         rxcp->rxq_idx =
1699                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1700         rxcp->num_rcvd =
1701                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1702         rxcp->pkt_type =
1703                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1704         rxcp->rss_hash =
1705                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1706         if (rxcp->vlanf) {
1707                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1708                                           compl);
1709                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1710                                                compl);
1711         }
1712         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1713         rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1714                                       ip_frag, compl);
1715 }
1716
1717 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1718 {
1719         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1720         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1721         struct be_adapter *adapter = rxo->adapter;
1722
1723         /* For checking the valid bit it is Ok to use either definition as the
1724          * valid bit is at the same position in both v0 and v1 Rx compl */
1725         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1726                 return NULL;
1727
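             /* Read the rest of the compl only after the valid bit is seen */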
1728         rmb();
1729         be_dws_le_to_cpu(compl, sizeof(*compl));
1730
1731         if (adapter->be3_native)
1732                 be_parse_rx_compl_v1(compl, rxcp);
1733         else
1734                 be_parse_rx_compl_v0(compl, rxcp);
1735
1736         if (rxcp->ip_frag)
1737                 rxcp->l4_csum = 0;
1738
1739         if (rxcp->vlanf) {
1740                 /* vlanf could be wrongly set in some cards.
1741                  * ignore if vtm is not set */
1742                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1743                         rxcp->vlanf = 0;
1744
1745                 if (!lancer_chip(adapter))
1746                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1747
1748                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1749                     !adapter->vlan_tag[rxcp->vlan_tag])
1750                         rxcp->vlanf = 0;
1751         }
1752
1753         /* As the compl has been parsed, reset it; we won't touch it again */
1754         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1755
1756         queue_tail_inc(&rxo->cq);
1757         return rxcp;
1758 }
1759
1760 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1761 {
1762         u32 order = get_order(size);
1763
1764         if (order > 0)
1765                 gfp |= __GFP_COMP;
1766         return  alloc_pages(gfp, order);
1767 }
1768
1769 /*
1770  * Allocate a page, split it into fragments of size rx_frag_size and post as
1771  * receive buffers to BE
1772  */
1773 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1774 {
1775         struct be_adapter *adapter = rxo->adapter;
1776         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1777         struct be_queue_info *rxq = &rxo->q;
1778         struct page *pagep = NULL;
1779         struct be_eth_rx_d *rxd;
1780         u64 page_dmaaddr = 0, frag_dmaaddr;
1781         u32 posted, page_offset = 0;
1782
1783         page_info = &rxo->page_info_tbl[rxq->head];
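             /* Post until MAX_RX_POST frags are posted or we wrap to a slot
              * whose page is still in use (i.e. the RXQ is full) */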
1784         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1785                 if (!pagep) {
1786                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1787                         if (unlikely(!pagep)) {
1788                                 rx_stats(rxo)->rx_post_fail++;
1789                                 break;
1790                         }
1791                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1792                                                     0, adapter->big_page_size,
1793                                                     DMA_FROM_DEVICE);
1794                         page_info->page_offset = 0;
1795                 } else {
1796                         get_page(pagep);
1797                         page_info->page_offset = page_offset + rx_frag_size;
1798                 }
1799                 page_offset = page_info->page_offset;
1800                 page_info->page = pagep;
1801                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1802                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1803
1804                 rxd = queue_head_node(rxq);
1805                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1806                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1807
1808                 /* Any space left in the current big page for another frag? */
1809                 if ((page_offset + rx_frag_size + rx_frag_size) >
1810                                         adapter->big_page_size) {
1811                         pagep = NULL;
1812                         page_info->last_page_user = true;
1813                 }
1814
1815                 prev_page_info = page_info;
1816                 queue_head_inc(rxq);
1817                 page_info = &rxo->page_info_tbl[rxq->head];
1818         }
1819         if (pagep)
1820                 prev_page_info->last_page_user = true;
1821
1822         if (posted) {
1823                 atomic_add(posted, &rxq->used);
1824                 if (rxo->rx_post_starved)
1825                         rxo->rx_post_starved = false;
1826                 be_rxq_notify(adapter, rxq->id, posted);
1827         } else if (atomic_read(&rxq->used) == 0) {
1828                 /* Let be_worker replenish when memory is available */
1829                 rxo->rx_post_starved = true;
1830         }
1831 }
1832
1833 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1834 {
1835         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1836
1837         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1838                 return NULL;
1839
1840         rmb();
1841         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1842
1843         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1844
1845         queue_tail_inc(tx_cq);
1846         return txcp;
1847 }
1848
1849 static u16 be_tx_compl_process(struct be_adapter *adapter,
1850                 struct be_tx_obj *txo, u16 last_index)
1851 {
1852         struct be_queue_info *txq = &txo->q;
1853         struct be_eth_wrb *wrb;
1854         struct sk_buff **sent_skbs = txo->sent_skb_list;
1855         struct sk_buff *sent_skb;
1856         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1857         bool unmap_skb_hdr = true;
1858
1859         sent_skb = sent_skbs[txq->tail];
1860         BUG_ON(!sent_skb);
1861         sent_skbs[txq->tail] = NULL;
1862
1863         /* skip header wrb */
1864         queue_tail_inc(txq);
1865
1866         do {
1867                 cur_index = txq->tail;
1868                 wrb = queue_tail_node(txq);
1869                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1870                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1871                 unmap_skb_hdr = false;
1872
1873                 num_wrbs++;
1874                 queue_tail_inc(txq);
1875         } while (cur_index != last_index);
1876
1877         kfree_skb(sent_skb);
1878         return num_wrbs;
1879 }
1880
1881 /* Return the number of events in the event queue */
1882 static inline int events_get(struct be_eq_obj *eqo)
1883 {
1884         struct be_eq_entry *eqe;
1885         int num = 0;
1886
1887         do {
1888                 eqe = queue_tail_node(&eqo->q);
1889                 if (eqe->evt == 0)
1890                         break;
1891
1892                 rmb();
1893                 eqe->evt = 0;
1894                 num++;
1895                 queue_tail_inc(&eqo->q);
1896         } while (true);
1897
1898         return num;
1899 }
1900
1901 /* Leaves the EQ in a disarmed state */
1902 static void be_eq_clean(struct be_eq_obj *eqo)
1903 {
1904         int num = events_get(eqo);
1905
1906         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1907 }
1908
1909 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1910 {
1911         struct be_rx_page_info *page_info;
1912         struct be_queue_info *rxq = &rxo->q;
1913         struct be_queue_info *rx_cq = &rxo->cq;
1914         struct be_rx_compl_info *rxcp;
1915         struct be_adapter *adapter = rxo->adapter;
1916         int flush_wait = 0;
1917         u16 tail;
1918
1919         /* Consume pending rx completions.
1920          * Wait for the flush completion (identified by zero num_rcvd)
1921          * to arrive. Notify CQ even when there are no more CQ entries
1922          * for HW to flush partially coalesced CQ entries.
1923          * In Lancer, there is no need to wait for flush compl.
1924          */
1925         for (;;) {
1926                 rxcp = be_rx_compl_get(rxo);
1927                 if (rxcp == NULL) {
1928                         if (lancer_chip(adapter))
1929                                 break;
1930
1931                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1932                                 dev_warn(&adapter->pdev->dev,
1933                                          "did not receive flush compl\n");
1934                                 break;
1935                         }
1936                         be_cq_notify(adapter, rx_cq->id, true, 0);
1937                         mdelay(1);
1938                 } else {
1939                         be_rx_compl_discard(rxo, rxcp);
1940                         be_cq_notify(adapter, rx_cq->id, false, 1);
1941                         if (rxcp->num_rcvd == 0)
1942                                 break;
1943                 }
1944         }
1945
1946         /* After cleanup, leave the CQ in unarmed state */
1947         be_cq_notify(adapter, rx_cq->id, false, 0);
1948
1949         /* Then free posted rx buffers that were not used */
1950         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1951         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1952                 page_info = get_rx_page_info(rxo, tail);
1953                 put_page(page_info->page);
1954                 memset(page_info, 0, sizeof(*page_info));
1955         }
1956         BUG_ON(atomic_read(&rxq->used));
1957         rxq->tail = rxq->head = 0;
1958 }
1959
1960 static void be_tx_compl_clean(struct be_adapter *adapter)
1961 {
1962         struct be_tx_obj *txo;
1963         struct be_queue_info *txq;
1964         struct be_eth_tx_compl *txcp;
1965         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1966         struct sk_buff *sent_skb;
1967         bool dummy_wrb;
1968         int i, pending_txqs;
1969
1970         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1971         do {
1972                 pending_txqs = adapter->num_tx_qs;
1973
1974                 for_all_tx_queues(adapter, txo, i) {
1975                         txq = &txo->q;
1976                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1977                                 end_idx =
1978                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1979                                                       wrb_index, txcp);
1980                                 num_wrbs += be_tx_compl_process(adapter, txo,
1981                                                                 end_idx);
1982                                 cmpl++;
1983                         }
1984                         if (cmpl) {
1985                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1986                                 atomic_sub(num_wrbs, &txq->used);
1987                                 cmpl = 0;
1988                                 num_wrbs = 0;
1989                         }
1990                         if (atomic_read(&txq->used) == 0)
1991                                 pending_txqs--;
1992                 }
1993
1994                 if (pending_txqs == 0 || ++timeo > 200)
1995                         break;
1996
1997                 mdelay(1);
1998         } while (true);
1999
2000         for_all_tx_queues(adapter, txo, i) {
2001                 txq = &txo->q;
2002                 if (atomic_read(&txq->used))
2003                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2004                                 atomic_read(&txq->used));
2005
2006                 /* free posted tx for which compls will never arrive */
2007                 while (atomic_read(&txq->used)) {
2008                         sent_skb = txo->sent_skb_list[txq->tail];
2009                         end_idx = txq->tail;
2010                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2011                                                    &dummy_wrb);
2012                         index_adv(&end_idx, num_wrbs - 1, txq->len);
2013                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2014                         atomic_sub(num_wrbs, &txq->used);
2015                 }
2016         }
2017 }
2018
2019 static void be_evt_queues_destroy(struct be_adapter *adapter)
2020 {
2021         struct be_eq_obj *eqo;
2022         int i;
2023
2024         for_all_evt_queues(adapter, eqo, i) {
2025                 if (eqo->q.created) {
2026                         be_eq_clean(eqo);
2027                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2028                         napi_hash_del(&eqo->napi);
2029                         netif_napi_del(&eqo->napi);
2030                 }
2031                 be_queue_free(adapter, &eqo->q);
2032         }
2033 }
2034
2035 static int be_evt_queues_create(struct be_adapter *adapter)
2036 {
2037         struct be_queue_info *eq;
2038         struct be_eq_obj *eqo;
2039         struct be_aic_obj *aic;
2040         int i, rc;
2041
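             /* One EQ (and NAPI context) is created per interrupt vector,
              * capped by the configured number of queues */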
2042         adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2043                                     adapter->cfg_num_qs);
2044
2045         for_all_evt_queues(adapter, eqo, i) {
2046                 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2047                                BE_NAPI_WEIGHT);
2048                 napi_hash_add(&eqo->napi);
2049                 aic = &adapter->aic_obj[i];
2050                 eqo->adapter = adapter;
2051                 eqo->tx_budget = BE_TX_BUDGET;
2052                 eqo->idx = i;
2053                 aic->max_eqd = BE_MAX_EQD;
2054                 aic->enable = true;
2055
2056                 eq = &eqo->q;
2057                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2058                                         sizeof(struct be_eq_entry));
2059                 if (rc)
2060                         return rc;
2061
2062                 rc = be_cmd_eq_create(adapter, eqo);
2063                 if (rc)
2064                         return rc;
2065         }
2066         return 0;
2067 }
2068
2069 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2070 {
2071         struct be_queue_info *q;
2072
2073         q = &adapter->mcc_obj.q;
2074         if (q->created)
2075                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2076         be_queue_free(adapter, q);
2077
2078         q = &adapter->mcc_obj.cq;
2079         if (q->created)
2080                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2081         be_queue_free(adapter, q);
2082 }
2083
2084 /* Must be called only after TX qs are created as MCC shares TX EQ */
2085 static int be_mcc_queues_create(struct be_adapter *adapter)
2086 {
2087         struct be_queue_info *q, *cq;
2088
2089         cq = &adapter->mcc_obj.cq;
2090         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2091                         sizeof(struct be_mcc_compl)))
2092                 goto err;
2093
2094         /* Use the default EQ for MCC completions */
2095         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2096                 goto mcc_cq_free;
2097
2098         q = &adapter->mcc_obj.q;
2099         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2100                 goto mcc_cq_destroy;
2101
2102         if (be_cmd_mccq_create(adapter, q, cq))
2103                 goto mcc_q_free;
2104
2105         return 0;
2106
2107 mcc_q_free:
2108         be_queue_free(adapter, q);
2109 mcc_cq_destroy:
2110         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2111 mcc_cq_free:
2112         be_queue_free(adapter, cq);
2113 err:
2114         return -1;
2115 }
2116
2117 static void be_tx_queues_destroy(struct be_adapter *adapter)
2118 {
2119         struct be_queue_info *q;
2120         struct be_tx_obj *txo;
2121         u8 i;
2122
2123         for_all_tx_queues(adapter, txo, i) {
2124                 q = &txo->q;
2125                 if (q->created)
2126                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2127                 be_queue_free(adapter, q);
2128
2129                 q = &txo->cq;
2130                 if (q->created)
2131                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2132                 be_queue_free(adapter, q);
2133         }
2134 }
2135
2136 static int be_tx_qs_create(struct be_adapter *adapter)
2137 {
2138         struct be_queue_info *cq, *eq;
2139         struct be_tx_obj *txo;
2140         int status, i;
2141
2142         adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2143
2144         for_all_tx_queues(adapter, txo, i) {
2145                 cq = &txo->cq;
2146                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2147                                         sizeof(struct be_eth_tx_compl));
2148                 if (status)
2149                         return status;
2150
2151                 u64_stats_init(&txo->stats.sync);
2152                 u64_stats_init(&txo->stats.sync_compl);
2153
2154                 /* If num_evt_qs is less than num_tx_qs, then more than
2155                  * one txq shares an eq
2156                  */
2157                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2158                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2159                 if (status)
2160                         return status;
2161
2162                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2163                                         sizeof(struct be_eth_wrb));
2164                 if (status)
2165                         return status;
2166
2167                 status = be_cmd_txq_create(adapter, txo);
2168                 if (status)
2169                         return status;
2170         }
2171
2172         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2173                  adapter->num_tx_qs);
2174         return 0;
2175 }
2176
2177 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2178 {
2179         struct be_queue_info *q;
2180         struct be_rx_obj *rxo;
2181         int i;
2182
2183         for_all_rx_queues(adapter, rxo, i) {
2184                 q = &rxo->cq;
2185                 if (q->created)
2186                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2187                 be_queue_free(adapter, q);
2188         }
2189 }
2190
2191 static int be_rx_cqs_create(struct be_adapter *adapter)
2192 {
2193         struct be_queue_info *eq, *cq;
2194         struct be_rx_obj *rxo;
2195         int rc, i;
2196
2197         /* We can create as many RSS rings as there are EQs. */
2198         adapter->num_rx_qs = adapter->num_evt_qs;
2199
2200         /* We'll use RSS only if at least 2 RSS rings are supported.
2201          * When RSS is used, we'll need a default RXQ for non-IP traffic.
2202          */
2203         if (adapter->num_rx_qs > 1)
2204                 adapter->num_rx_qs++;
2205
2206         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2207         for_all_rx_queues(adapter, rxo, i) {
2208                 rxo->adapter = adapter;
2209                 cq = &rxo->cq;
2210                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2211                                 sizeof(struct be_eth_rx_compl));
2212                 if (rc)
2213                         return rc;
2214
2215                 u64_stats_init(&rxo->stats.sync);
2216                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2217                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2218                 if (rc)
2219                         return rc;
2220         }
2221
2222         dev_info(&adapter->pdev->dev,
2223                  "created %d RSS queue(s) and 1 default RX queue\n",
2224                  adapter->num_rx_qs - 1);
2225         return 0;
2226 }
2227
2228 static irqreturn_t be_intx(int irq, void *dev)
2229 {
2230         struct be_eq_obj *eqo = dev;
2231         struct be_adapter *adapter = eqo->adapter;
2232         int num_evts = 0;
2233
2234         /* IRQ is not expected when NAPI is scheduled as the EQ
2235          * will not be armed.
2236          * But, this can happen on Lancer INTx where it takes
2237          * a while to de-assert INTx or in BE2 where occasionally
2238          * an interrupt may be raised even when EQ is unarmed.
2239          * If NAPI is already scheduled, then counting & notifying
2240          * events will orphan them.
2241          */
2242         if (napi_schedule_prep(&eqo->napi)) {
2243                 num_evts = events_get(eqo);
2244                 __napi_schedule(&eqo->napi);
2245                 if (num_evts)
2246                         eqo->spurious_intr = 0;
2247         }
2248         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2249
2250         /* Return IRQ_HANDLED only for the first spurious intr
2251          * after a valid intr to stop the kernel from branding
2252          * this irq as a bad one!
2253          */
2254         if (num_evts || eqo->spurious_intr++ == 0)
2255                 return IRQ_HANDLED;
2256         else
2257                 return IRQ_NONE;
2258 }
2259
2260 static irqreturn_t be_msix(int irq, void *dev)
2261 {
2262         struct be_eq_obj *eqo = dev;
2263
2264         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2265         napi_schedule(&eqo->napi);
2266         return IRQ_HANDLED;
2267 }
2268
2269 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2270 {
2271         return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
2272 }
2273
2274 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2275                         int budget, int polling)
2276 {
2277         struct be_adapter *adapter = rxo->adapter;
2278         struct be_queue_info *rx_cq = &rxo->cq;
2279         struct be_rx_compl_info *rxcp;
2280         u32 work_done;
2281
2282         for (work_done = 0; work_done < budget; work_done++) {
2283                 rxcp = be_rx_compl_get(rxo);
2284                 if (!rxcp)
2285                         break;
2286
2287                 /* Is it a flush compl that has no data? */
2288                 if (unlikely(rxcp->num_rcvd == 0))
2289                         goto loop_continue;
2290
2291                 /* Discard compl with partial DMA (Lancer B0) */
2292                 if (unlikely(!rxcp->pkt_size)) {
2293                         be_rx_compl_discard(rxo, rxcp);
2294                         goto loop_continue;
2295                 }
2296
2297                 /* On BE drop pkts that arrive due to imperfect filtering in
2298                  * promiscuous mode on some SKUs
2299                  */
2300                 if (unlikely(rxcp->port != adapter->port_num &&
2301                                 !lancer_chip(adapter))) {
2302                         be_rx_compl_discard(rxo, rxcp);
2303                         goto loop_continue;
2304                 }
2305
2306                 /* Don't do gro when we're busy_polling */
2307                 if (do_gro(rxcp) && polling != BUSY_POLLING)
2308                         be_rx_compl_process_gro(rxo, napi, rxcp);
2309                 else
2310                         be_rx_compl_process(rxo, napi, rxcp);
2311
2312 loop_continue:
2313                 be_rx_stats_update(rxo, rxcp);
2314         }
2315
2316         if (work_done) {
2317                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2318
2319                 /* When an rx-obj gets into post_starved state, just
2320                  * let be_worker do the posting.
2321                  */
2322                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2323                     !rxo->rx_post_starved)
2324                         be_post_rx_frags(rxo, GFP_ATOMIC);
2325         }
2326
2327         return work_done;
2328 }
2329
2330 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2331                           int budget, int idx)
2332 {
2333         struct be_eth_tx_compl *txcp;
2334         int num_wrbs = 0, work_done;
2335
2336         for (work_done = 0; work_done < budget; work_done++) {
2337                 txcp = be_tx_compl_get(&txo->cq);
2338                 if (!txcp)
2339                         break;
2340                 num_wrbs += be_tx_compl_process(adapter, txo,
2341                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2342                                         wrb_index, txcp));
2343         }
2344
2345         if (work_done) {
2346                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2347                 atomic_sub(num_wrbs, &txo->q.used);
2348
2349                 /* As Tx wrbs have been freed up, wake up netdev queue
2350                  * if it was stopped due to lack of tx wrbs.  */
2351                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2352                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2353                         netif_wake_subqueue(adapter->netdev, idx);
2354                 }
2355
2356                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2357                 tx_stats(txo)->tx_compl += work_done;
2358                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2359         }
2360         return (work_done < budget); /* Done */
2361 }
2362
2363 int be_poll(struct napi_struct *napi, int budget)
2364 {
2365         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2366         struct be_adapter *adapter = eqo->adapter;
2367         int max_work = 0, work, i, num_evts;
2368         struct be_rx_obj *rxo;
2369         bool tx_done;
2370
2371         num_evts = events_get(eqo);
2372
2373         /* Process all TXQs serviced by this EQ */
2374         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2375                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2376                                         eqo->tx_budget, i);
2377                 if (!tx_done)
2378                         max_work = budget;
2379         }
2380
2381         if (be_lock_napi(eqo)) {
2382                 /* This loop will iterate twice for EQ0 in which
2383                  * completions of the last RXQ (default one) are also processed.
2384                  * For other EQs the loop iterates only once
2385                  */
2386                 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2387                         work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2388                         max_work = max(work, max_work);
2389                 }
2390                 be_unlock_napi(eqo);
2391         } else {
2392                 max_work = budget;
2393         }
2394
2395         if (is_mcc_eqo(eqo))
2396                 be_process_mcc(adapter);
2397
2398         if (max_work < budget) {
2399                 napi_complete(napi);
2400                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2401         } else {
2402                 /* As we'll continue in polling mode, count and clear events */
2403                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2404         }
2405         return max_work;
2406 }
2407
2408 #ifdef CONFIG_NET_RX_BUSY_POLL
2409 static int be_busy_poll(struct napi_struct *napi)
2410 {
2411         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2412         struct be_adapter *adapter = eqo->adapter;
2413         struct be_rx_obj *rxo;
2414         int i, work = 0;
2415
2416         if (!be_lock_busy_poll(eqo))
2417                 return LL_FLUSH_BUSY;
2418
2419         for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2420                 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2421                 if (work)
2422                         break;
2423         }
2424
2425         be_unlock_busy_poll(eqo);
2426         return work;
2427 }
2428 #endif
2429
2430 void be_detect_error(struct be_adapter *adapter)
2431 {
2432         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2433         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2434         u32 i;
2435
2436         if (be_hw_error(adapter))
2437                 return;
2438
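             /* Lancer reports errors via the SLIPORT status/error registers;
              * BE2/BE3 report unrecoverable errors (UEs) via PCI config space */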
2439         if (lancer_chip(adapter)) {
2440                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2441                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2442                         sliport_err1 = ioread32(adapter->db +
2443                                         SLIPORT_ERROR1_OFFSET);
2444                         sliport_err2 = ioread32(adapter->db +
2445                                         SLIPORT_ERROR2_OFFSET);
2446                 }
2447         } else {
2448                 pci_read_config_dword(adapter->pdev,
2449                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2450                 pci_read_config_dword(adapter->pdev,
2451                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2452                 pci_read_config_dword(adapter->pdev,
2453                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2454                 pci_read_config_dword(adapter->pdev,
2455                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2456
2457                 ue_lo = (ue_lo & ~ue_lo_mask);
2458                 ue_hi = (ue_hi & ~ue_hi_mask);
2459         }
2460
2461         /* On certain platforms BE hardware can indicate spurious UEs.
2462          * Allow the h/w to stop working completely in case of a real UE.
2463          * Hence hw_error is not set on UE detection.
2464          */
2465         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2466                 adapter->hw_error = true;
2467                 dev_err(&adapter->pdev->dev,
2468                         "Error detected in the card\n");
2469         }
2470
2471         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2472                 dev_err(&adapter->pdev->dev,
2473                         "ERR: sliport status 0x%x\n", sliport_status);
2474                 dev_err(&adapter->pdev->dev,
2475                         "ERR: sliport error1 0x%x\n", sliport_err1);
2476                 dev_err(&adapter->pdev->dev,
2477                         "ERR: sliport error2 0x%x\n", sliport_err2);
2478         }
2479
2480         if (ue_lo) {
2481                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2482                         if (ue_lo & 1)
2483                                 dev_err(&adapter->pdev->dev,
2484                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2485                 }
2486         }
2487
2488         if (ue_hi) {
2489                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2490                         if (ue_hi & 1)
2491                                 dev_err(&adapter->pdev->dev,
2492                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2493                 }
2494         }
2495
2496 }
2497
2498 static void be_msix_disable(struct be_adapter *adapter)
2499 {
2500         if (msix_enabled(adapter)) {
2501                 pci_disable_msix(adapter->pdev);
2502                 adapter->num_msix_vec = 0;
2503                 adapter->num_msix_roce_vec = 0;
2504         }
2505 }
2506
2507 static int be_msix_enable(struct be_adapter *adapter)
2508 {
2509         int i, status, num_vec;
2510         struct device *dev = &adapter->pdev->dev;
2511
2512         /* If RoCE is supported, program the max number of NIC vectors that
2513          * may be configured via set-channels, along with vectors needed for
2514          * RoCE. Else, just program the number we'll use initially.
2515          */
2516         if (be_roce_supported(adapter))
2517                 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2518                                 2 * num_online_cpus());
2519         else
2520                 num_vec = adapter->cfg_num_qs;
2521
2522         for (i = 0; i < num_vec; i++)
2523                 adapter->msix_entries[i].entry = i;
2524
2525         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2526         if (status == 0) {
2527                 goto done;
2528         } else if (status >= MIN_MSIX_VECTORS) {
2529                 num_vec = status;
2530                 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2531                                          num_vec);
2532                 if (!status)
2533                         goto done;
2534         }
2535
2536         dev_warn(dev, "MSIx enable failed\n");
2537
2538         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2539         if (!be_physfn(adapter))
2540                 return status;
2541         return 0;
2542 done:
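             /* When RoCE is supported it gets half of the vectors; the NIC
              * keeps the rest */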
2543         if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2544                 adapter->num_msix_roce_vec = num_vec / 2;
2545                 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2546                          adapter->num_msix_roce_vec);
2547         }
2548
2549         adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2550
2551         dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2552                  adapter->num_msix_vec);
2553         return 0;
2554 }
2555
2556 static inline int be_msix_vec_get(struct be_adapter *adapter,
2557                                 struct be_eq_obj *eqo)
2558 {
2559         return adapter->msix_entries[eqo->msix_idx].vector;
2560 }
2561
2562 static int be_msix_register(struct be_adapter *adapter)
2563 {
2564         struct net_device *netdev = adapter->netdev;
2565         struct be_eq_obj *eqo;
2566         int status, i, vec;
2567
2568         for_all_evt_queues(adapter, eqo, i) {
2569                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2570                 vec = be_msix_vec_get(adapter, eqo);
2571                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2572                 if (status)
2573                         goto err_msix;
2574         }
2575
2576         return 0;
2577 err_msix:
2578         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2579                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2580         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2581                 status);
2582         be_msix_disable(adapter);
2583         return status;
2584 }
2585
2586 static int be_irq_register(struct be_adapter *adapter)
2587 {
2588         struct net_device *netdev = adapter->netdev;
2589         int status;
2590
2591         if (msix_enabled(adapter)) {
2592                 status = be_msix_register(adapter);
2593                 if (status == 0)
2594                         goto done;
2595                 /* INTx is not supported for VF */
2596                 if (!be_physfn(adapter))
2597                         return status;
2598         }
2599
2600         /* INTx: only the first EQ is used */
2601         netdev->irq = adapter->pdev->irq;
2602         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2603                              &adapter->eq_obj[0]);
2604         if (status) {
2605                 dev_err(&adapter->pdev->dev,
2606                         "INTx request IRQ failed - err %d\n", status);
2607                 return status;
2608         }
2609 done:
2610         adapter->isr_registered = true;
2611         return 0;
2612 }
2613
2614 static void be_irq_unregister(struct be_adapter *adapter)
2615 {
2616         struct net_device *netdev = adapter->netdev;
2617         struct be_eq_obj *eqo;
2618         int i;
2619
2620         if (!adapter->isr_registered)
2621                 return;
2622
2623         /* INTx */
2624         if (!msix_enabled(adapter)) {
2625                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2626                 goto done;
2627         }
2628
2629         /* MSIx */
2630         for_all_evt_queues(adapter, eqo, i)
2631                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2632
2633 done:
2634         adapter->isr_registered = false;
2635 }
2636
2637 static void be_rx_qs_destroy(struct be_adapter *adapter)
2638 {
2639         struct be_queue_info *q;
2640         struct be_rx_obj *rxo;
2641         int i;
2642
2643         for_all_rx_queues(adapter, rxo, i) {
2644                 q = &rxo->q;
2645                 if (q->created) {
2646                         be_cmd_rxq_destroy(adapter, q);
2647                         be_rx_cq_clean(rxo);
2648                 }
2649                 be_queue_free(adapter, q);
2650         }
2651 }
2652
2653 static int be_close(struct net_device *netdev)
2654 {
2655         struct be_adapter *adapter = netdev_priv(netdev);
2656         struct be_eq_obj *eqo;
2657         int i;
2658
2659         be_roce_dev_close(adapter);
2660
2661         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2662                 for_all_evt_queues(adapter, eqo, i) {
2663                         napi_disable(&eqo->napi);
2664                         be_disable_busy_poll(eqo);
2665                 }
2666                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2667         }
2668
2669         be_async_mcc_disable(adapter);
2670
2671         /* Wait for all pending tx completions to arrive so that
2672          * all tx skbs are freed.
2673          */
2674         netif_tx_disable(netdev);
2675         be_tx_compl_clean(adapter);
2676
2677         be_rx_qs_destroy(adapter);
2678
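             /* Delete the additional unicast MACs (index 0 is the primary MAC) */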
2679         for (i = 1; i < (adapter->uc_macs + 1); i++)
2680                 be_cmd_pmac_del(adapter, adapter->if_handle,
2681                                 adapter->pmac_id[i], 0);
2682         adapter->uc_macs = 0;
2683
2684         for_all_evt_queues(adapter, eqo, i) {
2685                 if (msix_enabled(adapter))
2686                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2687                 else
2688                         synchronize_irq(netdev->irq);
2689                 be_eq_clean(eqo);
2690         }
2691
2692         be_irq_unregister(adapter);
2693
2694         return 0;
2695 }
2696
2697 static int be_rx_qs_create(struct be_adapter *adapter)
2698 {
2699         struct be_rx_obj *rxo;
2700         int rc, i, j;
2701         u8 rsstable[128];
2702
2703         for_all_rx_queues(adapter, rxo, i) {
2704                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2705                                     sizeof(struct be_eth_rx_d));
2706                 if (rc)
2707                         return rc;
2708         }
2709
2710         /* The FW would like the default RXQ to be created first */
2711         rxo = default_rxo(adapter);
2712         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2713                                adapter->if_handle, false, &rxo->rss_id);
2714         if (rc)
2715                 return rc;
2716
2717         for_all_rss_queues(adapter, rxo, i) {
2718                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2719                                        rx_frag_size, adapter->if_handle,
2720                                        true, &rxo->rss_id);
2721                 if (rc)
2722                         return rc;
2723         }
2724
2725         if (be_multi_rxq(adapter)) {
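                     /* Stripe the RSS queue ids across the 128-entry table */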
2726                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2727                         for_all_rss_queues(adapter, rxo, i) {
2728                                 if ((j + i) >= 128)
2729                                         break;
2730                                 rsstable[j + i] = rxo->rss_id;
2731                         }
2732                 }
2733                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2734                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2735
2736                 if (!BEx_chip(adapter))
2737                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2738                                                 RSS_ENABLE_UDP_IPV6;
2739
2740                 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2741                                        128);
2742                 if (rc) {
2743                         adapter->rss_flags = 0;
2744                         return rc;
2745                 }
2746         }
2747
2748         /* First time posting */
2749         for_all_rx_queues(adapter, rxo, i)
2750                 be_post_rx_frags(rxo, GFP_KERNEL);
2751         return 0;
2752 }
2753
2754 static int be_open(struct net_device *netdev)
2755 {
2756         struct be_adapter *adapter = netdev_priv(netdev);
2757         struct be_eq_obj *eqo;
2758         struct be_rx_obj *rxo;
2759         struct be_tx_obj *txo;
2760         u8 link_status;
2761         int status, i;
2762
2763         status = be_rx_qs_create(adapter);
2764         if (status)
2765                 goto err;
2766
2767         status = be_irq_register(adapter);
2768         if (status)
2769                 goto err;
2770
2771         for_all_rx_queues(adapter, rxo, i)
2772                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2773
2774         for_all_tx_queues(adapter, txo, i)
2775                 be_cq_notify(adapter, txo->cq.id, true, 0);
2776
2777         be_async_mcc_enable(adapter);
2778
2779         for_all_evt_queues(adapter, eqo, i) {
2780                 napi_enable(&eqo->napi);
2781                 be_enable_busy_poll(eqo);
2782                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2783         }
2784         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2785
2786         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2787         if (!status)
2788                 be_link_status_update(adapter, link_status);
2789
2790         netif_tx_start_all_queues(netdev);
2791         be_roce_dev_open(adapter);
2792         return 0;
2793 err:
2794         be_close(adapter->netdev);
2795         return -EIO;
2796 }
2797
2798 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2799 {
2800         struct be_dma_mem cmd;
2801         int status = 0;
2802         u8 mac[ETH_ALEN];
2803
2804         memset(mac, 0, ETH_ALEN);
2805
2806         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2807         cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2808                                      GFP_KERNEL);
2809         if (cmd.va == NULL)
2810                 return -1;
2811
2812         if (enable) {
2813                 status = pci_write_config_dword(adapter->pdev,
2814                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2815                 if (status) {
2816                         dev_err(&adapter->pdev->dev,
2817                                 "Could not enable Wake-on-LAN\n");
2818                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2819                                           cmd.dma);
2820                         return status;
2821                 }
2822                 status = be_cmd_enable_magic_wol(adapter,
2823                                 adapter->netdev->dev_addr, &cmd);
2824                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2825                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2826         } else {
2827                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2828                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2829                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2830         }
2831
2832         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2833         return status;
2834 }
2835
2836 /*
2837  * Generate a seed MAC address from the PF MAC Address using jhash.
2838  * MAC addresses for VFs are assigned incrementally starting from the seed.
2839  * These addresses are programmed in the ASIC by the PF and the VF driver
2840  * queries for the MAC address during its probe.
2841  */
2842 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2843 {
2844         u32 vf;
2845         int status = 0;
2846         u8 mac[ETH_ALEN];
2847         struct be_vf_cfg *vf_cfg;
2848
2849         be_vf_eth_addr_generate(adapter, mac);
2850
2851         for_all_vfs(adapter, vf_cfg, vf) {
2852                 if (BEx_chip(adapter))
2853                         status = be_cmd_pmac_add(adapter, mac,
2854                                                  vf_cfg->if_handle,
2855                                                  &vf_cfg->pmac_id, vf + 1);
2856                 else
2857                         status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2858                                                 vf + 1);
2859
2860                 if (status)
2861                         dev_err(&adapter->pdev->dev,
2862                         "MAC address assignment failed for VF %d\n", vf);
2863                 else
2864                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2865
2866                 mac[5] += 1;
2867         }
2868         return status;
2869 }
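
/*
 * Illustrative example (added, not from the original source): if
 * be_vf_eth_addr_generate() produced the seed 02:00:00:11:22:30, the loop
 * above would assign
 *
 *	VF0 -> 02:00:00:11:22:30
 *	VF1 -> 02:00:00:11:22:31
 *	VF2 -> 02:00:00:11:22:32
 *
 * Only mac[5] is incremented, so a seed ending close to 0xff combined with
 * a large VF count would wrap the last octet.
 */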
2870
2871 static int be_vfs_mac_query(struct be_adapter *adapter)
2872 {
2873         int status, vf;
2874         u8 mac[ETH_ALEN];
2875         struct be_vf_cfg *vf_cfg;
2876         bool active = false;
2877
2878         for_all_vfs(adapter, vf_cfg, vf) {
2879                 be_cmd_get_mac_from_list(adapter, mac, &active,
2880                                          &vf_cfg->pmac_id, 0);
2881
2882                 status = be_cmd_mac_addr_query(adapter, mac, false,
2883                                                vf_cfg->if_handle, 0);
2884                 if (status)
2885                         return status;
2886                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2887         }
2888         return 0;
2889 }
2890
2891 static void be_vf_clear(struct be_adapter *adapter)
2892 {
2893         struct be_vf_cfg *vf_cfg;
2894         u32 vf;
2895
2896         if (pci_vfs_assigned(adapter->pdev)) {
2897                 dev_warn(&adapter->pdev->dev,
2898                          "VFs are assigned to VMs: not disabling VFs\n");
2899                 goto done;
2900         }
2901
2902         pci_disable_sriov(adapter->pdev);
2903
2904         for_all_vfs(adapter, vf_cfg, vf) {
2905                 if (BEx_chip(adapter))
2906                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2907                                         vf_cfg->pmac_id, vf + 1);
2908                 else
2909                         be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2910                                        vf + 1);
2911
2912                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2913         }
2914 done:
2915         kfree(adapter->vf_cfg);
2916         adapter->num_vfs = 0;
2917 }
2918
2919 static void be_clear_queues(struct be_adapter *adapter)
2920 {
2921         be_mcc_queues_destroy(adapter);
2922         be_rx_cqs_destroy(adapter);
2923         be_tx_queues_destroy(adapter);
2924         be_evt_queues_destroy(adapter);
2925 }
2926
2927 static void be_cancel_worker(struct be_adapter *adapter)
2928 {
2929         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2930                 cancel_delayed_work_sync(&adapter->work);
2931                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2932         }
2933 }
2934
2935 static int be_clear(struct be_adapter *adapter)
2936 {
2937         int i;
2938
2939         be_cancel_worker(adapter);
2940
2941         if (sriov_enabled(adapter))
2942                 be_vf_clear(adapter);
2943
2944         /* delete the primary mac along with the uc-mac list */
2945         for (i = 0; i < (adapter->uc_macs + 1); i++)
2946                 be_cmd_pmac_del(adapter, adapter->if_handle,
2947                                 adapter->pmac_id[i], 0);
2948         adapter->uc_macs = 0;
2949
2950         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2951
2952         be_clear_queues(adapter);
2953
2954         kfree(adapter->pmac_id);
2955         adapter->pmac_id = NULL;
2956
2957         be_msix_disable(adapter);
2958         return 0;
2959 }
2960
2961 static int be_vfs_if_create(struct be_adapter *adapter)
2962 {
2963         struct be_resources res = {0};
2964         struct be_vf_cfg *vf_cfg;
2965         u32 cap_flags, en_flags, vf;
2966         int status = 0;
2967
2968         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2969                     BE_IF_FLAGS_MULTICAST;
2970
2971         for_all_vfs(adapter, vf_cfg, vf) {
2972                 if (!BE3_chip(adapter)) {
2973                         status = be_cmd_get_profile_config(adapter, &res,
2974                                                            vf + 1);
2975                         if (!status)
2976                                 cap_flags = res.if_cap_flags;
2977                 }
2978
2979                 /* If a FW profile exists, then cap_flags are updated */
2980                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2981                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2982                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2983                                           &vf_cfg->if_handle, vf + 1);
2984                 if (status)
2985                         goto err;
2986         }
2987 err:
2988         return status;
2989 }
2990
2991 static int be_vf_setup_init(struct be_adapter *adapter)
2992 {
2993         struct be_vf_cfg *vf_cfg;
2994         int vf;
2995
2996         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2997                                   GFP_KERNEL);
2998         if (!adapter->vf_cfg)
2999                 return -ENOMEM;
3000
3001         for_all_vfs(adapter, vf_cfg, vf) {
3002                 vf_cfg->if_handle = -1;
3003                 vf_cfg->pmac_id = -1;
3004         }
3005         return 0;
3006 }
3007
3008 static int be_vf_setup(struct be_adapter *adapter)
3009 {
3010         struct be_vf_cfg *vf_cfg;
3011         u16 def_vlan, lnk_speed;
3012         int status, old_vfs, vf;
3013         struct device *dev = &adapter->pdev->dev;
3014         u32 privileges;
3015
3016         old_vfs = pci_num_vf(adapter->pdev);
3017         if (old_vfs) {
3018                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3019                 if (old_vfs != num_vfs)
3020                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3021                 adapter->num_vfs = old_vfs;
3022         } else {
3023                 if (num_vfs > be_max_vfs(adapter))
3024                         dev_info(dev, "Device supports %d VFs and not %d\n",
3025                                  be_max_vfs(adapter), num_vfs);
3026                 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3027                 if (!adapter->num_vfs)
3028                         return 0;
3029         }
3030
3031         status = be_vf_setup_init(adapter);
3032         if (status)
3033                 goto err;
3034
3035         if (old_vfs) {
3036                 for_all_vfs(adapter, vf_cfg, vf) {
3037                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3038                         if (status)
3039                                 goto err;
3040                 }
3041         } else {
3042                 status = be_vfs_if_create(adapter);
3043                 if (status)
3044                         goto err;
3045         }
3046
3047         if (old_vfs) {
3048                 status = be_vfs_mac_query(adapter);
3049                 if (status)
3050                         goto err;
3051         } else {
3052                 status = be_vf_eth_addr_config(adapter);
3053                 if (status)
3054                         goto err;
3055         }
3056
3057         for_all_vfs(adapter, vf_cfg, vf) {
3058                 /* Allow VFs to program MAC/VLAN filters */
3059                 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3060                 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3061                         status = be_cmd_set_fn_privileges(adapter,
3062                                                           privileges |
3063                                                           BE_PRIV_FILTMGMT,
3064                                                           vf + 1);
3065                         if (!status)
3066                                 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3067                                          vf);
3068                 }
3069
3070                 /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
3071                  * Allow full available bandwidth
3072                  */
3073                 if (BE3_chip(adapter) && !old_vfs)
3074                         be_cmd_set_qos(adapter, 1000, vf+1);
3075
3076                 status = be_cmd_link_status_query(adapter, &lnk_speed,
3077                                                   NULL, vf + 1);
3078                 if (!status)
3079                         vf_cfg->tx_rate = lnk_speed;
3080
3081                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
3082                                                vf + 1, vf_cfg->if_handle, NULL);
3083                 if (status)
3084                         goto err;
3085                 vf_cfg->def_vid = def_vlan;
3086
3087                 if (!old_vfs)
3088                         be_cmd_enable_vf(adapter, vf + 1);
3089         }
3090
3091         if (!old_vfs) {
3092                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3093                 if (status) {
3094                         dev_err(dev, "SRIOV enable failed\n");
3095                         adapter->num_vfs = 0;
3096                         goto err;
3097                 }
3098         }
3099         return 0;
3100 err:
3101         dev_err(dev, "VF setup failed\n");
3102         be_vf_clear(adapter);
3103         return status;
3104 }
3105
3106 /* On BE2/BE3, the FW does not report the supported resource limits */
3107 static void BEx_get_resources(struct be_adapter *adapter,
3108                               struct be_resources *res)
3109 {
3110         struct pci_dev *pdev = adapter->pdev;
3111         bool use_sriov = false;
3112
3113         if (BE3_chip(adapter) && sriov_want(adapter)) {
3114                 int max_vfs;
3115
3116                 max_vfs = pci_sriov_get_totalvfs(pdev);
3117                 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3118                 use_sriov = res->max_vfs;
3119         }
3120
3121         if (be_physfn(adapter))
3122                 res->max_uc_mac = BE_UC_PMAC_COUNT;
3123         else
3124                 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3125
3126         if (adapter->function_mode & FLEX10_MODE)
3127                 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3128         else if (adapter->function_mode & UMC_ENABLED)
3129                 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
3130         else
3131                 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3132         res->max_mcast_mac = BE_MAX_MC;
3133
3134         /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
3135         if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
3136             !be_physfn(adapter) || (adapter->port_num > 1))
3137                 res->max_tx_qs = 1;
3138         else
3139                 res->max_tx_qs = BE3_MAX_TX_QS;
3140
3141         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3142             !use_sriov && be_physfn(adapter))
3143                 res->max_rss_qs = (adapter->be3_native) ?
3144                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3145         res->max_rx_qs = res->max_rss_qs + 1;
3146
3147         res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
3148
3149         res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3150         if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3151                 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3152 }
3153
3154 static void be_setup_init(struct be_adapter *adapter)
3155 {
3156         adapter->vlan_prio_bmap = 0xff;
3157         adapter->phy.link_speed = -1;
3158         adapter->if_handle = -1;
3159         adapter->be3_native = false;
3160         adapter->promiscuous = false;
3161         if (be_physfn(adapter))
3162                 adapter->cmd_privileges = MAX_PRIVILEGES;
3163         else
3164                 adapter->cmd_privileges = MIN_PRIVILEGES;
3165 }
3166
3167 static int be_get_resources(struct be_adapter *adapter)
3168 {
3169         struct device *dev = &adapter->pdev->dev;
3170         struct be_resources res = {0};
3171         int status;
3172
3173         if (BEx_chip(adapter)) {
3174                 BEx_get_resources(adapter, &res);
3175                 adapter->res = res;
3176         }
3177
3178         /* For Lancer, SH etc. read per-function resource limits from FW.
3179          * GET_FUNC_CONFIG returns per-function guaranteed limits.
3180          * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
3181          */
3182         if (!BEx_chip(adapter)) {
3183                 status = be_cmd_get_func_config(adapter, &res);
3184                 if (status)
3185                         return status;
3186
3187                 /* If RoCE may be enabled stash away half the EQs for RoCE */
3188                 if (be_roce_supported(adapter))
3189                         res.max_evt_qs /= 2;
3190                 adapter->res = res;
3191
3192                 if (be_physfn(adapter)) {
3193                         status = be_cmd_get_profile_config(adapter, &res, 0);
3194                         if (status)
3195                                 return status;
3196                         adapter->res.max_vfs = res.max_vfs;
3197                 }
3198
3199                 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3200                          be_max_txqs(adapter), be_max_rxqs(adapter),
3201                          be_max_rss(adapter), be_max_eqs(adapter),
3202                          be_max_vfs(adapter));
3203                 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3204                          be_max_uc(adapter), be_max_mc(adapter),
3205                          be_max_vlans(adapter));
3206         }
3207
3208         return 0;
3209 }
3210
3211 /* Routine to query per function resource limits */
3212 static int be_get_config(struct be_adapter *adapter)
3213 {
3214         int status;
3215
3216         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3217                                      &adapter->function_mode,
3218                                      &adapter->function_caps,
3219                                      &adapter->asic_rev);
3220         if (status)
3221                 return status;
3222
3223         status = be_get_resources(adapter);
3224         if (status)
3225                 return status;
3226
3227         /* primary mac needs 1 pmac entry */
3228         adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3229                                    GFP_KERNEL);
3230         if (!adapter->pmac_id)
3231                 return -ENOMEM;
3232
3233         /* Sanitize cfg_num_qs based on HW and platform limits */
3234         adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3235
3236         return 0;
3237 }
3238
3239 static int be_mac_setup(struct be_adapter *adapter)
3240 {
3241         u8 mac[ETH_ALEN];
3242         int status;
3243
3244         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3245                 status = be_cmd_get_perm_mac(adapter, mac);
3246                 if (status)
3247                         return status;
3248
3249                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3250                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3251         } else {
3252                 /* Maybe the HW was reset; dev_addr must be re-programmed */
3253                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3254         }
3255
3256         /* For BE3-R VFs, the PF programs the initial MAC address */
3257         if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3258                 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3259                                 &adapter->pmac_id[0], 0);
3260         return 0;
3261 }
3262
3263 static void be_schedule_worker(struct be_adapter *adapter)
3264 {
3265         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3266         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3267 }
3268
3269 static int be_setup_queues(struct be_adapter *adapter)
3270 {
3271         struct net_device *netdev = adapter->netdev;
3272         int status;
3273
3274         status = be_evt_queues_create(adapter);
3275         if (status)
3276                 goto err;
3277
3278         status = be_tx_qs_create(adapter);
3279         if (status)
3280                 goto err;
3281
3282         status = be_rx_cqs_create(adapter);
3283         if (status)
3284                 goto err;
3285
3286         status = be_mcc_queues_create(adapter);
3287         if (status)
3288                 goto err;
3289
3290         status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3291         if (status)
3292                 goto err;
3293
3294         status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3295         if (status)
3296                 goto err;
3297
3298         return 0;
3299 err:
3300         dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3301         return status;
3302 }
3303
3304 int be_update_queues(struct be_adapter *adapter)
3305 {
3306         struct net_device *netdev = adapter->netdev;
3307         int status;
3308
3309         if (netif_running(netdev))
3310                 be_close(netdev);
3311
3312         be_cancel_worker(adapter);
3313
3314         /* If any vectors have been shared with RoCE we cannot re-program
3315          * the MSIx table.
3316          */
3317         if (!adapter->num_msix_roce_vec)
3318                 be_msix_disable(adapter);
3319
3320         be_clear_queues(adapter);
3321
3322         if (!msix_enabled(adapter)) {
3323                 status = be_msix_enable(adapter);
3324                 if (status)
3325                         return status;
3326         }
3327
3328         status = be_setup_queues(adapter);
3329         if (status)
3330                 return status;
3331
3332         be_schedule_worker(adapter);
3333
3334         if (netif_running(netdev))
3335                 status = be_open(netdev);
3336
3337         return status;
3338 }
3339
3340 static int be_setup(struct be_adapter *adapter)
3341 {
3342         struct device *dev = &adapter->pdev->dev;
3343         u32 tx_fc, rx_fc, en_flags;
3344         int status;
3345
3346         be_setup_init(adapter);
3347
3348         if (!lancer_chip(adapter))
3349                 be_cmd_req_native_mode(adapter);
3350
3351         status = be_get_config(adapter);
3352         if (status)
3353                 goto err;
3354
3355         status = be_msix_enable(adapter);
3356         if (status)
3357                 goto err;
3358
3359         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3360                    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3361         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3362                 en_flags |= BE_IF_FLAGS_RSS;
3363         en_flags = en_flags & be_if_cap_flags(adapter);
3364         status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3365                                   &adapter->if_handle, 0);
3366         if (status)
3367                 goto err;
3368
3369         /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3370         rtnl_lock();
3371         status = be_setup_queues(adapter);
3372         rtnl_unlock();
3373         if (status)
3374                 goto err;
3375
3376         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3377         /* In UMC mode the FW does not return the right privileges.
3378          * Override with the correct privileges, equivalent to those of a PF.
3379          */
3380         if (be_is_mc(adapter))
3381                 adapter->cmd_privileges = MAX_PRIVILEGES;
3382
3383         status = be_mac_setup(adapter);
3384         if (status)
3385                 goto err;
3386
3387         be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3388
3389         if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3390                 dev_err(dev, "Firmware on card is old (%s), IRQs may not work.\n",
3391                         adapter->fw_ver);
3392                 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3393         }
3394
3395         if (adapter->vlans_added)
3396                 be_vid_config(adapter);
3397
3398         be_set_rx_mode(adapter->netdev);
3399
3400         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3401
3402         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3403                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3404                                         adapter->rx_fc);
3405
3406         if (sriov_want(adapter)) {
3407                 if (be_max_vfs(adapter))
3408                         be_vf_setup(adapter);
3409                 else
3410                         dev_warn(dev, "device doesn't support SRIOV\n");
3411         }
3412
3413         status = be_cmd_get_phy_info(adapter);
3414         if (!status && be_pause_supported(adapter))
3415                 adapter->phy.fc_autoneg = 1;
3416
3417         be_schedule_worker(adapter);
3418         return 0;
3419 err:
3420         be_clear(adapter);
3421         return status;
3422 }
3423
3424 #ifdef CONFIG_NET_POLL_CONTROLLER
3425 static void be_netpoll(struct net_device *netdev)
3426 {
3427         struct be_adapter *adapter = netdev_priv(netdev);
3428         struct be_eq_obj *eqo;
3429         int i;
3430
3431         for_all_evt_queues(adapter, eqo, i) {
3432                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3433                 napi_schedule(&eqo->napi);
3434         }
3435
3436         return;
3437 }
3438 #endif
3439
3440 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3441 static char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
3442
3443 static bool be_flash_redboot(struct be_adapter *adapter,
3444                         const u8 *p, u32 img_start, int image_size,
3445                         int hdr_size)
3446 {
3447         u32 crc_offset;
3448         u8 flashed_crc[4];
3449         int status;
3450
3451         crc_offset = hdr_size + img_start + image_size - 4;
3452
3453         p += crc_offset;
3454
3455         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3456                         (image_size - 4));
3457         if (status) {
3458                 dev_err(&adapter->pdev->dev,
3459                 "could not get crc from flash, not flashing redboot\n");
3460                 return false;
3461         }
3462
3463         /* update redboot only if CRC does not match */
3464         if (!memcmp(flashed_crc, p, 4))
3465                 return false;
3466         else
3467                 return true;
3468 }
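
/*
 * Note on the check above (an explanatory sketch, not original code): the
 * last four bytes of the boot-code image inside the UFI file, located at
 * offset hdr_size + img_start + image_size - 4, are compared against the
 * CRC that be_cmd_get_flash_crc() reads back from the flash part:
 *
 *	bytes match  -> image already on flash, skip flashing redboot
 *	bytes differ -> image changed, return true and flash it
 */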
3469
3470 static bool phy_flashing_required(struct be_adapter *adapter)
3471 {
3472         return (adapter->phy.phy_type == TN_8022 &&
3473                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3474 }
3475
3476 static bool is_comp_in_ufi(struct be_adapter *adapter,
3477                            struct flash_section_info *fsec, int type)
3478 {
3479         int i = 0, img_type = 0;
3480         struct flash_section_info_g2 *fsec_g2 = NULL;
3481
3482         if (BE2_chip(adapter))
3483                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3484
3485         for (i = 0; i < MAX_FLASH_COMP; i++) {
3486                 if (fsec_g2)
3487                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3488                 else
3489                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3490
3491                 if (img_type == type)
3492                         return true;
3493         }
3494         return false;
3495
3496 }
3497
3498 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3499                                          int header_size,
3500                                          const struct firmware *fw)
3501 {
3502         struct flash_section_info *fsec = NULL;
3503         const u8 *p = fw->data;
3504
3505         p += header_size;
3506         while (p < (fw->data + fw->size)) {
3507                 fsec = (struct flash_section_info *)p;
3508                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3509                         return fsec;
3510                 p += 32;
3511         }
3512         return NULL;
3513 }
3514
3515 static int be_flash(struct be_adapter *adapter, const u8 *img,
3516                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3517 {
3518         u32 total_bytes = 0, flash_op, num_bytes = 0;
3519         int status = 0;
3520         struct be_cmd_write_flashrom *req = flash_cmd->va;
3521
3522         total_bytes = img_size;
3523         while (total_bytes) {
3524                 num_bytes = min_t(u32, 32*1024, total_bytes);
3525
3526                 total_bytes -= num_bytes;
3527
3528                 if (!total_bytes) {
3529                         if (optype == OPTYPE_PHY_FW)
3530                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3531                         else
3532                                 flash_op = FLASHROM_OPER_FLASH;
3533                 } else {
3534                         if (optype == OPTYPE_PHY_FW)
3535                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3536                         else
3537                                 flash_op = FLASHROM_OPER_SAVE;
3538                 }
3539
3540                 memcpy(req->data_buf, img, num_bytes);
3541                 img += num_bytes;
3542                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3543                                                 flash_op, num_bytes);
3544                 if (status) {
3545                         if (status == ILLEGAL_IOCTL_REQ &&
3546                             optype == OPTYPE_PHY_FW)
3547                                 break;
3548                         dev_err(&adapter->pdev->dev,
3549                                 "cmd to write to flash rom failed.\n");
3550                         return status;
3551                 }
3552         }
3553         return 0;
3554 }
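
/*
 * Illustrative example (added for clarity, not original code): be_flash()
 * pushes the image in 32 KB pieces and issues the "flash" opcode only with
 * the final piece; all earlier pieces use the "save" opcode so the firmware
 * just buffers them.  For a hypothetical 70 KB image:
 *
 *	chunk 1: 32 KB -> FLASHROM_OPER_SAVE
 *	chunk 2: 32 KB -> FLASHROM_OPER_SAVE
 *	chunk 3:  6 KB -> FLASHROM_OPER_FLASH (FLASHROM_OPER_PHY_FLASH for
 *	                                       OPTYPE_PHY_FW images)
 */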
3555
3556 /* For BE2, BE3 and BE3-R */
3557 static int be_flash_BEx(struct be_adapter *adapter,
3558                          const struct firmware *fw,
3559                          struct be_dma_mem *flash_cmd,
3560                          int num_of_images)
3561
3562 {
3563         int status = 0, i, filehdr_size = 0;
3564         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3565         const u8 *p = fw->data;
3566         const struct flash_comp *pflashcomp;
3567         int num_comp, redboot;
3568         struct flash_section_info *fsec = NULL;
3569
3570         struct flash_comp gen3_flash_types[] = {
3571                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3572                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3573                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3574                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3575                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3576                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3577                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3578                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3579                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3580                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3581                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3582                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3583                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3584                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3585                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3586                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3587                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3588                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3589                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3590                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3591         };
3592
3593         struct flash_comp gen2_flash_types[] = {
3594                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3595                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3596                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3597                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3598                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3599                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3600                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3601                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3602                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3603                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3604                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3605                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3606                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3607                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3608                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3609                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3610         };
3611
3612         if (BE3_chip(adapter)) {
3613                 pflashcomp = gen3_flash_types;
3614                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3615                 num_comp = ARRAY_SIZE(gen3_flash_types);
3616         } else {
3617                 pflashcomp = gen2_flash_types;
3618                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3619                 num_comp = ARRAY_SIZE(gen2_flash_types);
3620         }
3621
3622         /* Get flash section info */
3623         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3624         if (!fsec) {
3625                 dev_err(&adapter->pdev->dev,
3626                         "Invalid Cookie. UFI corrupted?\n");
3627                 return -1;
3628         }
3629         for (i = 0; i < num_comp; i++) {
3630                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3631                         continue;
3632
3633                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3634                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3635                         continue;
3636
3637                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3638                     !phy_flashing_required(adapter))
3639                         continue;
3640
3641                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3642                         redboot = be_flash_redboot(adapter, fw->data,
3643                                 pflashcomp[i].offset, pflashcomp[i].size,
3644                                 filehdr_size + img_hdrs_size);
3645                         if (!redboot)
3646                                 continue;
3647                 }
3648
3649                 p = fw->data;
3650                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3651                 if (p + pflashcomp[i].size > fw->data + fw->size)
3652                         return -1;
3653
3654                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3655                                         pflashcomp[i].size);
3656                 if (status) {
3657                         dev_err(&adapter->pdev->dev,
3658                                 "Flashing section type %d failed.\n",
3659                                 pflashcomp[i].img_type);
3660                         return status;
3661                 }
3662         }
3663         return 0;
3664 }
3665
3666 static int be_flash_skyhawk(struct be_adapter *adapter,
3667                 const struct firmware *fw,
3668                 struct be_dma_mem *flash_cmd, int num_of_images)
3669 {
3670         int status = 0, i, filehdr_size = 0;
3671         int img_offset, img_size, img_optype, redboot;
3672         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3673         const u8 *p = fw->data;
3674         struct flash_section_info *fsec = NULL;
3675
3676         filehdr_size = sizeof(struct flash_file_hdr_g3);
3677         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3678         if (!fsec) {
3679                 dev_err(&adapter->pdev->dev,
3680                         "Invalid Cookie. UFI corrupted?\n");
3681                 return -1;
3682         }
3683
3684         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3685                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3686                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3687
3688                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3689                 case IMAGE_FIRMWARE_iSCSI:
3690                         img_optype = OPTYPE_ISCSI_ACTIVE;
3691                         break;
3692                 case IMAGE_BOOT_CODE:
3693                         img_optype = OPTYPE_REDBOOT;
3694                         break;
3695                 case IMAGE_OPTION_ROM_ISCSI:
3696                         img_optype = OPTYPE_BIOS;
3697                         break;
3698                 case IMAGE_OPTION_ROM_PXE:
3699                         img_optype = OPTYPE_PXE_BIOS;
3700                         break;
3701                 case IMAGE_OPTION_ROM_FCoE:
3702                         img_optype = OPTYPE_FCOE_BIOS;
3703                         break;
3704                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3705                         img_optype = OPTYPE_ISCSI_BACKUP;
3706                         break;
3707                 case IMAGE_NCSI:
3708                         img_optype = OPTYPE_NCSI_FW;
3709                         break;
3710                 default:
3711                         continue;
3712                 }
3713
3714                 if (img_optype == OPTYPE_REDBOOT) {
3715                         redboot = be_flash_redboot(adapter, fw->data,
3716                                         img_offset, img_size,
3717                                         filehdr_size + img_hdrs_size);
3718                         if (!redboot)
3719                                 continue;
3720                 }
3721
3722                 p = fw->data;
3723                 p += filehdr_size + img_offset + img_hdrs_size;
3724                 if (p + img_size > fw->data + fw->size)
3725                         return -1;
3726
3727                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3728                 if (status) {
3729                         dev_err(&adapter->pdev->dev,
3730                                 "Flashing section type %d failed.\n",
3731                                 le32_to_cpu(fsec->fsec_entry[i].type));
3732                         return status;
3733                 }
3734         }
3735         return 0;
3736 }
3737
3738 static int lancer_fw_download(struct be_adapter *adapter,
3739                                 const struct firmware *fw)
3740 {
3741 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3742 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3743         struct be_dma_mem flash_cmd;
3744         const u8 *data_ptr = NULL;
3745         u8 *dest_image_ptr = NULL;
3746         size_t image_size = 0;
3747         u32 chunk_size = 0;
3748         u32 data_written = 0;
3749         u32 offset = 0;
3750         int status = 0;
3751         u8 add_status = 0;
3752         u8 change_status;
3753
3754         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3755                 dev_err(&adapter->pdev->dev,
3756                         "FW Image not properly aligned. "
3757                         "Length must be 4-byte aligned.\n");
3758                 status = -EINVAL;
3759                 goto lancer_fw_exit;
3760         }
3761
3762         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3763                                 + LANCER_FW_DOWNLOAD_CHUNK;
3764         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3765                                           &flash_cmd.dma, GFP_KERNEL);
3766         if (!flash_cmd.va) {
3767                 status = -ENOMEM;
3768                 goto lancer_fw_exit;
3769         }
3770
3771         dest_image_ptr = flash_cmd.va +
3772                                 sizeof(struct lancer_cmd_req_write_object);
3773         image_size = fw->size;
3774         data_ptr = fw->data;
3775
3776         while (image_size) {
3777                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3778
3779                 /* Copy the image chunk content. */
3780                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3781
3782                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3783                                                  chunk_size, offset,
3784                                                  LANCER_FW_DOWNLOAD_LOCATION,
3785                                                  &data_written, &change_status,
3786                                                  &add_status);
3787                 if (status)
3788                         break;
3789
3790                 offset += data_written;
3791                 data_ptr += data_written;
3792                 image_size -= data_written;
3793         }
3794
3795         if (!status) {
3796                 /* Commit the FW written */
3797                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3798                                                  0, offset,
3799                                                  LANCER_FW_DOWNLOAD_LOCATION,
3800                                                  &data_written, &change_status,
3801                                                  &add_status);
3802         }
3803
3804         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3805                                 flash_cmd.dma);
3806         if (status) {
3807                 dev_err(&adapter->pdev->dev,
3808                         "Firmware load error. "
3809                         "Status code: 0x%x Additional Status: 0x%x\n",
3810                         status, add_status);
3811                 goto lancer_fw_exit;
3812         }
3813
3814         if (change_status == LANCER_FW_RESET_NEEDED) {
3815                 status = lancer_physdev_ctrl(adapter,
3816                                              PHYSDEV_CONTROL_FW_RESET_MASK);
3817                 if (status) {
3818                         dev_err(&adapter->pdev->dev,
3819                                 "Adapter busy for FW reset.\n"
3820                                 "New FW will not be active.\n");
3821                         goto lancer_fw_exit;
3822                 }
3823         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3824                 dev_err(&adapter->pdev->dev,
3825                         "System reboot required for new FW"
3826                         " to be active\n");
3827         }
3828
3829         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3830 lancer_fw_exit:
3831         return status;
3832 }
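
/*
 * Illustrative note (added, not from the original source): on Lancer the
 * image is streamed to the "/prg" object in 32 KB chunks and then committed
 * with a zero-length write.  For a hypothetical 100 KB image the sequence
 * would roughly be
 *
 *	write(offset 0,    32 KB)
 *	write(offset 32K,  32 KB)
 *	write(offset 64K,  32 KB)
 *	write(offset 96K,   4 KB)
 *	write(offset 100K,  0)		-> commit
 *
 * where each offset advances by the data_written value reported by the
 * firmware, which may be smaller than the requested chunk.
 */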
3833
3834 #define UFI_TYPE2               2
3835 #define UFI_TYPE3               3
3836 #define UFI_TYPE3R              10
3837 #define UFI_TYPE4               4
3838 static int be_get_ufi_type(struct be_adapter *adapter,
3839                            struct flash_file_hdr_g3 *fhdr)
3840 {
3841         if (fhdr == NULL)
3842                 goto be_get_ufi_exit;
3843
3844         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3845                 return UFI_TYPE4;
3846         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3847                 if (fhdr->asic_type_rev == 0x10)
3848                         return UFI_TYPE3R;
3849                 else
3850                         return UFI_TYPE3;
3851         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3852                 return UFI_TYPE2;
3853
3854 be_get_ufi_exit:
3855         dev_err(&adapter->pdev->dev,
3856                 "UFI and Interface are not compatible for flashing\n");
3857         return -1;
3858 }
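
/*
 * Summary of the mapping implemented above (a reading aid, not additional
 * behaviour):
 *
 *	Skyhawk + build '4'                      -> UFI_TYPE4
 *	BE3     + build '3' + asic_type_rev 0x10 -> UFI_TYPE3R
 *	BE3     + build '3' + older revision     -> UFI_TYPE3
 *	BE2     + build '2'                      -> UFI_TYPE2
 *	anything else                            -> -1 (incompatible UFI)
 */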
3859
3860 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3861 {
3862         struct flash_file_hdr_g3 *fhdr3;
3863         struct image_hdr *img_hdr_ptr = NULL;
3864         struct be_dma_mem flash_cmd;
3865         const u8 *p;
3866         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3867
3868         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3869         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3870                                           &flash_cmd.dma, GFP_KERNEL);
3871         if (!flash_cmd.va) {
3872                 status = -ENOMEM;
3873                 goto be_fw_exit;
3874         }
3875
3876         p = fw->data;
3877         fhdr3 = (struct flash_file_hdr_g3 *)p;
3878
3879         ufi_type = be_get_ufi_type(adapter, fhdr3);
3880
3881         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3882         for (i = 0; i < num_imgs; i++) {
3883                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3884                                 (sizeof(struct flash_file_hdr_g3) +
3885                                  i * sizeof(struct image_hdr)));
3886                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3887                         switch (ufi_type) {
3888                         case UFI_TYPE4:
3889                                 status = be_flash_skyhawk(adapter, fw,
3890                                                         &flash_cmd, num_imgs);
3891                                 break;
3892                         case UFI_TYPE3R:
3893                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3894                                                       num_imgs);
3895                                 break;
3896                         case UFI_TYPE3:
3897                                 /* Do not flash this ufi on BE3-R cards */
3898                                 if (adapter->asic_rev < 0x10)
3899                                         status = be_flash_BEx(adapter, fw,
3900                                                               &flash_cmd,
3901                                                               num_imgs);
3902                                 else {
3903                                         status = -1;
3904                                         dev_err(&adapter->pdev->dev,
3905                                                 "Can't load BE3 UFI on BE3R\n");
3906                                 }
3907                         }
3908                 }
3909         }
3910
3911         if (ufi_type == UFI_TYPE2)
3912                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3913         else if (ufi_type == -1)
3914                 status = -1;
3915
3916         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3917                           flash_cmd.dma);
3918         if (status) {
3919                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3920                 goto be_fw_exit;
3921         }
3922
3923         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3924
3925 be_fw_exit:
3926         return status;
3927 }
3928
3929 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3930 {
3931         const struct firmware *fw;
3932         int status;
3933
3934         if (!netif_running(adapter->netdev)) {
3935                 dev_err(&adapter->pdev->dev,
3936                         "Firmware load not allowed (interface is down)\n");
3937                 return -1;
3938         }
3939
3940         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3941         if (status)
3942                 goto fw_exit;
3943
3944         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3945
3946         if (lancer_chip(adapter))
3947                 status = lancer_fw_download(adapter, fw);
3948         else
3949                 status = be_fw_download(adapter, fw);
3950
3951         if (!status)
3952                 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3953                                   adapter->fw_on_flash);
3954
3955 fw_exit:
3956         release_firmware(fw);
3957         return status;
3958 }
3959
3960 static int be_ndo_bridge_setlink(struct net_device *dev,
3961                                     struct nlmsghdr *nlh)
3962 {
3963         struct be_adapter *adapter = netdev_priv(dev);
3964         struct nlattr *attr, *br_spec;
3965         int rem;
3966         int status = 0;
3967         u16 mode = 0;
3968
3969         if (!sriov_enabled(adapter))
3970                 return -EOPNOTSUPP;
3971
3972         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3973
3974         nla_for_each_nested(attr, br_spec, rem) {
3975                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3976                         continue;
3977
3978                 mode = nla_get_u16(attr);
3979                 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3980                         return -EINVAL;
3981
3982                 status = be_cmd_set_hsw_config(adapter, 0, 0,
3983                                                adapter->if_handle,
3984                                                mode == BRIDGE_MODE_VEPA ?
3985                                                PORT_FWD_TYPE_VEPA :
3986                                                PORT_FWD_TYPE_VEB);
3987                 if (status)
3988                         goto err;
3989
3990                 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
3991                          mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3992
3993                 return status;
3994         }
3995 err:
3996         dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
3997                 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3998
3999         return status;
4000 }
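
/*
 * Hardening note (a suggested guard, not present in the code above):
 * nlmsg_find_attr() returns NULL when the request carries no IFLA_AF_SPEC
 * attribute, and nla_for_each_nested() would then dereference that NULL
 * pointer.  A minimal check such as
 *
 *	if (!br_spec)
 *		return -EINVAL;
 *
 * placed right after the nlmsg_find_attr() call avoids this.
 */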
4001
4002 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4003                                     struct net_device *dev,
4004                                     u32 filter_mask)
4005 {
4006         struct be_adapter *adapter = netdev_priv(dev);
4007         int status = 0;
4008         u8 hsw_mode;
4009
4010         if (!sriov_enabled(adapter))
4011                 return 0;
4012
4013         /* BE and Lancer chips support VEB mode only */
4014         if (BEx_chip(adapter) || lancer_chip(adapter)) {
4015                 hsw_mode = PORT_FWD_TYPE_VEB;
4016         } else {
4017                 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4018                                                adapter->if_handle, &hsw_mode);
4019                 if (status)
4020                         return 0;
4021         }
4022
4023         return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4024                                        hsw_mode == PORT_FWD_TYPE_VEPA ?
4025                                        BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4026 }
4027
4028 static const struct net_device_ops be_netdev_ops = {
4029         .ndo_open               = be_open,
4030         .ndo_stop               = be_close,
4031         .ndo_start_xmit         = be_xmit,
4032         .ndo_set_rx_mode        = be_set_rx_mode,
4033         .ndo_set_mac_address    = be_mac_addr_set,
4034         .ndo_change_mtu         = be_change_mtu,
4035         .ndo_get_stats64        = be_get_stats64,
4036         .ndo_validate_addr      = eth_validate_addr,
4037         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
4038         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
4039         .ndo_set_vf_mac         = be_set_vf_mac,
4040         .ndo_set_vf_vlan        = be_set_vf_vlan,
4041         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
4042         .ndo_get_vf_config      = be_get_vf_config,
4043 #ifdef CONFIG_NET_POLL_CONTROLLER
4044         .ndo_poll_controller    = be_netpoll,
4045 #endif
4046         .ndo_bridge_setlink     = be_ndo_bridge_setlink,
4047         .ndo_bridge_getlink     = be_ndo_bridge_getlink,
4048 #ifdef CONFIG_NET_RX_BUSY_POLL
4049         .ndo_busy_poll          = be_busy_poll
4050 #endif
4051 };
4052
4053 static void be_netdev_init(struct net_device *netdev)
4054 {
4055         struct be_adapter *adapter = netdev_priv(netdev);
4056
4057         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4058                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
4059                 NETIF_F_HW_VLAN_CTAG_TX;
4060         if (be_multi_rxq(adapter))
4061                 netdev->hw_features |= NETIF_F_RXHASH;
4062
4063         netdev->features |= netdev->hw_features |
4064                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4065
4066         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4067                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4068
4069         netdev->priv_flags |= IFF_UNICAST_FLT;
4070
4071         netdev->flags |= IFF_MULTICAST;
4072
4073         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
4074
4075         netdev->netdev_ops = &be_netdev_ops;
4076
4077         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
4078 }
4079
4080 static void be_unmap_pci_bars(struct be_adapter *adapter)
4081 {
4082         if (adapter->csr)
4083                 pci_iounmap(adapter->pdev, adapter->csr);
4084         if (adapter->db)
4085                 pci_iounmap(adapter->pdev, adapter->db);
4086 }
4087
4088 static int db_bar(struct be_adapter *adapter)
4089 {
4090         if (lancer_chip(adapter) || !be_physfn(adapter))
4091                 return 0;
4092         else
4093                 return 4;
4094 }
4095
4096 static int be_roce_map_pci_bars(struct be_adapter *adapter)
4097 {
4098         if (skyhawk_chip(adapter)) {
4099                 adapter->roce_db.size = 4096;
4100                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4101                                                               db_bar(adapter));
4102                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4103                                                                db_bar(adapter));
4104         }
4105         return 0;
4106 }
4107
4108 static int be_map_pci_bars(struct be_adapter *adapter)
4109 {
4110         u8 __iomem *addr;
4111
4112         if (BEx_chip(adapter) && be_physfn(adapter)) {
4113                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4114                 if (adapter->csr == NULL)
4115                         return -ENOMEM;
4116         }
4117
4118         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
4119         if (addr == NULL)
4120                 goto pci_map_err;
4121         adapter->db = addr;
4122
4123         be_roce_map_pci_bars(adapter);
4124         return 0;
4125
4126 pci_map_err:
4127         be_unmap_pci_bars(adapter);
4128         return -ENOMEM;
4129 }
4130
4131 static void be_ctrl_cleanup(struct be_adapter *adapter)
4132 {
4133         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
4134
4135         be_unmap_pci_bars(adapter);
4136
4137         if (mem->va)
4138                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4139                                   mem->dma);
4140
4141         mem = &adapter->rx_filter;
4142         if (mem->va)
4143                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4144                                   mem->dma);
4145 }
4146
4147 static int be_ctrl_init(struct be_adapter *adapter)
4148 {
4149         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4150         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
4151         struct be_dma_mem *rx_filter = &adapter->rx_filter;
4152         u32 sli_intf;
4153         int status;
4154
4155         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4156         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4157                                  SLI_INTF_FAMILY_SHIFT;
4158         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4159
4160         status = be_map_pci_bars(adapter);
4161         if (status)
4162                 goto done;
4163
4164         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
4165         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4166                                                 mbox_mem_alloc->size,
4167                                                 &mbox_mem_alloc->dma,
4168                                                 GFP_KERNEL);
4169         if (!mbox_mem_alloc->va) {
4170                 status = -ENOMEM;
4171                 goto unmap_pci_bars;
4172         }
4173         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4174         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4175         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4176         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
4177
4178         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
4179         rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4180                                             rx_filter->size, &rx_filter->dma,
4181                                             GFP_KERNEL);
4182         if (rx_filter->va == NULL) {
4183                 status = -ENOMEM;
4184                 goto free_mbox;
4185         }
4186
4187         mutex_init(&adapter->mbox_lock);
4188         spin_lock_init(&adapter->mcc_lock);
4189         spin_lock_init(&adapter->mcc_cq_lock);
4190
4191         init_completion(&adapter->flash_compl);
4192         pci_save_state(adapter->pdev);
4193         return 0;
4194
4195 free_mbox:
4196         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4197                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
4198
4199 unmap_pci_bars:
4200         be_unmap_pci_bars(adapter);
4201
4202 done:
4203         return status;
4204 }
4205
4206 static void be_stats_cleanup(struct be_adapter *adapter)
4207 {
4208         struct be_dma_mem *cmd = &adapter->stats_cmd;
4209
4210         if (cmd->va)
4211                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4212                                   cmd->va, cmd->dma);
4213 }
4214
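/* Allocate the DMA buffer used for firmware stats queries.  Its size depends
 * on the stats command version the chip family supports; be_worker() reuses
 * this same buffer for the periodic stats refresh.
 */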
4215 static int be_stats_init(struct be_adapter *adapter)
4216 {
4217         struct be_dma_mem *cmd = &adapter->stats_cmd;
4218
4219         if (lancer_chip(adapter))
4220                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4221         else if (BE2_chip(adapter))
4222                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
4223         else if (BE3_chip(adapter))
4224                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4225         else
4226                 /* ALL other ASICs (non-BE2/BE3, non-Lancer) */
4227                 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
4228
4229         cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4230                                       GFP_KERNEL);
4231         if (cmd->va == NULL)
4232                 return -ENOMEM;
4233         return 0;
4234 }
4235
4236 static void be_remove(struct pci_dev *pdev)
4237 {
4238         struct be_adapter *adapter = pci_get_drvdata(pdev);
4239
4240         if (!adapter)
4241                 return;
4242
4243         be_roce_dev_remove(adapter);
4244         be_intr_set(adapter, false);
4245
4246         cancel_delayed_work_sync(&adapter->func_recovery_work);
4247
4248         unregister_netdev(adapter->netdev);
4249
4250         be_clear(adapter);
4251
4252         /* tell fw we're done with firing cmds */
4253         be_cmd_fw_clean(adapter);
4254
4255         be_stats_cleanup(adapter);
4256
4257         be_ctrl_cleanup(adapter);
4258
4259         pci_disable_pcie_error_reporting(pdev);
4260
4261         pci_release_regions(pdev);
4262         pci_disable_device(pdev);
4263
4264         free_netdev(adapter->netdev);
4265 }
4266
4267 bool be_is_wol_supported(struct be_adapter *adapter)
4268 {
4269         return (adapter->wol_cap & BE_WOL_CAP) &&
4270                 !be_is_wol_excluded(adapter);
4271 }
4272
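/* Query the extended FAT capabilities from firmware and return the UART
 * trace level configured for module 0.  Returns 0 on Lancer (where the query
 * is skipped) and on any allocation or command failure.
 */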
4273 u32 be_get_fw_log_level(struct be_adapter *adapter)
4274 {
4275         struct be_dma_mem extfat_cmd;
4276         struct be_fat_conf_params *cfgs;
4277         int status;
4278         u32 level = 0;
4279         int j;
4280
4281         if (lancer_chip(adapter))
4282                 return 0;
4283
4284         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4285         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4286         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4287                                              &extfat_cmd.dma);
4288
4289         if (!extfat_cmd.va) {
4290                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4291                         __func__);
4292                 goto err;
4293         }
4294
4295         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4296         if (!status) {
4297                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4298                                                 sizeof(struct be_cmd_resp_hdr));
4299                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4300                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4301                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4302                 }
4303         }
4304         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4305                             extfat_cmd.dma);
4306 err:
4307         return level;
4308 }
4309
4310 static int be_get_initial_config(struct be_adapter *adapter)
4311 {
4312         int status;
4313         u32 level;
4314
4315         status = be_cmd_get_cntl_attributes(adapter);
4316         if (status)
4317                 return status;
4318
4319         status = be_cmd_get_acpi_wol_cap(adapter);
4320         if (status) {
4321                 /* in case of a failure to get WOL capabilities,
4322                  * check the exclusion list to determine WOL capability */
4323                 if (!be_is_wol_excluded(adapter))
4324                         adapter->wol_cap |= BE_WOL_CAP;
4325         }
4326
4327         if (be_is_wol_supported(adapter))
4328                 adapter->wol = true;
4329
4330         /* Must be a power of 2 or else MODULO will BUG_ON */
4331         adapter->be_get_temp_freq = 64;
4332
4333         level = be_get_fw_log_level(adapter);
4334         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4335
4336         adapter->cfg_num_qs = netif_get_num_default_rss_queues();
4337         return 0;
4338 }
4339
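/* Recover a Lancer function after a firmware error: wait for the chip to
 * report ready, tear everything down with be_clear(), clear the recorded
 * error state, and then redo be_setup()/be_open() if the interface was up.
 */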
4340 static int lancer_recover_func(struct be_adapter *adapter)
4341 {
4342         struct device *dev = &adapter->pdev->dev;
4343         int status;
4344
4345         status = lancer_test_and_set_rdy_state(adapter);
4346         if (status)
4347                 goto err;
4348
4349         if (netif_running(adapter->netdev))
4350                 be_close(adapter->netdev);
4351
4352         be_clear(adapter);
4353
4354         be_clear_all_error(adapter);
4355
4356         status = be_setup(adapter);
4357         if (status)
4358                 goto err;
4359
4360         if (netif_running(adapter->netdev)) {
4361                 status = be_open(adapter->netdev);
4362                 if (status)
4363                         goto err;
4364         }
4365
4366         dev_info(dev, "Error recovery successful\n");
4367         return 0;
4368 err:
4369         if (status == -EAGAIN)
4370                 dev_err(dev, "Waiting for resource provisioning\n");
4371         else
4372                 dev_err(dev, "Error recovery failed\n");
4373
4374         return status;
4375 }
4376
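/* Runs every second (rescheduled at the bottom) to detect adapter errors.
 * On Lancer a detected HW error triggers lancer_recover_func() with the
 * netdev detached; recovery polling stops on any failure other than
 * -EAGAIN (resources still being provisioned).
 */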
4377 static void be_func_recovery_task(struct work_struct *work)
4378 {
4379         struct be_adapter *adapter =
4380                 container_of(work, struct be_adapter, func_recovery_work.work);
4381         int status = 0;
4382
4383         be_detect_error(adapter);
4384
4385         if (adapter->hw_error && lancer_chip(adapter)) {
4387                 rtnl_lock();
4388                 netif_device_detach(adapter->netdev);
4389                 rtnl_unlock();
4390
4391                 status = lancer_recover_func(adapter);
4392                 if (!status)
4393                         netif_device_attach(adapter->netdev);
4394         }
4395
4396         /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4397          * no need to attempt further recovery.
4398          */
4399         if (!status || status == -EAGAIN)
4400                 schedule_delayed_work(&adapter->func_recovery_work,
4401                                       msecs_to_jiffies(1000));
4402 }
4403
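/* Periodic (1 second) housekeeping: reap MCC completions while the interface
 * is down, refresh HW stats, poll the die temperature on the PF, replenish
 * RX queues that starved on memory allocation, and update EQ delays.
 */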
4404 static void be_worker(struct work_struct *work)
4405 {
4406         struct be_adapter *adapter =
4407                 container_of(work, struct be_adapter, work.work);
4408         struct be_rx_obj *rxo;
4409         int i;
4410
4411         /* when interrupts are not yet enabled, just reap any pending
4412          * mcc completions */
4413         if (!netif_running(adapter->netdev)) {
4414                 local_bh_disable();
4415                 be_process_mcc(adapter);
4416                 local_bh_enable();
4417                 goto reschedule;
4418         }
4419
4420         if (!adapter->stats_cmd_sent) {
4421                 if (lancer_chip(adapter))
4422                         lancer_cmd_get_pport_stats(adapter,
4423                                                 &adapter->stats_cmd);
4424                 else
4425                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4426         }
4427
4428         if (be_physfn(adapter) &&
4429             MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4430                 be_cmd_get_die_temperature(adapter);
4431
4432         for_all_rx_queues(adapter, rxo, i) {
4433                 /* Replenish RX-queues starved due to memory
4434                  * allocation failures.
4435                  */
4436                 if (rxo->rx_post_starved)
4437                         be_post_rx_frags(rxo, GFP_KERNEL);
4438         }
4439
4440         be_eqd_update(adapter);
4441
4442 reschedule:
4443         adapter->work_counter++;
4444         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4445 }
4446
4447 /* If any VFs are already enabled, don't FLR the PF */
4448 static bool be_reset_required(struct be_adapter *adapter)
4449 {
4450         return pci_num_vf(adapter->pdev) == 0;
4451 }
4452
4453 static char *mc_name(struct be_adapter *adapter)
4454 {
4455         if (adapter->function_mode & FLEX10_MODE)
4456                 return "FLEX10";
4457         else if (adapter->function_mode & VNIC_MODE)
4458                 return "vNIC";
4459         else if (adapter->function_mode & UMC_ENABLED)
4460                 return "UMC";
4461         else
4462                 return "";
4463 }
4464
4465 static inline char *func_name(struct be_adapter *adapter)
4466 {
4467         return be_physfn(adapter) ? "PF" : "VF";
4468 }
4469
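/* PCI probe: enable the device, map BARs and set up the mailbox/stats DMA
 * buffers, reset and initialize the function, run be_setup(), and finally
 * register the netdev.  The error labels below unwind in strict reverse
 * order of these steps.
 */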
4470 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4471 {
4472         int status = 0;
4473         struct be_adapter *adapter;
4474         struct net_device *netdev;
4475         char port_name;
4476
4477         status = pci_enable_device(pdev);
4478         if (status)
4479                 goto do_none;
4480
4481         status = pci_request_regions(pdev, DRV_NAME);
4482         if (status)
4483                 goto disable_dev;
4484         pci_set_master(pdev);
4485
4486         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4487         if (netdev == NULL) {
4488                 status = -ENOMEM;
4489                 goto rel_reg;
4490         }
4491         adapter = netdev_priv(netdev);
4492         adapter->pdev = pdev;
4493         pci_set_drvdata(pdev, adapter);
4494         adapter->netdev = netdev;
4495         SET_NETDEV_DEV(netdev, &pdev->dev);
4496
4497         status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4498         if (!status) {
4499                 netdev->features |= NETIF_F_HIGHDMA;
4500         } else {
4501                 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4502                 if (status) {
4503                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4504                         goto free_netdev;
4505                 }
4506         }
4507
4508         if (be_physfn(adapter)) {
4509                 status = pci_enable_pcie_error_reporting(pdev);
4510                 if (!status)
4511                         dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4512         }
4513
4514         status = be_ctrl_init(adapter);
4515         if (status)
4516                 goto free_netdev;
4517
4518         /* sync up with fw's ready state */
4519         if (be_physfn(adapter)) {
4520                 status = be_fw_wait_ready(adapter);
4521                 if (status)
4522                         goto ctrl_clean;
4523         }
4524
4525         if (be_reset_required(adapter)) {
4526                 status = be_cmd_reset_function(adapter);
4527                 if (status)
4528                         goto ctrl_clean;
4529
4530                 /* Wait for interrupts to quiesce after an FLR */
4531                 msleep(100);
4532         }
4533
4534         /* Allow interrupts for other ULPs running on NIC function */
4535         be_intr_set(adapter, true);
4536
4537         /* tell fw we're ready to fire cmds */
4538         status = be_cmd_fw_init(adapter);
4539         if (status)
4540                 goto ctrl_clean;
4541
4542         status = be_stats_init(adapter);
4543         if (status)
4544                 goto ctrl_clean;
4545
4546         status = be_get_initial_config(adapter);
4547         if (status)
4548                 goto stats_clean;
4549
4550         INIT_DELAYED_WORK(&adapter->work, be_worker);
4551         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4552         adapter->rx_fc = adapter->tx_fc = true;
4553
4554         status = be_setup(adapter);
4555         if (status)
4556                 goto stats_clean;
4557
4558         be_netdev_init(netdev);
4559         status = register_netdev(netdev);
4560         if (status != 0)
4561                 goto unsetup;
4562
4563         be_roce_dev_add(adapter);
4564
4565         schedule_delayed_work(&adapter->func_recovery_work,
4566                               msecs_to_jiffies(1000));
4567
4568         be_cmd_query_port_name(adapter, &port_name);
4569
4570         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4571                  func_name(adapter), mc_name(adapter), port_name);
4572
4573         return 0;
4574
4575 unsetup:
4576         be_clear(adapter);
4577 stats_clean:
4578         be_stats_cleanup(adapter);
4579 ctrl_clean:
4580         be_ctrl_cleanup(adapter);
4581 free_netdev:
4582         free_netdev(netdev);
4583 rel_reg:
4584         pci_release_regions(pdev);
4585 disable_dev:
4586         pci_disable_device(pdev);
4587 do_none:
4588         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4589         return status;
4590 }
4591
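/* Legacy PCI power-management hooks.  Suspend arms wake-on-LAN when enabled,
 * quiesces the interface and tears resources down with be_clear(); resume
 * re-enables the device, waits for firmware and rebuilds via be_setup().
 */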
4592 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4593 {
4594         struct be_adapter *adapter = pci_get_drvdata(pdev);
4595         struct net_device *netdev = adapter->netdev;
4596
4597         if (adapter->wol)
4598                 be_setup_wol(adapter, true);
4599
4600         be_intr_set(adapter, false);
4601         cancel_delayed_work_sync(&adapter->func_recovery_work);
4602
4603         netif_device_detach(netdev);
4604         if (netif_running(netdev)) {
4605                 rtnl_lock();
4606                 be_close(netdev);
4607                 rtnl_unlock();
4608         }
4609         be_clear(adapter);
4610
4611         pci_save_state(pdev);
4612         pci_disable_device(pdev);
4613         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4614         return 0;
4615 }
4616
4617 static int be_resume(struct pci_dev *pdev)
4618 {
4619         int status = 0;
4620         struct be_adapter *adapter = pci_get_drvdata(pdev);
4621         struct net_device *netdev = adapter->netdev;
4622
4623         netif_device_detach(netdev);
4624
4625         status = pci_enable_device(pdev);
4626         if (status)
4627                 return status;
4628
4629         pci_set_power_state(pdev, PCI_D0);
4630         pci_restore_state(pdev);
4631
4632         status = be_fw_wait_ready(adapter);
4633         if (status)
4634                 return status;
4635
4636         be_intr_set(adapter, true);
4637         /* tell fw we're ready to fire cmds */
4638         status = be_cmd_fw_init(adapter);
4639         if (status)
4640                 return status;
4641
4642         be_setup(adapter);
4643         if (netif_running(netdev)) {
4644                 rtnl_lock();
4645                 be_open(netdev);
4646                 rtnl_unlock();
4647         }
4648
4649         schedule_delayed_work(&adapter->func_recovery_work,
4650                               msecs_to_jiffies(1000));
4651         netif_device_attach(netdev);
4652
4653         if (adapter->wol)
4654                 be_setup_wol(adapter, false);
4655
4656         return 0;
4657 }
4658
4659 /*
4660  * An FLR will stop BE from DMAing any data.
4661  */
4662 static void be_shutdown(struct pci_dev *pdev)
4663 {
4664         struct be_adapter *adapter = pci_get_drvdata(pdev);
4665
4666         if (!adapter)
4667                 return;
4668
4669         cancel_delayed_work_sync(&adapter->work);
4670         cancel_delayed_work_sync(&adapter->func_recovery_work);
4671
4672         netif_device_detach(adapter->netdev);
4673
4674         be_cmd_reset_function(adapter);
4675
4676         pci_disable_device(pdev);
4677 }
4678
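/* EEH (PCI error recovery) callbacks: error_detected detaches the netdev and
 * tears down the function, slot_reset re-enables the device and waits for
 * firmware readiness, and resume re-runs be_setup()/be_open() and re-arms
 * the recovery worker.
 */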
4679 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4680                                 pci_channel_state_t state)
4681 {
4682         struct be_adapter *adapter = pci_get_drvdata(pdev);
4683         struct net_device *netdev = adapter->netdev;
4684
4685         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4686
4687         if (!adapter->eeh_error) {
4688                 adapter->eeh_error = true;
4689
4690                 cancel_delayed_work_sync(&adapter->func_recovery_work);
4691
4692                 rtnl_lock();
4693                 netif_device_detach(netdev);
4694                 if (netif_running(netdev))
4695                         be_close(netdev);
4696                 rtnl_unlock();
4697
4698                 be_clear(adapter);
4699         }
4700
4701         if (state == pci_channel_io_perm_failure)
4702                 return PCI_ERS_RESULT_DISCONNECT;
4703
4704         pci_disable_device(pdev);
4705
4706         /* The error could cause the FW to trigger a flash debug dump.
4707          * Resetting the card while flash dump is in progress
4708          * can cause it not to recover; wait for it to finish.
4709          * Wait only for first function as it is needed only once per
4710          * Only the first function (devfn 0) needs to wait, as one wait
4711          * per adapter is enough.
4712         if (pdev->devfn == 0)
4713                 ssleep(30);
4714
4715         return PCI_ERS_RESULT_NEED_RESET;
4716 }
4717
4718 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4719 {
4720         struct be_adapter *adapter = pci_get_drvdata(pdev);
4721         int status;
4722
4723         dev_info(&adapter->pdev->dev, "EEH reset\n");
4724
4725         status = pci_enable_device(pdev);
4726         if (status)
4727                 return PCI_ERS_RESULT_DISCONNECT;
4728
4729         pci_set_master(pdev);
4730         pci_set_power_state(pdev, PCI_D0);
4731         pci_restore_state(pdev);
4732
4733         /* Check if card is ok and fw is ready */
4734         dev_info(&adapter->pdev->dev,
4735                  "Waiting for FW to be ready after EEH reset\n");
4736         status = be_fw_wait_ready(adapter);
4737         if (status)
4738                 return PCI_ERS_RESULT_DISCONNECT;
4739
4740         pci_cleanup_aer_uncorrect_error_status(pdev);
4741         be_clear_all_error(adapter);
4742         return PCI_ERS_RESULT_RECOVERED;
4743 }
4744
4745 static void be_eeh_resume(struct pci_dev *pdev)
4746 {
4747         int status = 0;
4748         struct be_adapter *adapter = pci_get_drvdata(pdev);
4749         struct net_device *netdev = adapter->netdev;
4750
4751         dev_info(&adapter->pdev->dev, "EEH resume\n");
4752
4753         pci_save_state(pdev);
4754
4755         status = be_cmd_reset_function(adapter);
4756         if (status)
4757                 goto err;
4758
4759         /* tell fw we're ready to fire cmds */
4760         status = be_cmd_fw_init(adapter);
4761         if (status)
4762                 goto err;
4763
4764         status = be_setup(adapter);
4765         if (status)
4766                 goto err;
4767
4768         if (netif_running(netdev)) {
4769                 status = be_open(netdev);
4770                 if (status)
4771                         goto err;
4772         }
4773
4774         schedule_delayed_work(&adapter->func_recovery_work,
4775                               msecs_to_jiffies(1000));
4776         netif_device_attach(netdev);
4777         return;
4778 err:
4779         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4780 }
4781
4782 static const struct pci_error_handlers be_eeh_handlers = {
4783         .error_detected = be_eeh_err_detected,
4784         .slot_reset = be_eeh_reset,
4785         .resume = be_eeh_resume,
4786 };
4787
4788 static struct pci_driver be_driver = {
4789         .name = DRV_NAME,
4790         .id_table = be_dev_ids,
4791         .probe = be_probe,
4792         .remove = be_remove,
4793         .suspend = be_suspend,
4794         .resume = be_resume,
4795         .shutdown = be_shutdown,
4796         .err_handler = &be_eeh_handlers
4797 };
4798
4799 static int __init be_init_module(void)
4800 {
4801         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4802             rx_frag_size != 2048) {
4803                 printk(KERN_WARNING DRV_NAME
4804                         " : Module param rx_frag_size must be 2048/4096/8192."
4805                         " Using 2048\n");
4806                 rx_frag_size = 2048;
4807         }
4808
4809         return pci_register_driver(&be_driver);
4810 }
4811 module_init(be_init_module);
4812
4813 static void __exit be_exit_module(void)
4814 {
4815         pci_unregister_driver(&be_driver);
4816 }
4817 module_exit(be_exit_module);