/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)                             "bcmgenet: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <net/arp.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"

/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT        4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY       0

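/* Ring 16, the default ring, is sized with whatever descriptors remain
 * once the hardware priority queues have taken theirs
 */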
#define GENET_Q16_RX_BD_CNT     \
        (TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT     \
        (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)

#define RX_BUF_LENGTH           2048
#define SKB_ALIGNMENT           32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)         (p->hw_params->words_per_bd)
#define DMA_DESC_SIZE           (WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF      (priv->hw_params->tdma_offset + \
                                TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF      (priv->hw_params->rdma_offset + \
                                TOTAL_DESC * DMA_DESC_SIZE)

static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
                                             void __iomem *d, u32 value)
{
        __raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
                                            void __iomem *d)
{
        return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
                                    void __iomem *d,
                                    dma_addr_t addr)
{
        __raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

        /* Register writes to the GISB bus can take a couple hundred
         * nanoseconds and are done for each packet, so save this expensive
         * write unless the platform is explicitly configured for 64-bits/LPAE.
         */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
        if (priv->hw_params->flags & GENET_HAS_40BITS)
                __raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
                               void __iomem *d, dma_addr_t addr, u32 val)
{
        dmadesc_set_length_status(priv, d, val);
        dmadesc_set_addr(priv, d, addr);
}

static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
                                          void __iomem *d)
{
        dma_addr_t addr;

        addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);

        /* Register reads from the GISB bus can take a couple hundred
         * nanoseconds and are done for each packet, so save this expensive
         * read unless the platform is explicitly configured for 64-bits/LPAE.
         */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
        if (priv->hw_params->flags & GENET_HAS_40BITS)
                addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
        return addr;
}

#define GENET_VER_FMT   "%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT       (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
                                NETIF_MSG_LINK)

static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
        if (GENET_IS_V1(priv))
                return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
        else
                return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
        if (GENET_IS_V1(priv))
                bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
        else
                bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}

/* These macros are defined to deal with the register map change
 * between GENET1.1 and GENET2. Only the registers currently used by
 * the driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
        if (GENET_IS_V1(priv))
                return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
        else
                return __raw_readl(priv->base +
                                priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
        if (GENET_IS_V1(priv))
                bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
        else
                __raw_writel(val, priv->base +
                                priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
        if (GENET_IS_V1(priv))
                return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
        else
                return __raw_readl(priv->base +
                                priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
        if (GENET_IS_V1(priv))
                bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
        else
                __raw_writel(val, priv->base +
                                priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

/* RX/TX DMA register accessors */
enum dma_reg {
        DMA_RING_CFG = 0,
        DMA_CTRL,
        DMA_STATUS,
        DMA_SCB_BURST_SIZE,
        DMA_ARB_CTRL,
        DMA_PRIORITY_0,
        DMA_PRIORITY_1,
        DMA_PRIORITY_2,
        DMA_INDEX2RING_0,
        DMA_INDEX2RING_1,
        DMA_INDEX2RING_2,
        DMA_INDEX2RING_3,
        DMA_INDEX2RING_4,
        DMA_INDEX2RING_5,
        DMA_INDEX2RING_6,
        DMA_INDEX2RING_7,
};

static const u8 bcmgenet_dma_regs_v3plus[] = {
        [DMA_RING_CFG]          = 0x00,
        [DMA_CTRL]              = 0x04,
        [DMA_STATUS]            = 0x08,
        [DMA_SCB_BURST_SIZE]    = 0x0C,
        [DMA_ARB_CTRL]          = 0x2C,
        [DMA_PRIORITY_0]        = 0x30,
        [DMA_PRIORITY_1]        = 0x34,
        [DMA_PRIORITY_2]        = 0x38,
        [DMA_INDEX2RING_0]      = 0x70,
        [DMA_INDEX2RING_1]      = 0x74,
        [DMA_INDEX2RING_2]      = 0x78,
        [DMA_INDEX2RING_3]      = 0x7C,
        [DMA_INDEX2RING_4]      = 0x80,
        [DMA_INDEX2RING_5]      = 0x84,
        [DMA_INDEX2RING_6]      = 0x88,
        [DMA_INDEX2RING_7]      = 0x8C,
};

static const u8 bcmgenet_dma_regs_v2[] = {
        [DMA_RING_CFG]          = 0x00,
        [DMA_CTRL]              = 0x04,
        [DMA_STATUS]            = 0x08,
        [DMA_SCB_BURST_SIZE]    = 0x0C,
        [DMA_ARB_CTRL]          = 0x30,
        [DMA_PRIORITY_0]        = 0x34,
        [DMA_PRIORITY_1]        = 0x38,
        [DMA_PRIORITY_2]        = 0x3C,
};

static const u8 bcmgenet_dma_regs_v1[] = {
        [DMA_CTRL]              = 0x00,
        [DMA_STATUS]            = 0x04,
        [DMA_SCB_BURST_SIZE]    = 0x0C,
        [DMA_ARB_CTRL]          = 0x30,
        [DMA_PRIORITY_0]        = 0x34,
        [DMA_PRIORITY_1]        = 0x38,
        [DMA_PRIORITY_2]        = 0x3C,
};

/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;

static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
        return netdev_priv(dev_get_drvdata(dev));
}

static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
                                      enum dma_reg r)
{
        return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
                        DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
                                        u32 val, enum dma_reg r)
{
        __raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
                        DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
                                      enum dma_reg r)
{
        return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
                        DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
                                        u32 val, enum dma_reg r)
{
        __raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
                        DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

/* RDMA/TDMA ring registers and accessors
 * We merge the common fields and just prefix with T/R the registers
 * having a different meaning depending on the direction.
 */
enum dma_ring_reg {
        TDMA_READ_PTR = 0,
        RDMA_WRITE_PTR = TDMA_READ_PTR,
        TDMA_READ_PTR_HI,
        RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
        TDMA_CONS_INDEX,
        RDMA_PROD_INDEX = TDMA_CONS_INDEX,
        TDMA_PROD_INDEX,
        RDMA_CONS_INDEX = TDMA_PROD_INDEX,
        DMA_RING_BUF_SIZE,
        DMA_START_ADDR,
        DMA_START_ADDR_HI,
        DMA_END_ADDR,
        DMA_END_ADDR_HI,
        DMA_MBUF_DONE_THRESH,
        TDMA_FLOW_PERIOD,
        RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
        TDMA_WRITE_PTR,
        RDMA_READ_PTR = TDMA_WRITE_PTR,
        TDMA_WRITE_PTR_HI,
        RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
};

/* GENET v4 supports 40-bit pointer addressing;
 * for obvious reasons the LO and HI word parts
 * are contiguous, but this offsets the other
 * registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
        [TDMA_READ_PTR]                 = 0x00,
        [TDMA_READ_PTR_HI]              = 0x04,
        [TDMA_CONS_INDEX]               = 0x08,
        [TDMA_PROD_INDEX]               = 0x0C,
        [DMA_RING_BUF_SIZE]             = 0x10,
        [DMA_START_ADDR]                = 0x14,
        [DMA_START_ADDR_HI]             = 0x18,
        [DMA_END_ADDR]                  = 0x1C,
        [DMA_END_ADDR_HI]               = 0x20,
        [DMA_MBUF_DONE_THRESH]          = 0x24,
        [TDMA_FLOW_PERIOD]              = 0x28,
        [TDMA_WRITE_PTR]                = 0x2C,
        [TDMA_WRITE_PTR_HI]             = 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
        [TDMA_READ_PTR]                 = 0x00,
        [TDMA_CONS_INDEX]               = 0x04,
        [TDMA_PROD_INDEX]               = 0x08,
        [DMA_RING_BUF_SIZE]             = 0x0C,
        [DMA_START_ADDR]                = 0x10,
        [DMA_END_ADDR]                  = 0x14,
        [DMA_MBUF_DONE_THRESH]          = 0x18,
        [TDMA_FLOW_PERIOD]              = 0x1C,
        [TDMA_WRITE_PTR]                = 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;

static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
                                           unsigned int ring,
                                           enum dma_ring_reg r)
{
        return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
                        (DMA_RING_SIZE * ring) +
                        genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
                                             unsigned int ring, u32 val,
                                             enum dma_ring_reg r)
{
        __raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
                        (DMA_RING_SIZE * ring) +
                        genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
                                           unsigned int ring,
                                           enum dma_ring_reg r)
{
        return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
                        (DMA_RING_SIZE * ring) +
                        genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
                                             unsigned int ring, u32 val,
                                             enum dma_ring_reg r)
{
        __raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
                        (DMA_RING_SIZE * ring) +
                        genet_dma_ring_regs[r]);
}

static int bcmgenet_get_settings(struct net_device *dev,
                                 struct ethtool_cmd *cmd)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);

        if (!netif_running(dev))
                return -EINVAL;

        if (!priv->phydev)
                return -ENODEV;

        return phy_ethtool_gset(priv->phydev, cmd);
}

static int bcmgenet_set_settings(struct net_device *dev,
                                 struct ethtool_cmd *cmd)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);

        if (!netif_running(dev))
                return -EINVAL;

        if (!priv->phydev)
                return -ENODEV;

        return phy_ethtool_sset(priv->phydev, cmd);
}

static int bcmgenet_set_rx_csum(struct net_device *dev,
                                netdev_features_t wanted)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);
        u32 rbuf_chk_ctrl;
        bool rx_csum_en;

        rx_csum_en = !!(wanted & NETIF_F_RXCSUM);

        rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);

        /* enable rx checksumming */
        if (rx_csum_en)
                rbuf_chk_ctrl |= RBUF_RXCHK_EN;
        else
                rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
        priv->desc_rxchk_en = rx_csum_en;

        /* If UniMAC forwards the CRC, we need to skip over it to get
         * a valid CHK bit set in the per-packet status word
         */
        if (rx_csum_en && priv->crc_fwd_en)
                rbuf_chk_ctrl |= RBUF_SKIP_FCS;
        else
                rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;

        bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);

        return 0;
}

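/* TX checksum offload relies on the 64-byte per-packet status block, so
 * enabling it turns on the 64B descriptor in both TBUF and RBUF
 */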
static int bcmgenet_set_tx_csum(struct net_device *dev,
                                netdev_features_t wanted)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);
        bool desc_64b_en;
        u32 tbuf_ctrl, rbuf_ctrl;

        tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
        rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);

        desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));

        /* enable the 64-byte descriptor in both directions (RBUF and TBUF) */
        if (desc_64b_en) {
                tbuf_ctrl |= RBUF_64B_EN;
                rbuf_ctrl |= RBUF_64B_EN;
        } else {
                tbuf_ctrl &= ~RBUF_64B_EN;
                rbuf_ctrl &= ~RBUF_64B_EN;
        }
        priv->desc_64b_en = desc_64b_en;

        bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
        bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);

        return 0;
}

static int bcmgenet_set_features(struct net_device *dev,
                                 netdev_features_t features)
{
        netdev_features_t changed = features ^ dev->features;
        netdev_features_t wanted = dev->wanted_features;
        int ret = 0;

        if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
                ret = bcmgenet_set_tx_csum(dev, wanted);
        if (changed & (NETIF_F_RXCSUM))
                ret = bcmgenet_set_rx_csum(dev, wanted);

        return ret;
}

static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);

        return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);

        priv->msg_enable = level;
}

/* ethtool statistics support */
enum bcmgenet_stat_type {
        BCMGENET_STAT_NETDEV = -1,
        BCMGENET_STAT_MIB_RX,
        BCMGENET_STAT_MIB_TX,
        BCMGENET_STAT_RUNT,
        BCMGENET_STAT_MISC,
        BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
        char stat_string[ETH_GSTRING_LEN];
        int stat_sizeof;
        int stat_offset;
        enum bcmgenet_stat_type type;
        /* reg offset from UMAC base for misc counters */
        u16 reg_offset;
};

#define STAT_NETDEV(m) { \
        .stat_string = __stringify(m), \
        .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
        .stat_offset = offsetof(struct net_device_stats, m), \
        .type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
        .stat_string = str, \
        .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
        .stat_offset = offsetof(struct bcmgenet_priv, m), \
        .type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
        .stat_string = str, \
        .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
        .stat_offset = offsetof(struct bcmgenet_priv, m), \
        .type = BCMGENET_STAT_MISC, \
        .reg_offset = offset, \
}

/* There is a 0xC gap between the end of RX and the beginning of TX stats
 * and then between the end of TX stats and the beginning of the RX RUNT stats
 */
#define BCMGENET_STAT_OFFSET    0xc

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
        /* general stats */
        STAT_NETDEV(rx_packets),
        STAT_NETDEV(tx_packets),
        STAT_NETDEV(rx_bytes),
        STAT_NETDEV(tx_bytes),
        STAT_NETDEV(rx_errors),
        STAT_NETDEV(tx_errors),
        STAT_NETDEV(rx_dropped),
        STAT_NETDEV(tx_dropped),
        STAT_NETDEV(multicast),
        /* UniMAC RSV counters */
        STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
        STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
        STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
        STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
        STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
        STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
        STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
        STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
        STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
        STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
        STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
        STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
        STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
        STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
        STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
        STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
        STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
        STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
        STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
        STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
        STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
        STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
        STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
        STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
        STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
        STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
        STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
        STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
        STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
        /* UniMAC TSV counters */
        STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
        STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
        STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
        STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
        STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
        STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
        STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
        STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
        STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
        STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
        STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
        STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
        STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
        STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
        STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
        STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
        STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
        STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
        STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
        STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
        STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
        STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
        STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
        STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
        STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
        STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
        STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
        STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
        STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
        /* UniMAC RUNT counters */
        STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
        STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
        STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
        STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
        /* Misc UniMAC counters */
        STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
                        UMAC_RBUF_OVFL_CNT),
        STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
        STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
        STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
        STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
        STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
};

#define BCMGENET_STATS_LEN      ARRAY_SIZE(bcmgenet_gstrings_stats)

static void bcmgenet_get_drvinfo(struct net_device *dev,
                                 struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
        strlcpy(info->version, "v2.0", sizeof(info->version));
        info->n_stats = BCMGENET_STATS_LEN;
}

static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return BCMGENET_STATS_LEN;
        default:
                return -EOPNOTSUPP;
        }
}

static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
                                 u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < BCMGENET_STATS_LEN; i++) {
                        memcpy(data + i * ETH_GSTRING_LEN,
                               bcmgenet_gstrings_stats[i].stat_string,
                               ETH_GSTRING_LEN);
                }
                break;
        }
}

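/* Snapshot the hardware MIB counters into the driver's mirror in priv.
 * The running offset j follows table order, which is why the table above
 * must match the hardware counter layout exactly.
 */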
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
        int i, j = 0;

        for (i = 0; i < BCMGENET_STATS_LEN; i++) {
                const struct bcmgenet_stats *s;
                u8 offset = 0;
                u32 val = 0;
                char *p;

                s = &bcmgenet_gstrings_stats[i];
                switch (s->type) {
                case BCMGENET_STAT_NETDEV:
                case BCMGENET_STAT_SOFT:
                        continue;
                case BCMGENET_STAT_MIB_RX:
                case BCMGENET_STAT_MIB_TX:
                case BCMGENET_STAT_RUNT:
                        if (s->type != BCMGENET_STAT_MIB_RX)
                                offset = BCMGENET_STAT_OFFSET;
                        val = bcmgenet_umac_readl(priv,
                                                  UMAC_MIB_START + j + offset);
                        break;
                case BCMGENET_STAT_MISC:
                        val = bcmgenet_umac_readl(priv, s->reg_offset);
                        /* clear if overflowed */
                        if (val == ~0)
                                bcmgenet_umac_writel(priv, 0, s->reg_offset);
                        break;
                }

                j += s->stat_sizeof;
                p = (char *)priv + s->stat_offset;
                *(u32 *)p = val;
        }
}

static void bcmgenet_get_ethtool_stats(struct net_device *dev,
                                       struct ethtool_stats *stats,
                                       u64 *data)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);
        int i;

        if (netif_running(dev))
                bcmgenet_update_mib_counters(priv);

        for (i = 0; i < BCMGENET_STATS_LEN; i++) {
                const struct bcmgenet_stats *s;
                char *p;

                s = &bcmgenet_gstrings_stats[i];
                if (s->type == BCMGENET_STAT_NETDEV)
                        p = (char *)&dev->stats;
                else
                        p = (char *)priv;
                p += s->stat_offset;
                data[i] = *(u32 *)p;
        }
}

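/* Enable/disable Energy Efficient Ethernet: gate the EEE clock and flip
 * the EEE/PM bits in the UMAC, TBUF and RBUF blocks together
 */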
static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);
        u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
        u32 reg;

        if (enable && !priv->clk_eee_enabled) {
                clk_prepare_enable(priv->clk_eee);
                priv->clk_eee_enabled = true;
        }

        reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
        if (enable)
                reg |= EEE_EN;
        else
                reg &= ~EEE_EN;
        bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

        /* Enable EEE and switch to a 27MHz clock automatically */
        reg = __raw_readl(priv->base + off);
        if (enable)
                reg |= TBUF_EEE_EN | TBUF_PM_EN;
        else
                reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
        __raw_writel(reg, priv->base + off);

        /* Do the same thing for the RBUF */
        reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
        if (enable)
                reg |= RBUF_EEE_EN | RBUF_PM_EN;
        else
                reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
        bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

        if (!enable && priv->clk_eee_enabled) {
                clk_disable_unprepare(priv->clk_eee);
                priv->clk_eee_enabled = false;
        }

        priv->eee.eee_enabled = enable;
        priv->eee.eee_active = enable;
}

static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct ethtool_eee *p = &priv->eee;

        if (GENET_IS_V1(priv))
                return -EOPNOTSUPP;

        e->eee_enabled = p->eee_enabled;
        e->eee_active = p->eee_active;
        e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

        return phy_ethtool_get_eee(priv->phydev, e);
}

static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct ethtool_eee *p = &priv->eee;
        int ret = 0;

        if (GENET_IS_V1(priv))
                return -EOPNOTSUPP;

        p->eee_enabled = e->eee_enabled;

        if (!p->eee_enabled) {
                bcmgenet_eee_enable_set(dev, false);
        } else {
                ret = phy_init_eee(priv->phydev, 0);
                if (ret) {
                        netif_err(priv, hw, dev, "EEE initialization failed\n");
                        return ret;
                }

                bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
                bcmgenet_eee_enable_set(dev, true);
        }

        return phy_ethtool_set_eee(priv->phydev, e);
}

static int bcmgenet_nway_reset(struct net_device *dev)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);

        return genphy_restart_aneg(priv->phydev);
}

/* standard ethtool support functions. */
static struct ethtool_ops bcmgenet_ethtool_ops = {
        .get_strings            = bcmgenet_get_strings,
        .get_sset_count         = bcmgenet_get_sset_count,
        .get_ethtool_stats      = bcmgenet_get_ethtool_stats,
        .get_settings           = bcmgenet_get_settings,
        .set_settings           = bcmgenet_set_settings,
        .get_drvinfo            = bcmgenet_get_drvinfo,
        .get_link               = ethtool_op_get_link,
        .get_msglevel           = bcmgenet_get_msglevel,
        .set_msglevel           = bcmgenet_set_msglevel,
        .get_wol                = bcmgenet_get_wol,
        .set_wol                = bcmgenet_set_wol,
        .get_eee                = bcmgenet_get_eee,
        .set_eee                = bcmgenet_set_eee,
        .nway_reset             = bcmgenet_nway_reset,
};

/* Power down the UniMAC, based on mode. */
static int bcmgenet_power_down(struct bcmgenet_priv *priv,
                                enum bcmgenet_power_mode mode)
{
        int ret = 0;
        u32 reg;

        switch (mode) {
        case GENET_POWER_CABLE_SENSE:
                phy_detach(priv->phydev);
                break;

        case GENET_POWER_WOL_MAGIC:
                ret = bcmgenet_wol_power_down_cfg(priv, mode);
                break;

        case GENET_POWER_PASSIVE:
                /* Power down LED */
                if (priv->hw_params->flags & GENET_HAS_EXT) {
                        reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
                        reg |= (EXT_PWR_DOWN_PHY |
                                EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
                        bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

                        bcmgenet_phy_power_set(priv->dev, false);
                }
                break;
        default:
                break;
        }

        return ret;
}

static void bcmgenet_power_up(struct bcmgenet_priv *priv,
                              enum bcmgenet_power_mode mode)
{
        u32 reg;

        if (!(priv->hw_params->flags & GENET_HAS_EXT))
                return;

        reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

        switch (mode) {
        case GENET_POWER_PASSIVE:
                reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
                                EXT_PWR_DOWN_BIAS);
                /* fallthrough */
        case GENET_POWER_CABLE_SENSE:
                /* enable APD */
                reg |= EXT_PWR_DN_EN_LD;
                break;
        case GENET_POWER_WOL_MAGIC:
                bcmgenet_wol_power_up_cfg(priv, mode);
                return;
        default:
                break;
        }

        bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

        if (mode == GENET_POWER_PASSIVE)
                bcmgenet_mii_reset(priv->dev);
}

/* ioctl handles special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);
        int val = 0;

        if (!netif_running(dev))
                return -EINVAL;

        switch (cmd) {
        case SIOCGMIIPHY:
        case SIOCGMIIREG:
        case SIOCSMIIREG:
                if (!priv->phydev)
                        val = -ENODEV;
                else
                        val = phy_mii_ioctl(priv->phydev, rq, cmd);
                break;

        default:
                val = -EINVAL;
                break;
        }

        return val;
}

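/* Grab the control block at the ring's current write pointer and advance
 * the pointer, wrapping back to cb_ptr once end_ptr has been handed out
 */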
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
                                         struct bcmgenet_tx_ring *ring)
{
        struct enet_cb *tx_cb_ptr;

        tx_cb_ptr = ring->cbs;
        tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

        /* Advancing local write pointer */
        if (ring->write_ptr == ring->end_ptr)
                ring->write_ptr = ring->cb_ptr;
        else
                ring->write_ptr++;

        return tx_cb_ptr;
}

/* Simple helper to free a control block's resources */
static void bcmgenet_free_cb(struct enet_cb *cb)
{
        dev_kfree_skb_any(cb->skb);
        cb->skb = NULL;
        dma_unmap_addr_set(cb, dma_addr, 0);
}

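/* Ring 16 interrupts live in the INTRL2_0 controller, while the per-queue
 * RX/TX rings have one interrupt bit each in INTRL2_1
 */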
static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
{
        bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
                                 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
{
        bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
                                 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
        bcmgenet_intrl2_1_writel(ring->priv,
                                 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
                                 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
{
        bcmgenet_intrl2_1_writel(ring->priv,
                                 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
                                 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
{
        bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
                                 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
{
        bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
                                 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
        bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
                                 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
{
        bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
                                 INTRL2_CPU_MASK_SET);
}

/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
                                          struct bcmgenet_tx_ring *ring)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct enet_cb *tx_cb_ptr;
        struct netdev_queue *txq;
        unsigned int pkts_compl = 0;
        unsigned int c_index;
        unsigned int txbds_ready;
        unsigned int txbds_processed = 0;

        /* Compute how many buffers have been transmitted since the last call */
        c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
        c_index &= DMA_C_INDEX_MASK;

        if (likely(c_index >= ring->c_index))
                txbds_ready = c_index - ring->c_index;
        else
                txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;

        netif_dbg(priv, tx_done, dev,
                  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
                  __func__, ring->index, ring->c_index, c_index, txbds_ready);

        /* Reclaim transmitted buffers */
        while (txbds_processed < txbds_ready) {
                tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
                if (tx_cb_ptr->skb) {
                        pkts_compl++;
                        dev->stats.tx_packets++;
                        dev->stats.tx_bytes += tx_cb_ptr->skb->len;
                        dma_unmap_single(&dev->dev,
                                         dma_unmap_addr(tx_cb_ptr, dma_addr),
                                         tx_cb_ptr->skb->len,
                                         DMA_TO_DEVICE);
                        bcmgenet_free_cb(tx_cb_ptr);
                } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
                        dev->stats.tx_bytes +=
                                dma_unmap_len(tx_cb_ptr, dma_len);
                        dma_unmap_page(&dev->dev,
                                       dma_unmap_addr(tx_cb_ptr, dma_addr),
                                       dma_unmap_len(tx_cb_ptr, dma_len),
                                       DMA_TO_DEVICE);
                        dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
                }

                txbds_processed++;
                if (likely(ring->clean_ptr < ring->end_ptr))
                        ring->clean_ptr++;
                else
                        ring->clean_ptr = ring->cb_ptr;
        }

        ring->free_bds += txbds_processed;
        ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;

        if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
                txq = netdev_get_tx_queue(dev, ring->queue);
                if (netif_tx_queue_stopped(txq))
                        netif_tx_wake_queue(txq);
        }

        return pkts_compl;
}

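/* Locked wrapper around the reclaim routine above */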
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
                                struct bcmgenet_tx_ring *ring)
{
        unsigned int released;
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        released = __bcmgenet_tx_reclaim(dev, ring);
        spin_unlock_irqrestore(&ring->lock, flags);

        return released;
}

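/* NAPI poll handler for TX completions: re-enable the ring interrupt only
 * once a poll reclaims nothing, i.e. the ring has gone idle
 */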
static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
{
        struct bcmgenet_tx_ring *ring =
                container_of(napi, struct bcmgenet_tx_ring, napi);
        unsigned int work_done = 0;

        work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);

        if (work_done == 0) {
                napi_complete(napi);
                ring->int_enable(ring);

                return 0;
        }

        return budget;
}

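/* Reclaim completed TX buffers on every ring, including default ring 16 */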
static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);
        int i;

        if (netif_is_multiqueue(dev)) {
                for (i = 0; i < priv->hw_params->tx_queues; i++)
                        bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
        }

        bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}

/* Transmits a single SKB (either the head of a fragment list or a
 * lone SKB); the caller must hold the ring lock
 */
static int bcmgenet_xmit_single(struct net_device *dev,
                                struct sk_buff *skb,
                                u16 dma_desc_flags,
                                struct bcmgenet_tx_ring *ring)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct device *kdev = &priv->pdev->dev;
        struct enet_cb *tx_cb_ptr;
        unsigned int skb_len;
        dma_addr_t mapping;
        u32 length_status;
        int ret;

        tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

        if (unlikely(!tx_cb_ptr))
                BUG();

        tx_cb_ptr->skb = skb;

        skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);

        mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
        ret = dma_mapping_error(kdev, mapping);
        if (ret) {
                priv->mib.tx_dma_failed++;
                netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
                dev_kfree_skb(skb);
                return ret;
        }

        dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
        dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
        length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
                        (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
                        DMA_TX_APPEND_CRC;

        if (skb->ip_summed == CHECKSUM_PARTIAL)
                length_status |= DMA_TX_DO_CSUM;

        dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);

        return 0;
}

/* Transmit a SKB fragment */
static int bcmgenet_xmit_frag(struct net_device *dev,
                              skb_frag_t *frag,
                              u16 dma_desc_flags,
                              struct bcmgenet_tx_ring *ring)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct device *kdev = &priv->pdev->dev;
        struct enet_cb *tx_cb_ptr;
        dma_addr_t mapping;
        int ret;

        tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

        if (unlikely(!tx_cb_ptr))
                BUG();
        tx_cb_ptr->skb = NULL;

        mapping = skb_frag_dma_map(kdev, frag, 0,
                                   skb_frag_size(frag), DMA_TO_DEVICE);
        ret = dma_mapping_error(kdev, mapping);
        if (ret) {
                priv->mib.tx_dma_failed++;
                netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
                          __func__);
                return ret;
        }

        dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
        dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);

        dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
                    (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
                    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));

        return 0;
}

/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets in the descriptors
 */
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
                                            struct sk_buff *skb)
{
        struct status_64 *status = NULL;
        struct sk_buff *new_skb;
        u16 offset;
        u8 ip_proto;
        u16 ip_ver;
        u32 tx_csum_info;

        if (unlikely(skb_headroom(skb) < sizeof(*status))) {
                /* If the 64-byte status block is enabled, we must make sure
                 * the skb has enough headroom for us to insert it.
                 */
                new_skb = skb_realloc_headroom(skb, sizeof(*status));
                dev_kfree_skb(skb);
                if (!new_skb) {
                        dev->stats.tx_dropped++;
                        return NULL;
                }
                skb = new_skb;
        }

        skb_push(skb, sizeof(*status));
        status = (struct status_64 *)skb->data;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                ip_ver = ntohs(skb->protocol);
                switch (ip_ver) {
                case ETH_P_IP:
                        ip_proto = ip_hdr(skb)->protocol;
                        break;
                case ETH_P_IPV6:
                        ip_proto = ipv6_hdr(skb)->nexthdr;
                        break;
                default:
                        return skb;
                }

                offset = skb_checksum_start_offset(skb) - sizeof(*status);
                tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
                                (offset + skb->csum_offset);

                /* Set the length valid bit for TCP and UDP and just set
                 * the special UDP flag for IPv4; otherwise clear the
                 * checksum info.
                 */
                if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
                        tx_csum_info |= STATUS_TX_CSUM_LV;
                        if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
                                tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
                } else {
                        tx_csum_info = 0;
                }

                status->tx_csum_info = tx_csum_info;
        }

        return skb;
}

static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct bcmgenet_tx_ring *ring = NULL;
        struct netdev_queue *txq;
        unsigned long flags = 0;
        int nr_frags, index;
        u16 dma_desc_flags;
        int ret;
        int i;

        index = skb_get_queue_mapping(skb);
        /* Mapping strategy:
         * queue_mapping = 0, unclassified, packet transmitted through ring 16
         * queue_mapping = 1, goes to ring 0. (highest priority queue)
         * queue_mapping = 2, goes to ring 1.
         * queue_mapping = 3, goes to ring 2.
         * queue_mapping = 4, goes to ring 3.
         */
        if (index == 0)
                index = DESC_INDEX;
        else
                index -= 1;

        nr_frags = skb_shinfo(skb)->nr_frags;
        ring = &priv->tx_rings[index];
        txq = netdev_get_tx_queue(dev, ring->queue);

        spin_lock_irqsave(&ring->lock, flags);
        if (ring->free_bds <= nr_frags + 1) {
                netif_tx_stop_queue(txq);
                netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
                           __func__, index, ring->queue);
                ret = NETDEV_TX_BUSY;
                goto out;
        }

        if (skb_padto(skb, ETH_ZLEN)) {
                ret = NETDEV_TX_OK;
                goto out;
        }

        /* set the SKB transmit checksum */
        if (priv->desc_64b_en) {
                skb = bcmgenet_put_tx_csum(dev, skb);
                if (!skb) {
                        ret = NETDEV_TX_OK;
                        goto out;
                }
        }

        dma_desc_flags = DMA_SOP;
        if (nr_frags == 0)
                dma_desc_flags |= DMA_EOP;

        /* Transmit single SKB or head of fragment list */
        ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
        if (ret) {
                ret = NETDEV_TX_OK;
                goto out;
        }

        /* xmit fragment */
        for (i = 0; i < nr_frags; i++) {
                ret = bcmgenet_xmit_frag(dev,
                                         &skb_shinfo(skb)->frags[i],
                                         (i == nr_frags - 1) ? DMA_EOP : 0,
                                         ring);
                if (ret) {
                        ret = NETDEV_TX_OK;
                        goto out;
                }
        }

        skb_tx_timestamp(skb);

        /* Decrement total BD count and advance our write pointer */
        ring->free_bds -= nr_frags + 1;
        ring->prod_index += nr_frags + 1;
        ring->prod_index &= DMA_P_INDEX_MASK;

        if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
                netif_tx_stop_queue(txq);

        if (!skb->xmit_more || netif_xmit_stopped(txq))
                /* Packets are ready, update producer index */
                bcmgenet_tdma_ring_writel(priv, ring->index,
                                          ring->prod_index, TDMA_PROD_INDEX);
out:
        spin_unlock_irqrestore(&ring->lock, flags);

        return ret;
}

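/* Allocate and DMA-map a fresh Rx skb, swap it into the ring's control
 * block, and hand the previous (now unmapped) skb back to the caller
 */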
static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
                                          struct enet_cb *cb)
{
        struct device *kdev = &priv->pdev->dev;
        struct sk_buff *skb;
        struct sk_buff *rx_skb;
        dma_addr_t mapping;

        /* Allocate a new Rx skb */
        skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
        if (!skb) {
                priv->mib.alloc_rx_buff_failed++;
                netif_err(priv, rx_err, priv->dev,
                          "%s: Rx skb allocation failed\n", __func__);
                return NULL;
        }

        /* DMA-map the new Rx skb */
        mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
                                 DMA_FROM_DEVICE);
        if (dma_mapping_error(kdev, mapping)) {
                priv->mib.rx_dma_failed++;
                dev_kfree_skb_any(skb);
                netif_err(priv, rx_err, priv->dev,
                          "%s: Rx skb DMA mapping failed\n", __func__);
                return NULL;
        }

        /* Grab the current Rx skb from the ring and DMA-unmap it */
        rx_skb = cb->skb;
        if (likely(rx_skb))
                dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
                                 priv->rx_buf_len, DMA_FROM_DEVICE);

        /* Put the new Rx skb on the ring */
        cb->skb = skb;
        dma_unmap_addr_set(cb, dma_addr, mapping);
        dmadesc_set_addr(priv, cb->bd_addr, mapping);

        /* Return the current Rx skb to caller */
        return rx_skb;
}

/* bcmgenet_desc_rx - descriptor-based rx processing.
 * This could be called from a bottom half, or from the NAPI polling method.
 */
static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
                                     unsigned int budget)
{
        struct bcmgenet_priv *priv = ring->priv;
        struct net_device *dev = priv->dev;
        struct enet_cb *cb;
        struct sk_buff *skb;
        u32 dma_length_status;
        unsigned long dma_flag;
        int len;
        unsigned int rxpktprocessed = 0, rxpkttoprocess;
        unsigned int p_index;
        unsigned int discards;
        unsigned int chksum_ok = 0;

        p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);

        discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
                   DMA_P_INDEX_DISCARD_CNT_MASK;
        if (discards > ring->old_discards) {
                discards = discards - ring->old_discards;
                dev->stats.rx_missed_errors += discards;
                dev->stats.rx_errors += discards;
                ring->old_discards += discards;

                /* Clear HW register when we reach 75% of maximum 0xFFFF */
                if (ring->old_discards >= 0xC000) {
                        ring->old_discards = 0;
                        bcmgenet_rdma_ring_writel(priv, ring->index, 0,
                                                  RDMA_PROD_INDEX);
                }
        }

        p_index &= DMA_P_INDEX_MASK;

        if (likely(p_index >= ring->c_index))
                rxpkttoprocess = p_index - ring->c_index;
        else
                rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
                                 p_index;

        netif_dbg(priv, rx_status, dev,
                  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);

        while ((rxpktprocessed < rxpkttoprocess) &&
               (rxpktprocessed < budget)) {
                cb = &priv->rx_cbs[ring->read_ptr];
                skb = bcmgenet_rx_refill(priv, cb);

                if (unlikely(!skb)) {
                        dev->stats.rx_dropped++;
                        goto next;
                }

                if (!priv->desc_64b_en) {
                        dma_length_status =
                                dmadesc_get_length_status(priv, cb->bd_addr);
                } else {
                        struct status_64 *status;

                        status = (struct status_64 *)skb->data;
                        dma_length_status = status->length_status;
                }

                /* DMA flags and length are still valid no matter how
                 * we got the Receive Status Vector (64B RSB or register)
                 */
                dma_flag = dma_length_status & 0xffff;
                len = dma_length_status >> DMA_BUFLENGTH_SHIFT;

                netif_dbg(priv, rx_status, dev,
                          "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
                          __func__, p_index, ring->c_index,
                          ring->read_ptr, dma_length_status);

                if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
                        netif_err(priv, rx_status, dev,
                                  "dropping fragmented packet!\n");
                        dev->stats.rx_errors++;
                        dev_kfree_skb_any(skb);
                        goto next;
                }

                /* report errors */
                if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
                                                DMA_RX_OV |
                                                DMA_RX_NO |
                                                DMA_RX_LG |
                                                DMA_RX_RXER))) {
                        netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
                                  (unsigned int)dma_flag);
                        if (dma_flag & DMA_RX_CRC_ERROR)
                                dev->stats.rx_crc_errors++;
                        if (dma_flag & DMA_RX_OV)
                                dev->stats.rx_over_errors++;
                        if (dma_flag & DMA_RX_NO)
                                dev->stats.rx_frame_errors++;
                        if (dma_flag & DMA_RX_LG)
                                dev->stats.rx_length_errors++;
                        dev->stats.rx_errors++;
                        dev_kfree_skb_any(skb);
                        goto next;
                } /* error packet */

                chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
                             priv->desc_rxchk_en;

                skb_put(skb, len);
                if (priv->desc_64b_en) {
                        skb_pull(skb, 64);
                        len -= 64;
                }

                if (likely(chksum_ok))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;

1532                 /* remove the 2 bytes the hardware prepended for IP alignment */
1533                 skb_pull(skb, 2);
1534                 len -= 2;
1535
1536                 if (priv->crc_fwd_en) {
1537                         skb_trim(skb, len - ETH_FCS_LEN);
1538                         len -= ETH_FCS_LEN;
1539                 }
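                /* Worked length accounting example (hypothetical values):
                 * a 64-byte wire frame received with the 64B status block
                 * enabled and CRC forwarding on is reported as
                 * len = 64 (RSB) + 2 (alignment pad) + 64 (frame incl. FCS)
                 * = 130; after the skb_pull()/skb_trim() calls above the
                 * skb holds 130 - 64 - 2 - 4 = 60 bytes of frame data.
                 */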
1540
1541                 /* Finish setting up the received SKB and send it to the kernel */
1542                 skb->protocol = eth_type_trans(skb, priv->dev);
1543                 dev->stats.rx_packets++;
1544                 dev->stats.rx_bytes += len;
1545                 if (dma_flag & DMA_RX_MULT)
1546                         dev->stats.multicast++;
1547
1548                 /* Notify kernel */
1549                 napi_gro_receive(&ring->napi, skb);
1550                 netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
1551
1552 next:
1553                 rxpktprocessed++;
1554                 if (likely(ring->read_ptr < ring->end_ptr))
1555                         ring->read_ptr++;
1556                 else
1557                         ring->read_ptr = ring->cb_ptr;
1558
1559                 ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
1560                 bcmgenet_rdma_ring_writel(priv, ring->index,
1561                                           ring->c_index, RDMA_CONS_INDEX);
1561         }
1562
1563         return rxpktprocessed;
1564 }
1565
1566 /* Rx NAPI polling method */
1567 static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
1568 {
1569         struct bcmgenet_rx_ring *ring = container_of(napi,
1570                         struct bcmgenet_rx_ring, napi);
1571         unsigned int work_done;
1572
1573         work_done = bcmgenet_desc_rx(ring, budget);
1574
1575         if (work_done < budget) {
1576                 napi_complete(napi);
1577                 ring->int_enable(ring);
1578         }
1579
1580         return work_done;
1581 }
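/* Note: when work_done == budget the NAPI core keeps this ring scheduled
 * and will invoke the poll method again, so ring interrupts are only
 * re-enabled (via ring->int_enable) once a poll pass consumes less than
 * its budget.
 */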
1582
1583 /* Assign skb to RX DMA descriptor. */
1584 static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
1585                                      struct bcmgenet_rx_ring *ring)
1586 {
1587         struct enet_cb *cb;
1588         struct sk_buff *skb;
1589         int i;
1590
1591         netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
1592
1593         /* loop here for each buffer that needs assigning */
1594         for (i = 0; i < ring->size; i++) {
1595                 cb = ring->cbs + i;
1596                 skb = bcmgenet_rx_refill(priv, cb);
1597                 if (skb)
1598                         dev_kfree_skb_any(skb);
1599                 if (!cb->skb)
1600                         return -ENOMEM;
1601         }
1602
1603         return 0;
1604 }
1605
1606 static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
1607 {
1608         struct enet_cb *cb;
1609         int i;
1610
1611         for (i = 0; i < priv->num_rx_bds; i++) {
1612                 cb = &priv->rx_cbs[i];
1613
1614                 if (dma_unmap_addr(cb, dma_addr)) {
1615                         dma_unmap_single(&priv->dev->dev,
1616                                          dma_unmap_addr(cb, dma_addr),
1617                                          priv->rx_buf_len, DMA_FROM_DEVICE);
1618                         dma_unmap_addr_set(cb, dma_addr, 0);
1619                 }
1620
1621                 if (cb->skb)
1622                         bcmgenet_free_cb(cb);
1623         }
1624 }
1625
1626 static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
1627 {
1628         u32 reg;
1629
1630         reg = bcmgenet_umac_readl(priv, UMAC_CMD);
1631         if (enable)
1632                 reg |= mask;
1633         else
1634                 reg &= ~mask;
1635         bcmgenet_umac_writel(priv, reg, UMAC_CMD);
1636
1637         /* UniMAC stops on a packet boundary, wait for a full-size packet
1638          * to be processed
1639          */
1640         if (!enable)
1641                 usleep_range(1000, 2000);
1642 }
1643
1644 static int reset_umac(struct bcmgenet_priv *priv)
1645 {
1646         struct device *kdev = &priv->pdev->dev;
1647         unsigned int timeout = 0;
1648         u32 reg;
1649
1650         /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
1651         bcmgenet_rbuf_ctrl_set(priv, 0);
1652         udelay(10);
1653
1654         /* disable MAC while updating its registers */
1655         bcmgenet_umac_writel(priv, 0, UMAC_CMD);
1656
1657         /* issue soft reset, wait for it to complete */
1658         bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
1659         while (timeout++ < 1000) {
1660                 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
1661                 if (!(reg & CMD_SW_RESET))
1662                         return 0;
1663
1664                 udelay(1);
1665         }
1666
1667         /* The loop above returns 0 as soon as the reset completes, so
1668          * reaching this point means the MAC never came out of reset
1669          */
1670         dev_err(kdev, "timeout waiting for MAC to come out of reset\n");
1671         return -ETIMEDOUT;
1674 }
1675
1676 static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
1677 {
1678         /* Mask all interrupts */
1679         bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
1680         bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
1681         bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
1682         bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
1683         bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
1684         bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
1685 }
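/* A sketch of the INTRL2 register scheme assumed throughout this file:
 * writing 1s to INTRL2_CPU_MASK_SET masks the corresponding interrupt
 * sources, writing 1s to INTRL2_CPU_CLEAR acknowledges latched status
 * bits, and writing 1s to INTRL2_CPU_MASK_CLEAR unmasks sources, which
 * is how init_umac() below enables its int0_enable/int1_enable bits.
 */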
1686
1687 static int init_umac(struct bcmgenet_priv *priv)
1688 {
1689         struct device *kdev = &priv->pdev->dev;
1690         int ret;
1691         u32 reg;
1692         u32 int0_enable = 0;
1693         u32 int1_enable = 0;
1694         int i;
1695
1696         dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
1697
1698         ret = reset_umac(priv);
1699         if (ret)
1700                 return ret;
1701
1702         bcmgenet_umac_writel(priv, 0, UMAC_CMD);
1703         /* clear tx/rx counter */
1704         bcmgenet_umac_writel(priv,
1705                              MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
1706                              UMAC_MIB_CTRL);
1707         bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
1708
1709         bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
1710
1711         /* init rx registers, enable ip header optimization */
1712         reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
1713         reg |= RBUF_ALIGN_2B;
1714         bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
1715
1716         if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
1717                 bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
1718
1719         bcmgenet_intr_disable(priv);
1720
1721         /* Enable Rx default queue 16 interrupts */
1722         int0_enable |= UMAC_IRQ_RXDMA_DONE;
1723
1724         /* Enable Tx default queue 16 interrupts */
1725         int0_enable |= UMAC_IRQ_TXDMA_DONE;
1726
1727         /* Monitor cable plug/unplug events for internal and external PHYs */
1728         if (phy_is_internal(priv->phydev) || priv->ext_phy) {
1729                 int0_enable |= UMAC_IRQ_LINK_EVENT;
1730         } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
1733                 if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
1734                         int0_enable |= UMAC_IRQ_LINK_EVENT;
1735
1736                 reg = bcmgenet_bp_mc_get(priv);
1737                 reg |= BIT(priv->hw_params->bp_in_en_shift);
1738
1739                 /* bp_mask: back pressure mask */
1740                 if (netif_is_multiqueue(priv->dev))
1741                         reg |= priv->hw_params->bp_in_mask;
1742                 else
1743                         reg &= ~priv->hw_params->bp_in_mask;
1744                 bcmgenet_bp_mc_set(priv, reg);
1745         }
1746
1747         /* Enable MDIO interrupts on GENET v3+ */
1748         if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
1749                 int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
1750
1751         /* Enable Rx priority queue interrupts */
1752         for (i = 0; i < priv->hw_params->rx_queues; ++i)
1753                 int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
1754
1755         /* Enable Tx priority queue interrupts */
1756         for (i = 0; i < priv->hw_params->tx_queues; ++i)
1757                 int1_enable |= (1 << i);
1758
1759         bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
1760         bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
1761
1762         /* The Rx/Tx engines themselves are enabled later, from
1763          * bcmgenet_netif_start(), via umac_enable_set()
1764          */
1765         dev_dbg(kdev, "done init umac\n");
1764
1765         return 0;
1766 }
1767
1768 /* Initialize a Tx ring along with corresponding hardware registers */
1769 static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
1770                                   unsigned int index, unsigned int size,
1771                                   unsigned int start_ptr, unsigned int end_ptr)
1772 {
1773         struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
1774         u32 words_per_bd = WORDS_PER_BD(priv);
1775         u32 flow_period_val = 0;
1776
1777         spin_lock_init(&ring->lock);
1778         ring->priv = priv;
1779         ring->index = index;
1780         if (index == DESC_INDEX) {
1781                 ring->queue = 0;
1782                 ring->int_enable = bcmgenet_tx_ring16_int_enable;
1783                 ring->int_disable = bcmgenet_tx_ring16_int_disable;
1784         } else {
1785                 ring->queue = index + 1;
1786                 ring->int_enable = bcmgenet_tx_ring_int_enable;
1787                 ring->int_disable = bcmgenet_tx_ring_int_disable;
1788         }
1789         ring->cbs = priv->tx_cbs + start_ptr;
1790         ring->size = size;
1791         ring->clean_ptr = start_ptr;
1792         ring->c_index = 0;
1793         ring->free_bds = size;
1794         ring->write_ptr = start_ptr;
1795         ring->cb_ptr = start_ptr;
1796         ring->end_ptr = end_ptr - 1;
1797         ring->prod_index = 0;
1798
1799         /* Set flow period for ring != 16 */
1800         if (index != DESC_INDEX)
1801                 flow_period_val = ENET_MAX_MTU_SIZE << 16;
1802
1803         bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
1804         bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
1805         bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
1806         /* Disable rate control for now */
1807         bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
1808                                   TDMA_FLOW_PERIOD);
1809         bcmgenet_tdma_ring_writel(priv, index,
1810                                   ((size << DMA_RING_SIZE_SHIFT) |
1811                                    RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
1812
1813         /* Set start and end address, read and write pointers */
1814         bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
1815                                   DMA_START_ADDR);
1816         bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
1817                                   TDMA_READ_PTR);
1818         bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
1819                                   TDMA_WRITE_PTR);
1820         bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
1821                                   DMA_END_ADDR);
1822 }
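/* Worked example for the pointer arithmetic above (hypothetical values):
 * with words_per_bd = 2, a ring spanning descriptors 32..63 gets
 * DMA_START_ADDR = 32 * 2 = 64 and DMA_END_ADDR = 64 * 2 - 1 = 127,
 * i.e. these registers are expressed in 32-bit words, not descriptors.
 */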
1823
1824 /* Initialize a RDMA ring */
1825 static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
1826                                  unsigned int index, unsigned int size,
1827                                  unsigned int start_ptr, unsigned int end_ptr)
1828 {
1829         struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
1830         u32 words_per_bd = WORDS_PER_BD(priv);
1831         int ret;
1832
1833         ring->priv = priv;
1834         ring->index = index;
1835         if (index == DESC_INDEX) {
1836                 ring->int_enable = bcmgenet_rx_ring16_int_enable;
1837                 ring->int_disable = bcmgenet_rx_ring16_int_disable;
1838         } else {
1839                 ring->int_enable = bcmgenet_rx_ring_int_enable;
1840                 ring->int_disable = bcmgenet_rx_ring_int_disable;
1841         }
1842         ring->cbs = priv->rx_cbs + start_ptr;
1843         ring->size = size;
1844         ring->c_index = 0;
1845         ring->read_ptr = start_ptr;
1846         ring->cb_ptr = start_ptr;
1847         ring->end_ptr = end_ptr - 1;
1848
1849         ret = bcmgenet_alloc_rx_buffers(priv, ring);
1850         if (ret)
1851                 return ret;
1852
1853         bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
1854         bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
1855         bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
1856         bcmgenet_rdma_ring_writel(priv, index,
1857                                   ((size << DMA_RING_SIZE_SHIFT) |
1858                                    RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
1859         bcmgenet_rdma_ring_writel(priv, index,
1860                                   (DMA_FC_THRESH_LO <<
1861                                    DMA_XOFF_THRESHOLD_SHIFT) |
1862                                    DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
1863
1864         /* Set start and end address, read and write pointers */
1865         bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
1866                                   DMA_START_ADDR);
1867         bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
1868                                   RDMA_READ_PTR);
1869         bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
1870                                   RDMA_WRITE_PTR);
1871         bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
1872                                   DMA_END_ADDR);
1873
1874         return ret;
1875 }
1876
1877 static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
1878 {
1879         unsigned int i;
1880         struct bcmgenet_tx_ring *ring;
1881
1882         for (i = 0; i < priv->hw_params->tx_queues; ++i) {
1883                 ring = &priv->tx_rings[i];
1884                 netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
1885         }
1886
1887         ring = &priv->tx_rings[DESC_INDEX];
1888         netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
1889 }
1890
1891 static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
1892 {
1893         unsigned int i;
1894         struct bcmgenet_tx_ring *ring;
1895
1896         for (i = 0; i < priv->hw_params->tx_queues; ++i) {
1897                 ring = &priv->tx_rings[i];
1898                 napi_enable(&ring->napi);
1899         }
1900
1901         ring = &priv->tx_rings[DESC_INDEX];
1902         napi_enable(&ring->napi);
1903 }
1904
1905 static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
1906 {
1907         unsigned int i;
1908         struct bcmgenet_tx_ring *ring;
1909
1910         for (i = 0; i < priv->hw_params->tx_queues; ++i) {
1911                 ring = &priv->tx_rings[i];
1912                 napi_disable(&ring->napi);
1913         }
1914
1915         ring = &priv->tx_rings[DESC_INDEX];
1916         napi_disable(&ring->napi);
1917 }
1918
1919 static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
1920 {
1921         unsigned int i;
1922         struct bcmgenet_tx_ring *ring;
1923
1924         for (i = 0; i < priv->hw_params->tx_queues; ++i) {
1925                 ring = &priv->tx_rings[i];
1926                 netif_napi_del(&ring->napi);
1927         }
1928
1929         ring = &priv->tx_rings[DESC_INDEX];
1930         netif_napi_del(&ring->napi);
1931 }
1932
1933 /* Initialize Tx queues
1934  *
1935  * Queues 0-3 are priority-based, each one has 32 descriptors,
1936  * with queue 0 being the highest priority queue.
1937  *
1938  * Queue 16 is the default Tx queue with
1939  * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
1940  *
1941  * The transmit control block pool is then partitioned as follows:
1942  * - Tx queue 0 uses tx_cbs[0..31]
1943  * - Tx queue 1 uses tx_cbs[32..63]
1944  * - Tx queue 2 uses tx_cbs[64..95]
1945  * - Tx queue 3 uses tx_cbs[96..127]
1946  * - Tx queue 16 uses tx_cbs[128..255]
1947  */
1948 static void bcmgenet_init_tx_queues(struct net_device *dev)
1949 {
1950         struct bcmgenet_priv *priv = netdev_priv(dev);
1951         u32 i, dma_enable;
1952         u32 dma_ctrl, ring_cfg;
1953         u32 dma_priority[3] = {0, 0, 0};
1954
1955         dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
1956         dma_enable = dma_ctrl & DMA_EN;
1957         dma_ctrl &= ~DMA_EN;
1958         bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
1959
1960         dma_ctrl = 0;
1961         ring_cfg = 0;
1962
1963         /* Enable strict priority arbiter mode */
1964         bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
1965
1966         /* Initialize Tx priority queues */
1967         for (i = 0; i < priv->hw_params->tx_queues; i++) {
1968                 bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
1969                                       i * priv->hw_params->tx_bds_per_q,
1970                                       (i + 1) * priv->hw_params->tx_bds_per_q);
1971                 ring_cfg |= (1 << i);
1972                 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
1973                 dma_priority[DMA_PRIO_REG_INDEX(i)] |=
1974                         ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
1975         }
1976
1977         /* Initialize Tx default queue 16 */
1978         bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
1979                               priv->hw_params->tx_queues *
1980                               priv->hw_params->tx_bds_per_q,
1981                               TOTAL_DESC);
1982         ring_cfg |= (1 << DESC_INDEX);
1983         dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
1984         dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
1985                 ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
1986                  DMA_PRIO_REG_SHIFT(DESC_INDEX));
1987
1988         /* Set Tx queue priorities */
1989         bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
1990         bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
1991         bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
1992
1993         /* Initialize Tx NAPI */
1994         bcmgenet_init_tx_napi(priv);
1995
1996         /* Enable Tx queues */
1997         bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
1998
1999         /* Enable Tx DMA */
2000         if (dma_enable)
2001                 dma_ctrl |= DMA_EN;
2002         bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
2003 }
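/* Sketch of the DMA_PRIORITY packing used above, assuming the
 * DMA_PRIO_REG_INDEX()/DMA_PRIO_REG_SHIFT() helpers pack six 5-bit
 * priority fields per 32-bit register: queue 0 lands in dma_priority[0]
 * bits 4:0, queue 5 in bits 29:25, queue 6 in dma_priority[1] bits 4:0,
 * and DESC_INDEX (16) in dma_priority[2], which is why three registers
 * (DMA_PRIORITY_0..2) cover all seventeen rings.
 */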
2004
2005 static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
2006 {
2007         unsigned int i;
2008         struct bcmgenet_rx_ring *ring;
2009
2010         for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2011                 ring = &priv->rx_rings[i];
2012                 netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
2013         }
2014
2015         ring = &priv->rx_rings[DESC_INDEX];
2016         netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
2017 }
2018
2019 static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
2020 {
2021         unsigned int i;
2022         struct bcmgenet_rx_ring *ring;
2023
2024         for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2025                 ring = &priv->rx_rings[i];
2026                 napi_enable(&ring->napi);
2027         }
2028
2029         ring = &priv->rx_rings[DESC_INDEX];
2030         napi_enable(&ring->napi);
2031 }
2032
2033 static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
2034 {
2035         unsigned int i;
2036         struct bcmgenet_rx_ring *ring;
2037
2038         for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2039                 ring = &priv->rx_rings[i];
2040                 napi_disable(&ring->napi);
2041         }
2042
2043         ring = &priv->rx_rings[DESC_INDEX];
2044         napi_disable(&ring->napi);
2045 }
2046
2047 static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
2048 {
2049         unsigned int i;
2050         struct bcmgenet_rx_ring *ring;
2051
2052         for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2053                 ring = &priv->rx_rings[i];
2054                 netif_napi_del(&ring->napi);
2055         }
2056
2057         ring = &priv->rx_rings[DESC_INDEX];
2058         netif_napi_del(&ring->napi);
2059 }
2060
2061 /* Initialize Rx queues
2062  *
2063  * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
2064  * used to direct traffic to these queues.
2065  *
2066  * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
2067  */
2068 static int bcmgenet_init_rx_queues(struct net_device *dev)
2069 {
2070         struct bcmgenet_priv *priv = netdev_priv(dev);
2071         u32 i;
2072         u32 dma_enable;
2073         u32 dma_ctrl;
2074         u32 ring_cfg;
2075         int ret;
2076
2077         dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
2078         dma_enable = dma_ctrl & DMA_EN;
2079         dma_ctrl &= ~DMA_EN;
2080         bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
2081
2082         dma_ctrl = 0;
2083         ring_cfg = 0;
2084
2085         /* Initialize Rx priority queues */
2086         for (i = 0; i < priv->hw_params->rx_queues; i++) {
2087                 ret = bcmgenet_init_rx_ring(priv, i,
2088                                             priv->hw_params->rx_bds_per_q,
2089                                             i * priv->hw_params->rx_bds_per_q,
2090                                             (i + 1) *
2091                                             priv->hw_params->rx_bds_per_q);
2092                 if (ret)
2093                         return ret;
2094
2095                 ring_cfg |= (1 << i);
2096                 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2097         }
2098
2099         /* Initialize Rx default queue 16 */
2100         ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
2101                                     priv->hw_params->rx_queues *
2102                                     priv->hw_params->rx_bds_per_q,
2103                                     TOTAL_DESC);
2104         if (ret)
2105                 return ret;
2106
2107         ring_cfg |= (1 << DESC_INDEX);
2108         dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
2109
2110         /* Initialize Rx NAPI */
2111         bcmgenet_init_rx_napi(priv);
2112
2113         /* Enable rings */
2114         bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
2115
2116         /* Configure ring as descriptor ring and re-enable DMA if enabled */
2117         if (dma_enable)
2118                 dma_ctrl |= DMA_EN;
2119         bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
2120
2121         return 0;
2122 }
2123
2124 static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
2125 {
2126         int ret = 0;
2127         int timeout = 0;
2128         u32 reg;
2129
2130         /* Disable TDMA so that no new frames enter the TX DMA */
2131         reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2132         reg &= ~DMA_EN;
2133         bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2134
2135         /* Check TDMA status register to confirm TDMA is disabled */
2136         while (timeout++ < DMA_TIMEOUT_VAL) {
2137                 reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
2138                 if (reg & DMA_DISABLED)
2139                         break;
2140
2141                 udelay(1);
2142         }
2143
2144         if (timeout > DMA_TIMEOUT_VAL) {
2145                 netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
2146                 ret = -ETIMEDOUT;
2147         }
2148
2149         /* Wait 10ms for packets to drain from both Tx and Rx DMA */
2150         usleep_range(10000, 20000);
2151
2152         /* Disable RDMA */
2153         reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2154         reg &= ~DMA_EN;
2155         bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2156
2157         timeout = 0;
2158         /* Check RDMA status register to confirm RDMA is disabled */
2159         while (timeout++ < DMA_TIMEOUT_VAL) {
2160                 reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
2161                 if (reg & DMA_DISABLED)
2162                         break;
2163
2164                 udelay(1);
2165         }
2166
2167         if (timeout > DMA_TIMEOUT_VAL) {
2168                 netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
2169                 ret = -ETIMEDOUT;
2170         }
2171
2172         return ret;
2173 }
2174
2175 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
2176 {
2177         int i;
2178
2179         bcmgenet_fini_rx_napi(priv);
2180         bcmgenet_fini_tx_napi(priv);
2181
2182         /* disable DMA */
2183         bcmgenet_dma_teardown(priv);
2184
2185         for (i = 0; i < priv->num_tx_bds; i++) {
2186                 if (priv->tx_cbs[i].skb != NULL) {
2187                         dev_kfree_skb(priv->tx_cbs[i].skb);
2188                         priv->tx_cbs[i].skb = NULL;
2189                 }
2190         }
2191
2192         bcmgenet_free_rx_buffers(priv);
2193         kfree(priv->rx_cbs);
2194         kfree(priv->tx_cbs);
2195 }
2196
2197 /* init_edma: Initialize DMA control register */
2198 static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
2199 {
2200         int ret;
2201         unsigned int i;
2202         struct enet_cb *cb;
2203
2204         netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
2205
2206         /* Initialize common Rx ring structures */
2207         priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
2208         priv->num_rx_bds = TOTAL_DESC;
2209         priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
2210                                GFP_KERNEL);
2211         if (!priv->rx_cbs)
2212                 return -ENOMEM;
2213
2214         for (i = 0; i < priv->num_rx_bds; i++) {
2215                 cb = priv->rx_cbs + i;
2216                 cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
2217         }
2218
2219         /* Initialize common TX ring structures */
2220         priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
2221         priv->num_tx_bds = TOTAL_DESC;
2222         priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
2223                                GFP_KERNEL);
2224         if (!priv->tx_cbs) {
2225                 kfree(priv->rx_cbs);
2226                 return -ENOMEM;
2227         }
2228
2229         for (i = 0; i < priv->num_tx_bds; i++) {
2230                 cb = priv->tx_cbs + i;
2231                 cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
2232         }
2233
2234         /* Init RDMA */
2235         bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
2236
2237         /* Initialize Rx queues */
2238         ret = bcmgenet_init_rx_queues(priv->dev);
2239         if (ret) {
2240                 netdev_err(priv->dev, "failed to initialize Rx queues\n");
2241                 bcmgenet_free_rx_buffers(priv);
2242                 kfree(priv->rx_cbs);
2243                 kfree(priv->tx_cbs);
2244                 return ret;
2245         }
2246
2247         /* Init TDMA */
2248         bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
2249
2250         /* Initialize Tx queues */
2251         bcmgenet_init_tx_queues(priv->dev);
2252
2253         return 0;
2254 }
2255
2256 /* Interrupt bottom half */
2257 static void bcmgenet_irq_task(struct work_struct *work)
2258 {
2259         struct bcmgenet_priv *priv = container_of(
2260                         work, struct bcmgenet_priv, bcmgenet_irq_work);
2261
2262         netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
2263
2264         if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
2265                 priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
2266                 netif_dbg(priv, wol, priv->dev,
2267                           "magic packet detected, waking up\n");
2268                 bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
2269         }
2270
2271         /* Link UP/DOWN event */
2272         if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
2273             (priv->irq0_stat & UMAC_IRQ_LINK_EVENT)) {
2274                 phy_mac_interrupt(priv->phydev,
2275                                   !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
2276                 priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
2277         }
2278 }
2279
2280 /* bcmgenet_isr1: handle Rx and Tx priority queues */
2281 static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
2282 {
2283         struct bcmgenet_priv *priv = dev_id;
2284         struct bcmgenet_rx_ring *rx_ring;
2285         struct bcmgenet_tx_ring *tx_ring;
2286         unsigned int index;
2287
2288         /* Save irq status for bottom-half processing. */
2289         priv->irq1_stat =
2290                 bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
2291                 ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
2292
2293         /* clear interrupts */
2294         bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
2295
2296         netif_dbg(priv, intr, priv->dev,
2297                   "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
2298
2299         /* Check Rx priority queue interrupts */
2300         for (index = 0; index < priv->hw_params->rx_queues; index++) {
2301                 if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
2302                         continue;
2303
2304                 rx_ring = &priv->rx_rings[index];
2305
2306                 if (likely(napi_schedule_prep(&rx_ring->napi))) {
2307                         rx_ring->int_disable(rx_ring);
2308                         __napi_schedule(&rx_ring->napi);
2309                 }
2310         }
2311
2312         /* Check Tx priority queue interrupts */
2313         for (index = 0; index < priv->hw_params->tx_queues; index++) {
2314                 if (!(priv->irq1_stat & BIT(index)))
2315                         continue;
2316
2317                 tx_ring = &priv->tx_rings[index];
2318
2319                 if (likely(napi_schedule_prep(&tx_ring->napi))) {
2320                         tx_ring->int_disable(tx_ring);
2321                         __napi_schedule(&tx_ring->napi);
2322                 }
2323         }
2324
2325         return IRQ_HANDLED;
2326 }
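/* Note: int_disable() masks a ring's interrupt before __napi_schedule(),
 * and the source stays masked until the corresponding poll routine
 * finishes under budget and calls int_enable(), so no further hard
 * interrupts fire for a ring while its NAPI context is active.
 */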
2327
2328 /* bcmgenet_isr0: handle the Rx and Tx default queues plus miscellaneous
2329  * events (PHY detect, link, HFB, MPD and MDIO)
2330  */
2329 static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
2330 {
2331         struct bcmgenet_priv *priv = dev_id;
2332         struct bcmgenet_rx_ring *rx_ring;
2333         struct bcmgenet_tx_ring *tx_ring;
2334
2335         /* Save irq status for bottom-half processing. */
2336         priv->irq0_stat =
2337                 bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
2338                 ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
2339
2340         /* clear interrupts */
2341         bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
2342
2343         netif_dbg(priv, intr, priv->dev,
2344                   "IRQ=0x%x\n", priv->irq0_stat);
2345
2346         if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
2347                 rx_ring = &priv->rx_rings[DESC_INDEX];
2348
2349                 if (likely(napi_schedule_prep(&rx_ring->napi))) {
2350                         rx_ring->int_disable(rx_ring);
2351                         __napi_schedule(&rx_ring->napi);
2352                 }
2353         }
2354
2355         if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
2356                 tx_ring = &priv->tx_rings[DESC_INDEX];
2357
2358                 if (likely(napi_schedule_prep(&tx_ring->napi))) {
2359                         tx_ring->int_disable(tx_ring);
2360                         __napi_schedule(&tx_ring->napi);
2361                 }
2362         }
2363
2364         if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
2365                                 UMAC_IRQ_PHY_DET_F |
2366                                 UMAC_IRQ_LINK_EVENT |
2367                                 UMAC_IRQ_HFB_SM |
2368                                 UMAC_IRQ_HFB_MM |
2369                                 UMAC_IRQ_MPD_R)) {
2370                 /* all other interrupts of interest are handled in the bottom half */
2371                 schedule_work(&priv->bcmgenet_irq_work);
2372         }
2373
2374         if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
2375             priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
2376                 priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
2377                 wake_up(&priv->wq);
2378         }
2379
2380         return IRQ_HANDLED;
2381 }
2382
2383 static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
2384 {
2385         struct bcmgenet_priv *priv = dev_id;
2386
2387         pm_wakeup_event(&priv->pdev->dev, 0);
2388
2389         return IRQ_HANDLED;
2390 }
2391
2392 static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
2393 {
2394         u32 reg;
2395
2396         reg = bcmgenet_rbuf_ctrl_get(priv);
2397         reg |= BIT(1);
2398         bcmgenet_rbuf_ctrl_set(priv, reg);
2399         udelay(10);
2400
2401         reg &= ~BIT(1);
2402         bcmgenet_rbuf_ctrl_set(priv, reg);
2403         udelay(10);
2404 }
2405
2406 static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
2407                                  unsigned char *addr)
2408 {
2409         bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
2410                         (addr[2] << 8) | addr[3], UMAC_MAC0);
2411         bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
2412 }
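/* Worked example (hypothetical address): 00:11:22:33:44:55 is written
 * as UMAC_MAC0 = 0x00112233 and UMAC_MAC1 = 0x00004455.
 */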
2413
2414 /* Returns a reusable dma control register value */
2415 static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
2416 {
2417         u32 reg;
2418         u32 dma_ctrl;
2419
2420         /* disable DMA */
2421         dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
2422         reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2423         reg &= ~dma_ctrl;
2424         bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2425
2426         reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2427         reg &= ~dma_ctrl;
2428         bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2429
2430         bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
2431         udelay(10);
2432         bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
2433
2434         return dma_ctrl;
2435 }
2436
2437 static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
2438 {
2439         u32 reg;
2440
2441         reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2442         reg |= dma_ctrl;
2443         bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2444
2445         reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2446         reg |= dma_ctrl;
2447         bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2448 }
2449
2450 static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
2451                                            u32 f_index)
2452 {
2453         u32 offset;
2454         u32 reg;
2455
2456         offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
2457         reg = bcmgenet_hfb_reg_readl(priv, offset);
2458         return !!(reg & (1 << (f_index % 32)));
2459 }
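/* Note on the offset computation above: two 32-bit enable words live at
 * HFB_FLT_ENABLE_V3PLUS, and the (f_index < 32) * sizeof(u32) term
 * selects the second word for filters 0-31 and the first for filters
 * 32-47, with bit (f_index % 32) inside the chosen word.
 */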
2460
2461 static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
2462 {
2463         u32 offset;
2464         u32 reg;
2465
2466         offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
2467         reg = bcmgenet_hfb_reg_readl(priv, offset);
2468         reg |= (1 << (f_index % 32));
2469         bcmgenet_hfb_reg_writel(priv, reg, offset);
2470 }
2471
2472 static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
2473                                                      u32 f_index, u32 rx_queue)
2474 {
2475         u32 offset;
2476         u32 reg;
2477
2478         offset = f_index / 8;
2479         reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
2480         reg &= ~(0xF << (4 * (f_index % 8)));
2481         reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
2482         bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
2483 }
2484
2485 static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
2486                                            u32 f_index, u32 f_length)
2487 {
2488         u32 offset;
2489         u32 reg;
2490
2491         offset = HFB_FLT_LEN_V3PLUS +
2492                  ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
2493                  sizeof(u32);
2494         reg = bcmgenet_hfb_reg_readl(priv, offset);
2495         reg &= ~(0xFF << (8 * (f_index % 4)));
2496         reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
2497         bcmgenet_hfb_reg_writel(priv, reg, offset);
2498 }
2499
2500 static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
2501 {
2502         u32 f_index;
2503
2504         for (f_index = 0; f_index < priv->hw_params->hfb_filter_cnt; f_index++)
2505                 if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
2506                         return f_index;
2507
2508         return -ENOMEM;
2509 }
2510
2511 /* bcmgenet_hfb_add_filter
2512  *
2513  * Add new filter to Hardware Filter Block to match and direct Rx traffic to
2514  * desired Rx queue.
2515  *
2516  * f_data is an array of unsigned 32-bit integers where each 32-bit integer
2517  * provides filter data for 2 bytes (4 nibbles) of Rx frame:
2518  *
2519  * bits 31:20 - unused
2520  * bit  19    - nibble 0 match enable
2521  * bit  18    - nibble 1 match enable
2522  * bit  17    - nibble 2 match enable
2523  * bit  16    - nibble 3 match enable
2524  * bits 15:12 - nibble 0 data
2525  * bits 11:8  - nibble 1 data
2526  * bits 7:4   - nibble 2 data
2527  * bits 3:0   - nibble 3 data
2528  *
2529  * Example:
2530  * In order to match:
2531  * - Ethernet frame type = 0x0800 (IP)
2532  * - IP version field = 4
2533  * - IP protocol field = 0x11 (UDP)
2534  *
2535  * The following filter is needed:
2536  * u32 hfb_filter_ipv4_udp[] = {
2537  *   Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2538  *   Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
2539  *   Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
2540  * };
2541  *
2542  * To add the filter to HFB and direct the traffic to Rx queue 0, call:
2543  * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
2544  *                         ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
2545  */
2546 int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
2547                             u32 f_length, u32 rx_queue)
2548 {
2549         int f_index;
2550         u32 i;
2551
2552         f_index = bcmgenet_hfb_find_unused_filter(priv);
2553         if (f_index < 0)
2554                 return -ENOMEM;
2555
2556         if (f_length > priv->hw_params->hfb_filter_size)
2557                 return -EINVAL;
2558
2559         for (i = 0; i < f_length; i++)
2560                 bcmgenet_hfb_writel(priv, f_data[i],
2561                         (f_index * priv->hw_params->hfb_filter_size + i) *
2562                         sizeof(u32));
2563
2564         bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length);
2565         bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue);
2566         bcmgenet_hfb_enable_filter(priv, f_index);
2567         bcmgenet_hfb_reg_writel(priv, 0x1, HFB_CTRL);
2568
2569         return 0;
2570 }
2571
2572 /* bcmgenet_hfb_clear
2573  *
2574  * Clear Hardware Filter Block and disable all filtering.
2575  */
2576 static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
2577 {
2578         u32 i;
2579
2580         bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
2581         bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
2582         bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
2583
2584         for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
2585                 bcmgenet_rdma_writel(priv, 0x0, i);
2586
2587         for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
2588                 bcmgenet_hfb_reg_writel(priv, 0x0,
2589                                         HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
2590
2591         for (i = 0; i < priv->hw_params->hfb_filter_cnt *
2592                         priv->hw_params->hfb_filter_size; i++)
2593                 bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
2594 }
2595
2596 static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
2597 {
2598         if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
2599                 return;
2600
2601         bcmgenet_hfb_clear(priv);
2602 }
2603
2604 static void bcmgenet_netif_start(struct net_device *dev)
2605 {
2606         struct bcmgenet_priv *priv = netdev_priv(dev);
2607
2608         /* Start the network engine */
2609         bcmgenet_enable_rx_napi(priv);
2610         bcmgenet_enable_tx_napi(priv);
2611
2612         umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
2613
2614         netif_tx_start_all_queues(dev);
2615
2616         phy_start(priv->phydev);
2617 }
2618
2619 static int bcmgenet_open(struct net_device *dev)
2620 {
2621         struct bcmgenet_priv *priv = netdev_priv(dev);
2622         unsigned long dma_ctrl;
2623         u32 reg;
2624         int ret;
2625
2626         netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
2627
2628         /* Turn on the clock */
2629         if (!IS_ERR(priv->clk))
2630                 clk_prepare_enable(priv->clk);
2631
2632         /* If this is an internal GPHY, power it back on now, before UniMAC is
2633          * brought out of reset, as absolutely no UniMAC activity is allowed
2634          */
2635         if (phy_is_internal(priv->phydev))
2636                 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
2637
2638         /* take MAC out of reset */
2639         bcmgenet_umac_reset(priv);
2640
2641         ret = init_umac(priv);
2642         if (ret)
2643                 goto err_clk_disable;
2644
2645         /* disable ethernet MAC while updating its registers */
2646         umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
2647
2648         /* Make sure we reflect the value of CRC_CMD_FWD */
2649         reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2650         priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
2651
2652         bcmgenet_set_hw_addr(priv, dev->dev_addr);
2653
2654         if (phy_is_internal(priv->phydev)) {
2655                 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
2656                 reg |= EXT_ENERGY_DET_MASK;
2657                 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
2658         }
2659
2660         /* Disable RX/TX DMA and flush TX queues */
2661         dma_ctrl = bcmgenet_dma_disable(priv);
2662
2663         /* Reinitialize TDMA and RDMA and SW housekeeping */
2664         ret = bcmgenet_init_dma(priv);
2665         if (ret) {
2666                 netdev_err(dev, "failed to initialize DMA\n");
2667                 goto err_clk_disable;
2668         }
2669
2670         /* Always enable ring 16 - descriptor ring */
2671         bcmgenet_enable_dma(priv, dma_ctrl);
2672
2673         /* HFB init */
2674         bcmgenet_hfb_init(priv);
2675
2676         ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
2677                           dev->name, priv);
2678         if (ret < 0) {
2679                 netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
2680                 goto err_fini_dma;
2681         }
2682
2683         ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
2684                           dev->name, priv);
2685         if (ret < 0) {
2686                 netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
2687                 goto err_irq0;
2688         }
2689
2690         /* Re-configure the port multiplexer towards the PHY device */
2691         bcmgenet_mii_config(priv->dev, false);
2692
2693         ret = phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
2694                                  priv->phy_interface);
2695         if (ret) {
2696                 netdev_err(dev, "could not connect to PHY\n");
2697                 goto err_irq1;
2698         }
2695
2696         bcmgenet_netif_start(dev);
2697
2698         return 0;
2699
2700 err_irq1:
2701         free_irq(priv->irq1, priv);
2702 err_irq0:
2703         free_irq(priv->irq0, priv);
2702 err_fini_dma:
2703         bcmgenet_fini_dma(priv);
2704 err_clk_disable:
2705         if (!IS_ERR(priv->clk))
2706                 clk_disable_unprepare(priv->clk);
2707         return ret;
2708 }
2709
2710 static void bcmgenet_netif_stop(struct net_device *dev)
2711 {
2712         struct bcmgenet_priv *priv = netdev_priv(dev);
2713
2714         netif_tx_stop_all_queues(dev);
2715         phy_stop(priv->phydev);
2716         bcmgenet_intr_disable(priv);
2717         bcmgenet_disable_rx_napi(priv);
2718         bcmgenet_disable_tx_napi(priv);
2719
2720         /* Wait for pending work items to complete. Since interrupts are
2721          * disabled no new work will be scheduled.
2722          */
2723         cancel_work_sync(&priv->bcmgenet_irq_work);
2724
2725         priv->old_link = -1;
2726         priv->old_speed = -1;
2727         priv->old_duplex = -1;
2728         priv->old_pause = -1;
2729 }
2730
2731 static int bcmgenet_close(struct net_device *dev)
2732 {
2733         struct bcmgenet_priv *priv = netdev_priv(dev);
2734         int ret;
2735
2736         netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
2737
2738         bcmgenet_netif_stop(dev);
2739
2740         /* Really kill the PHY state machine and disconnect from it */
2741         phy_disconnect(priv->phydev);
2742
2743         /* Disable MAC receive */
2744         umac_enable_set(priv, CMD_RX_EN, false);
2745
2746         ret = bcmgenet_dma_teardown(priv);
2747         if (ret)
2748                 return ret;
2749
2750         /* Disable MAC transmit. TX DMA must be disabled before this */
2751         umac_enable_set(priv, CMD_TX_EN, false);
2752
2753         /* tx reclaim */
2754         bcmgenet_tx_reclaim_all(dev);
2755         bcmgenet_fini_dma(priv);
2756
2757         free_irq(priv->irq0, priv);
2758         free_irq(priv->irq1, priv);
2759
2760         if (phy_is_internal(priv->phydev))
2761                 ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
2762
2763         if (!IS_ERR(priv->clk))
2764                 clk_disable_unprepare(priv->clk);
2765
2766         return ret;
2767 }
2768
2769 static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
2770 {
2771         struct bcmgenet_priv *priv = ring->priv;
2772         u32 p_index, c_index, intsts, intmsk;
2773         struct netdev_queue *txq;
2774         unsigned int free_bds;
2775         unsigned long flags;
2776         bool txq_stopped;
2777
2778         if (!netif_msg_tx_err(priv))
2779                 return;
2780
2781         txq = netdev_get_tx_queue(priv->dev, ring->queue);
2782
2783         spin_lock_irqsave(&ring->lock, flags);
2784         if (ring->index == DESC_INDEX) {
2785                 intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
2786                 intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
2787         } else {
2788                 intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
2789                 intmsk = 1 << ring->index;
2790         }
2791         c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
2792         p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
2793         txq_stopped = netif_tx_queue_stopped(txq);
2794         free_bds = ring->free_bds;
2795         spin_unlock_irqrestore(&ring->lock, flags);
2796
2797         netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
2798                   "TX queue status: %s, interrupts: %s\n"
2799                   "(sw)free_bds: %d (sw)size: %d\n"
2800                   "(sw)p_index: %d (hw)p_index: %d\n"
2801                   "(sw)c_index: %d (hw)c_index: %d\n"
2802                   "(sw)clean_p: %d (sw)write_p: %d\n"
2803                   "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
2804                   ring->index, ring->queue,
2805                   txq_stopped ? "stopped" : "active",
2806                   intsts & intmsk ? "enabled" : "disabled",
2807                   free_bds, ring->size,
2808                   ring->prod_index, p_index & DMA_P_INDEX_MASK,
2809                   ring->c_index, c_index & DMA_C_INDEX_MASK,
2810                   ring->clean_ptr, ring->write_ptr,
2811                   ring->cb_ptr, ring->end_ptr);
2812 }
2813
2814 static void bcmgenet_timeout(struct net_device *dev)
2815 {
2816         struct bcmgenet_priv *priv = netdev_priv(dev);
2817         u32 int0_enable = 0;
2818         u32 int1_enable = 0;
2819         unsigned int q;
2820
2821         netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
2822
2823         bcmgenet_disable_tx_napi(priv);
2824
2825         for (q = 0; q < priv->hw_params->tx_queues; q++)
2826                 bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
2827         bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
2828
2829         bcmgenet_tx_reclaim_all(dev);
2830
2831         for (q = 0; q < priv->hw_params->tx_queues; q++)
2832                 int1_enable |= (1 << q);
2833
2834         int0_enable = UMAC_IRQ_TXDMA_DONE;
2835
2836         /* Re-enable TX interrupts if disabled */
2837         bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
2838         bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
2839
2840         bcmgenet_enable_tx_napi(priv);
2841
2842         dev->trans_start = jiffies;
2843
2844         dev->stats.tx_errors++;
2845
2846         netif_tx_wake_all_queues(dev);
2847 }
2848
2849 #define MAX_MC_COUNT    16
2850
2851 static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
2852                                          unsigned char *addr,
2853                                          int *i,
2854                                          int *mc)
2855 {
2856         u32 reg;
2857
2858         bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
2859                              UMAC_MDF_ADDR + (*i * 4));
2860         bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
2861                              addr[4] << 8 | addr[5],
2862                              UMAC_MDF_ADDR + ((*i + 1) * 4));
2863         reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
2864         reg |= (1 << (MAX_MC_COUNT - *mc));
2865         bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
2866         *i += 2;
2867         (*mc)++;
2868 }
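/* Worked example: the first call (the broadcast address in
 * bcmgenet_set_rx_mode() below) writes MDF_ADDR words 0 and 1 and sets
 * enable bit (MAX_MC_COUNT - 0) = 16 in UMAC_MDF_CTRL; each subsequent
 * address consumes the next two address words and the next lower
 * enable bit.
 */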
2869
2870 static void bcmgenet_set_rx_mode(struct net_device *dev)
2871 {
2872         struct bcmgenet_priv *priv = netdev_priv(dev);
2873         struct netdev_hw_addr *ha;
2874         int i, mc;
2875         u32 reg;
2876
2877         netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
2878
2879         /* Promiscuous mode */
2880         reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2881         if (dev->flags & IFF_PROMISC) {
2882                 reg |= CMD_PROMISC;
2883                 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
2884                 bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
2885                 return;
2886         }
2887
2888         reg &= ~CMD_PROMISC;
2889         bcmgenet_umac_writel(priv, reg, UMAC_CMD);
2890
2891         /* UniMAC doesn't support ALLMULTI */
2892         if (dev->flags & IFF_ALLMULTI) {
2893                 netdev_warn(dev, "ALLMULTI is not supported\n");
2894                 return;
2895         }
2896
2897         /* update MDF filter */
2898         i = 0;
2899         mc = 0;
2900         /* Broadcast */
2901         bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
2902         /* My own address */
2903         bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
2904         /* Unicast list */
2905         if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
2906                 return;
2907
2908         if (!netdev_uc_empty(dev))
2909                 netdev_for_each_uc_addr(ha, dev)
2910                         bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
2911         /* Multicast */
2912         if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
2913                 return;
2914
2915         netdev_for_each_mc_addr(ha, dev)
2916                 bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
2917 }
2918
2919 /* Set the hardware MAC address. */
2920 static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
2921 {
2922         struct sockaddr *addr = p;
2923
2924         /* Setting the MAC address at the hardware level is not possible
2925          * without disabling the UniMAC RX/TX enable bits.
2926          */
2927         if (netif_running(dev))
2928                 return -EBUSY;
2929
2930         ether_addr_copy(dev->dev_addr, addr->sa_data);
2931
2932         return 0;
2933 }
2934
2935 static const struct net_device_ops bcmgenet_netdev_ops = {
2936         .ndo_open               = bcmgenet_open,
2937         .ndo_stop               = bcmgenet_close,
2938         .ndo_start_xmit         = bcmgenet_xmit,
2939         .ndo_tx_timeout         = bcmgenet_timeout,
2940         .ndo_set_rx_mode        = bcmgenet_set_rx_mode,
2941         .ndo_set_mac_address    = bcmgenet_set_mac_addr,
2942         .ndo_do_ioctl           = bcmgenet_ioctl,
2943         .ndo_set_features       = bcmgenet_set_features,
2944 };
2945
2946 /* Array of GENET hardware parameters, indexed by enum bcmgenet_version */
2947 static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
2948         [GENET_V1] = {
2949                 .tx_queues = 0,
2950                 .tx_bds_per_q = 0,
2951                 .rx_queues = 0,
2952                 .rx_bds_per_q = 0,
2953                 .bp_in_en_shift = 16,
2954                 .bp_in_mask = 0xffff,
2955                 .hfb_filter_cnt = 16,
2956                 .qtag_mask = 0x1F,
2957                 .hfb_offset = 0x1000,
2958                 .rdma_offset = 0x2000,
2959                 .tdma_offset = 0x3000,
2960                 .words_per_bd = 2,
2961         },
2962         [GENET_V2] = {
2963                 .tx_queues = 4,
2964                 .tx_bds_per_q = 32,
2965                 .rx_queues = 0,
2966                 .rx_bds_per_q = 0,
2967                 .bp_in_en_shift = 16,
2968                 .bp_in_mask = 0xffff,
2969                 .hfb_filter_cnt = 16,
2970                 .qtag_mask = 0x1F,
2971                 .tbuf_offset = 0x0600,
2972                 .hfb_offset = 0x1000,
2973                 .hfb_reg_offset = 0x2000,
2974                 .rdma_offset = 0x3000,
2975                 .tdma_offset = 0x4000,
2976                 .words_per_bd = 2,
2977                 .flags = GENET_HAS_EXT,
2978         },
2979         [GENET_V3] = {
2980                 .tx_queues = 4,
2981                 .tx_bds_per_q = 32,
2982                 .rx_queues = 0,
2983                 .rx_bds_per_q = 0,
2984                 .bp_in_en_shift = 17,
2985                 .bp_in_mask = 0x1ffff,
2986                 .hfb_filter_cnt = 48,
2987                 .hfb_filter_size = 128,
2988                 .qtag_mask = 0x3F,
2989                 .tbuf_offset = 0x0600,
2990                 .hfb_offset = 0x8000,
2991                 .hfb_reg_offset = 0xfc00,
2992                 .rdma_offset = 0x10000,
2993                 .tdma_offset = 0x11000,
2994                 .words_per_bd = 2,
2995                 .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
2996                          GENET_HAS_MOCA_LINK_DET,
2997         },
2998         [GENET_V4] = {
2999                 .tx_queues = 4,
3000                 .tx_bds_per_q = 32,
3001                 .rx_queues = 0,
3002                 .rx_bds_per_q = 0,
3003                 .bp_in_en_shift = 17,
3004                 .bp_in_mask = 0x1ffff,
3005                 .hfb_filter_cnt = 48,
3006                 .hfb_filter_size = 128,
3007                 .qtag_mask = 0x3F,
3008                 .tbuf_offset = 0x0600,
3009                 .hfb_offset = 0x8000,
3010                 .hfb_reg_offset = 0xfc00,
3011                 .rdma_offset = 0x2000,
3012                 .tdma_offset = 0x4000,
3013                 .words_per_bd = 3,
3014                 .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
3015                          GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
3016         },
3017 };
3018
3019 /* Apply hardware parameters for the configured GENET version */
3020 static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
3021 {
3022         struct bcmgenet_hw_params *params;
3023         u32 reg;
3024         u8 major;
3025         u16 gphy_rev;
3026
3027         if (GENET_IS_V4(priv)) {
3028                 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
3029                 genet_dma_ring_regs = genet_dma_ring_regs_v4;
3030                 priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
3031                 priv->version = GENET_V4;
3032         } else if (GENET_IS_V3(priv)) {
3033                 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
3034                 genet_dma_ring_regs = genet_dma_ring_regs_v123;
3035                 priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
3036                 priv->version = GENET_V3;
3037         } else if (GENET_IS_V2(priv)) {
3038                 bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
3039                 genet_dma_ring_regs = genet_dma_ring_regs_v123;
3040                 priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
3041                 priv->version = GENET_V2;
3042         } else if (GENET_IS_V1(priv)) {
3043                 bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
3044                 genet_dma_ring_regs = genet_dma_ring_regs_v123;
3045                 priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
3046                 priv->version = GENET_V1;
3047         }
3048
3049         /* enum genet_version starts at 1 */
3050         priv->hw_params = &bcmgenet_hw_params[priv->version];
3051         params = priv->hw_params;
3052
3053         /* Read GENET HW version */
3054         reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
3055         major = (reg >> 24) & 0x0f;
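        /* Normalize the reported major revision: hardware reporting
         * major 5 is driven with the GENET_V4 parameters and major 0
         * denotes the original V1 core, so the comparison against
         * priv->version below lines up with enum bcmgenet_version.
         */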
3056         if (major == 5)
3057                 major = 4;
3058         else if (major == 0)
3059                 major = 1;
3060         if (major != priv->version) {
3061                 dev_err(&priv->pdev->dev,
3062                         "GENET version mismatch, got: %d, configured for: %d\n",
3063                         major, priv->version);
3064         }
3065
3066         /* Print the GENET core version */
3067         dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
3068                  major, (reg >> 16) & 0x0f, reg & 0xffff);
3069
3070         /* Store the integrated PHY revision for the MDIO probing function
3071          * to pass this information to the PHY driver. The PHY driver expects
3072          * to find the PHY major revision in bits 15:8 while the GENET register
3073          * stores that information in bits 7:0, account for that.
3074          *
3075          * On newer chips, starting with PHY revision G0, a new scheme is
3076          * deployed similar to the Starfighter 2 switch with GPHY major
3077          * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
3078          * is reserved, as is the special value 0x01ff; use a small
3079          * heuristic to detect the new GPHY revision scheme and
3080          * re-arrange things so the GPHY driver is happy.
3081          */
3082         gphy_rev = reg & 0xffff;
3083
3084         /* This is the good old scheme, just GPHY major, no minor nor patch */
3085         if ((gphy_rev & 0xf0) != 0)
3086                 priv->gphy_rev = gphy_rev << 8;
3087
3088         /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
3089         else if ((gphy_rev & 0xff00) != 0)
3090                 priv->gphy_rev = gphy_rev;
3091
3092         /* This is reserved so should require special treatment */
3093         else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
3094                 pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
3095                 return;
3096         }
3097
3098 #ifdef CONFIG_PHYS_ADDR_T_64BIT
3099         if (!(params->flags & GENET_HAS_40BITS))
3100                 pr_warn("GENET does not support 40-bit PA\n");
3101 #endif
3102
3103         pr_debug("Configuration for version: %d\n"
3104                 "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
3105                 "BP << en: %2d, BP msk: 0x%05x\n"
3106                 "HFB count: %2d, QTAG msk: 0x%05x\n"
3107                 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
3108                 "RDMA: 0x%05x, TDMA: 0x%05x\n"
3109                 "Words/BD: %d\n",
3110                 priv->version,
3111                 params->tx_queues, params->tx_bds_per_q,
3112                 params->rx_queues, params->rx_bds_per_q,
3113                 params->bp_in_en_shift, params->bp_in_mask,
3114                 params->hfb_filter_cnt, params->qtag_mask,
3115                 params->tbuf_offset, params->hfb_offset,
3116                 params->hfb_reg_offset,
3117                 params->rdma_offset, params->tdma_offset,
3118                 params->words_per_bd);
3119 }
3120
3121 static const struct of_device_id bcmgenet_match[] = {
3122         { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
3123         { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
3124         { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
3125         { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
3126         { },
3127 };
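
/* Export the match table so module autoloading works for the DT
 * compatibles above; the .data cookie carries the bcmgenet_version
 * value that bcmgenet_probe() assigns to priv->version.
 */
MODULE_DEVICE_TABLE(of, bcmgenet_match);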
3128
3129 static int bcmgenet_probe(struct platform_device *pdev)
3130 {
3131         struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
3132         struct device_node *dn = pdev->dev.of_node;
3133         const struct of_device_id *of_id = NULL;
3134         struct bcmgenet_priv *priv;
3135         struct net_device *dev;
3136         const void *macaddr;
3137         struct resource *r;
3138         int err = -EINVAL;
3139
3140         /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
3141         dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
3142                                  GENET_MAX_MQ_CNT + 1);
3143         if (!dev) {
3144                 dev_err(&pdev->dev, "can't allocate net device\n");
3145                 return -ENOMEM;
3146         }
3147
3148         if (dn) {
3149                 of_id = of_match_node(bcmgenet_match, dn);
3150                 if (!of_id)
3151                         goto err;       /* err preset; don't leak dev */
3152         }
3153
3154         priv = netdev_priv(dev);
3155         priv->irq0 = platform_get_irq(pdev, 0);
3156         priv->irq1 = platform_get_irq(pdev, 1);
3157         priv->wol_irq = platform_get_irq(pdev, 2);
3158         if (priv->irq0 < 0 || priv->irq1 < 0) {
3159                 dev_err(&pdev->dev, "can't find IRQs\n");
3160                 err = -EINVAL;
3161                 goto err;
3162         }
3163
3164         if (dn) {
3165                 macaddr = of_get_mac_address(dn);
3166                 if (!macaddr) {
3167                         dev_err(&pdev->dev, "can't find MAC address\n");
3168                         err = -EINVAL;
3169                         goto err;
3170                 }
3171         } else {
3172                 macaddr = pd->mac_address;
3173         }
3174
3175         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3176         priv->base = devm_ioremap_resource(&pdev->dev, r);
3177         if (IS_ERR(priv->base)) {
3178                 err = PTR_ERR(priv->base);
3179                 goto err;
3180         }
3181
3182         SET_NETDEV_DEV(dev, &pdev->dev);
3183         dev_set_drvdata(&pdev->dev, dev);
3184         ether_addr_copy(dev->dev_addr, macaddr);
3185         dev->watchdog_timeo = 2 * HZ;
3186         dev->ethtool_ops = &bcmgenet_ethtool_ops;
3187         dev->netdev_ops = &bcmgenet_netdev_ops;
3188
3189         priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
3190
3191         /* Set hardware features */
3192         dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
3193                 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
3194
3195         /* Request the WOL interrupt and advertise suspend if available */
3196         priv->wol_irq_disabled = true;
3197         err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
3198                                dev->name, priv);
3199         if (!err)
3200                 device_set_wakeup_capable(&pdev->dev, 1);
3201
3202         /* Set the needed headroom to account for any possible
3203          * features enabling/disabling at runtime
3204          */
3205         dev->needed_headroom += 64;
3206
3207         netdev_boot_setup_check(dev);
3208
3209         priv->dev = dev;
3210         priv->pdev = pdev;
3211         if (of_id)
3212                 priv->version = (enum bcmgenet_version)of_id->data;
3213         else
3214                 priv->version = pd->genet_version;
3215
3216         priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
3217         if (IS_ERR(priv->clk))
3218                 dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
3219
3220         if (!IS_ERR(priv->clk))
3221                 clk_prepare_enable(priv->clk);
3222
3223         bcmgenet_set_hw_params(priv);
3224
3225         /* MII wait queue */
3226         init_waitqueue_head(&priv->wq);
3227         /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
3228         priv->rx_buf_len = RX_BUF_LENGTH;
3229         INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
3230
3231         priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
3232         if (IS_ERR(priv->clk_wol))
3233                 dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
3234
3235         priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
3236         if (IS_ERR(priv->clk_eee)) {
3237                 dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
3238                 priv->clk_eee = NULL;
3239         }
3240
3241         err = reset_umac(priv);
3242         if (err)
3243                 goto err_clk_disable;
3244
3245         err = bcmgenet_mii_init(dev);
3246         if (err)
3247                 goto err_clk_disable;
3248
3249         /* Set up the number of real queues + 1 (GENET_V1 has 0 hardware
3250          * queues, just the ring 16 descriptor-based TX and RX queues).
3251          */
3252         netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
3253         netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
3254
3255         /* libphy will determine the link state */
3256         netif_carrier_off(dev);
3257
3258         /* Turn off the main clock, WOL clock is handled separately */
3259         if (!IS_ERR(priv->clk))
3260                 clk_disable_unprepare(priv->clk);
3261
3262         err = register_netdev(dev);
3263         if (err)
3264                 goto err;
3265
3266         return err;
3267
3268 err_clk_disable:
3269         if (!IS_ERR(priv->clk))
3270                 clk_disable_unprepare(priv->clk);
3271 err:
3272         free_netdev(dev);
3273         return err;
3274 }
3275
3276 static int bcmgenet_remove(struct platform_device *pdev)
3277 {
3278         struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
3279
3280         dev_set_drvdata(&pdev->dev, NULL);
3281         unregister_netdev(priv->dev);
3282         bcmgenet_mii_exit(priv->dev);
3283         free_netdev(priv->dev);
3284
3285         return 0;
3286 }
3287
3288 #ifdef CONFIG_PM_SLEEP
3289 static int bcmgenet_suspend(struct device *d)
3290 {
3291         struct net_device *dev = dev_get_drvdata(d);
3292         struct bcmgenet_priv *priv = netdev_priv(dev);
3293         int ret;
3294
3295         if (!netif_running(dev))
3296                 return 0;
3297
3298         bcmgenet_netif_stop(dev);
3299
3300         phy_suspend(priv->phydev);
3301
3302         netif_device_detach(dev);
3303
3304         /* Disable MAC receive */
3305         umac_enable_set(priv, CMD_RX_EN, false);
3306
3307         ret = bcmgenet_dma_teardown(priv);
3308         if (ret)
3309                 return ret;
3310
3311         /* Disable MAC transmit. TX DMA must be disabled before this. */
3312         umac_enable_set(priv, CMD_TX_EN, false);
3313
3314         /* tx reclaim */
3315         bcmgenet_tx_reclaim_all(dev);
3316         bcmgenet_fini_dma(priv);
3317
3318         /* Prepare the device for Wake-on-LAN and switch to the slow clock */
3319         if (device_may_wakeup(d) && priv->wolopts) {
3320                 ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
3321                 clk_prepare_enable(priv->clk_wol);
3322         } else if (phy_is_internal(priv->phydev)) {
3323                 ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
3324         }
3325
3326         /* Turn off the clocks */
3327         clk_disable_unprepare(priv->clk);
3328
3329         return ret;
3330 }
3331
3332 static int bcmgenet_resume(struct device *d)
3333 {
3334         struct net_device *dev = dev_get_drvdata(d);
3335         struct bcmgenet_priv *priv = netdev_priv(dev);
3336         unsigned long dma_ctrl;
3337         int ret;
3338         u32 reg;
3339
3340         if (!netif_running(dev))
3341                 return 0;
3342
3343         /* Turn on the clock */
3344         ret = clk_prepare_enable(priv->clk);
3345         if (ret)
3346                 return ret;
3347
3348         /* If this is an internal GPHY, power it back on now, before UniMAC is
3349          * brought out of reset, as absolutely no UniMAC activity is allowed
3350          */
3351         if (phy_is_internal(priv->phydev))
3352                 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
3353
3354         bcmgenet_umac_reset(priv);
3355
3356         ret = init_umac(priv);
3357         if (ret)
3358                 goto out_clk_disable;
3359
3360         /* From WOL-enabled suspend, switch to regular clock */
3361         if (priv->wolopts)
3362                 clk_disable_unprepare(priv->clk_wol);
3363
3364         phy_init_hw(priv->phydev);
3365         /* Speed settings must be restored */
3366         bcmgenet_mii_config(priv->dev, false);
3367
3368         /* disable ethernet MAC while updating its registers */
3369         umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
3370
3371         bcmgenet_set_hw_addr(priv, dev->dev_addr);
3372
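        /* Re-assert energy detection for the internal PHY, as in the
         * normal bring-up path (assumption: EXT register state is lost
         * across the suspend/power-down above).
         */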
3373         if (phy_is_internal(priv->phydev)) {
3374                 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
3375                 reg |= EXT_ENERGY_DET_MASK;
3376                 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
3377         }
3378
3379         if (priv->wolopts)
3380                 bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
3381
3382         /* Disable RX/TX DMA and flush TX queues */
3383         dma_ctrl = bcmgenet_dma_disable(priv);
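        /* bcmgenet_dma_disable() hands back the ring-enable bits it
         * cleared so bcmgenet_enable_dma() below can restore exactly
         * those rings once the descriptor rings are rebuilt.
         */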
3384
3385         /* Reinitialize TDMA and RDMA and SW housekeeping */
3386         ret = bcmgenet_init_dma(priv);
3387         if (ret) {
3388                 netdev_err(dev, "failed to initialize DMA\n");
3389                 goto out_clk_disable;
3390         }
3391
3392         /* Always enable ring 16 (the default descriptor ring) */
3393         bcmgenet_enable_dma(priv, dma_ctrl);
3394
3395         netif_device_attach(dev);
3396
3397         phy_resume(priv->phydev);
3398
3399         if (priv->eee.eee_enabled)
3400                 bcmgenet_eee_enable_set(dev, true);
3401
3402         bcmgenet_netif_start(dev);
3403
3404         return 0;
3405
3406 out_clk_disable:
3407         clk_disable_unprepare(priv->clk);
3408         return ret;
3409 }
3410 #endif /* CONFIG_PM_SLEEP */
3411
3412 static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
3413
3414 static struct platform_driver bcmgenet_driver = {
3415         .probe  = bcmgenet_probe,
3416         .remove = bcmgenet_remove,
3417         .driver = {
3418                 .name   = "bcmgenet",
3419                 .of_match_table = bcmgenet_match,
3420                 .pm     = &bcmgenet_pm_ops,
3421         },
3422 };
3423 module_platform_driver(bcmgenet_driver);
3424
3425 MODULE_AUTHOR("Broadcom Corporation");
3426 MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
3427 MODULE_ALIAS("platform:bcmgenet");
3428 MODULE_LICENSE("GPL");