Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
[linux-drm-fsl-dcu.git] / drivers / net / ethernet / stmicro / stmmac / stmmac_main.c
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4
5         Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15
16   You should have received a copy of the GNU General Public License along with
17   this program; if not, write to the Free Software Foundation, Inc.,
18   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20   The full GNU General Public License is included in this distribution in
21   the file called "COPYING".
22
23   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
24
25   Documentation available at:
26         http://www.stlinux.com
27   Support available at:
28         https://bugzilla.stlinux.com/
29 *******************************************************************************/
30
31 #include <linux/clk.h>
32 #include <linux/kernel.h>
33 #include <linux/interrupt.h>
34 #include <linux/ip.h>
35 #include <linux/tcp.h>
36 #include <linux/skbuff.h>
37 #include <linux/ethtool.h>
38 #include <linux/if_ether.h>
39 #include <linux/crc32.h>
40 #include <linux/mii.h>
41 #include <linux/if.h>
42 #include <linux/if_vlan.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/slab.h>
45 #include <linux/prefetch.h>
46 #ifdef CONFIG_STMMAC_DEBUG_FS
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #endif /* CONFIG_STMMAC_DEBUG_FS */
50 #include <linux/net_tstamp.h>
51 #include "stmmac_ptp.h"
52 #include "stmmac.h"
53
54 #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
55 #define JUMBO_LEN       9000
56
57 /* Module parameters */
58 #define TX_TIMEO        5000
59 static int watchdog = TX_TIMEO;
60 module_param(watchdog, int, S_IRUGO | S_IWUSR);
61 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
62
63 static int debug = -1;
64 module_param(debug, int, S_IRUGO | S_IWUSR);
65 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
66
67 int phyaddr = -1;
68 module_param(phyaddr, int, S_IRUGO);
69 MODULE_PARM_DESC(phyaddr, "Physical device address");
70
71 #define DMA_TX_SIZE 256
72 static int dma_txsize = DMA_TX_SIZE;
73 module_param(dma_txsize, int, S_IRUGO | S_IWUSR);
74 MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list");
75
76 #define DMA_RX_SIZE 256
77 static int dma_rxsize = DMA_RX_SIZE;
78 module_param(dma_rxsize, int, S_IRUGO | S_IWUSR);
79 MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list");
80
81 static int flow_ctrl = FLOW_OFF;
82 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
83 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
84
85 static int pause = PAUSE_TIME;
86 module_param(pause, int, S_IRUGO | S_IWUSR);
87 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
88
89 #define TC_DEFAULT 64
90 static int tc = TC_DEFAULT;
91 module_param(tc, int, S_IRUGO | S_IWUSR);
92 MODULE_PARM_DESC(tc, "DMA threshold control value");
93
94 #define DMA_BUFFER_SIZE BUF_SIZE_2KiB
95 static int buf_sz = DMA_BUFFER_SIZE;
96 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
97 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
98
99 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
100                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
101                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
102
103 #define STMMAC_DEFAULT_LPI_TIMER        1000
104 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
105 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
106 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
107 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
108
109 /* By default the driver will use the ring mode to manage tx and rx descriptors
110  * but passing this value so user can force to use the chain instead of the ring
111  */
112 static unsigned int chain_mode;
113 module_param(chain_mode, int, S_IRUGO);
114 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
115
116 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
117
118 #ifdef CONFIG_STMMAC_DEBUG_FS
119 static int stmmac_init_fs(struct net_device *dev);
120 static void stmmac_exit_fs(void);
121 #endif
122
123 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
124
125 /**
126  * stmmac_verify_args - verify the driver parameters.
127  * Description: it verifies if some wrong parameter is passed to the driver.
128  * Note that wrong parameters are replaced with the default values.
129  */
130 static void stmmac_verify_args(void)
131 {
132         if (unlikely(watchdog < 0))
133                 watchdog = TX_TIMEO;
134         if (unlikely(dma_rxsize < 0))
135                 dma_rxsize = DMA_RX_SIZE;
136         if (unlikely(dma_txsize < 0))
137                 dma_txsize = DMA_TX_SIZE;
138         if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB)))
139                 buf_sz = DMA_BUFFER_SIZE;
140         if (unlikely(flow_ctrl > 1))
141                 flow_ctrl = FLOW_AUTO;
142         else if (likely(flow_ctrl < 0))
143                 flow_ctrl = FLOW_OFF;
144         if (unlikely((pause < 0) || (pause > 0xffff)))
145                 pause = PAUSE_TIME;
146         if (eee_timer < 0)
147                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
148 }
149
150 /**
151  * stmmac_clk_csr_set - dynamically set the MDC clock
152  * @priv: driver private structure
153  * Description: this is to dynamically set the MDC clock according to the csr
154  * clock input.
155  * Note:
156  *      If a specific clk_csr value is passed from the platform
157  *      this means that the CSR Clock Range selection cannot be
158  *      changed at run-time and it is fixed (as reported in the driver
159  *      documentation). Viceversa the driver will try to set the MDC
160  *      clock dynamically according to the actual clock input.
161  */
162 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
163 {
164         u32 clk_rate;
165
166         clk_rate = clk_get_rate(priv->stmmac_clk);
167
168         /* Platform provided default clk_csr would be assumed valid
169          * for all other cases except for the below mentioned ones.
170          * For values higher than the IEEE 802.3 specified frequency
171          * we can not estimate the proper divider as it is not known
172          * the frequency of clk_csr_i. So we do not change the default
173          * divider.
174          */
175         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
176                 if (clk_rate < CSR_F_35M)
177                         priv->clk_csr = STMMAC_CSR_20_35M;
178                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
179                         priv->clk_csr = STMMAC_CSR_35_60M;
180                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
181                         priv->clk_csr = STMMAC_CSR_60_100M;
182                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
183                         priv->clk_csr = STMMAC_CSR_100_150M;
184                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
185                         priv->clk_csr = STMMAC_CSR_150_250M;
186                 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
187                         priv->clk_csr = STMMAC_CSR_250_300M;
188         }
189 }
190
/* Dump @len bytes of @buf as hex, 16 bytes per line, each line prefixed
 * by its offset into the buffer.  Output goes to the debug log only.
 */
static void print_pkt(unsigned char *buf, int len)
{
	int row, col;

	pr_debug("len = %d byte, buf addr: 0x%p", len, buf);
	for (row = 0; row < len; row += 16) {
		pr_debug("\n %03x:", row);
		for (col = row; col < row + 16 && col < len; col++)
			pr_debug(" %02x", buf[col]);
	}
	pr_debug("\n");
}
202
203 /* minimum number of free TX descriptors required to wake up TX process */
204 #define STMMAC_TX_THRESH(x)     (x->dma_tx_size/4)
205
206 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
207 {
208         return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
209 }
210
211 /**
212  * stmmac_hw_fix_mac_speed: callback for speed selection
213  * @priv: driver private structure
214  * Description: on some platforms (e.g. ST), some HW system configuraton
215  * registers have to be set according to the link speed negotiated.
216  */
217 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
218 {
219         struct phy_device *phydev = priv->phydev;
220
221         if (likely(priv->plat->fix_mac_speed))
222                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
223 }
224
225 /**
226  * stmmac_enable_eee_mode: Check and enter in LPI mode
227  * @priv: driver private structure
228  * Description: this function is to verify and enter in LPI mode for EEE.
229  */
230 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
231 {
232         /* Check and enter in LPI mode */
233         if ((priv->dirty_tx == priv->cur_tx) &&
234             (priv->tx_path_in_lpi_mode == false))
235                 priv->hw->mac->set_eee_mode(priv->ioaddr);
236 }
237
/**
 * stmmac_disable_eee_mode: disable/exit from EEE
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE in case of
 * LPI state is true. This is called by the xmit.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	/* Take the MAC TX path out of LPI state first ... */
	priv->hw->mac->reset_eee_mode(priv->ioaddr);
	/* ... then stop the periodic LPI-entry timer; the _sync variant
	 * waits for a concurrently running handler to finish
	 */
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}
250
/**
 * stmmac_eee_ctrl_timer: EEE TX SW timer.
 * @arg : data hook
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(unsigned long arg)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)arg;

	/* Try to enter LPI (only effective while the TX ring is idle),
	 * then re-arm so the check repeats every eee_timer msec.
	 */
	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}
265
266 /**
267  * stmmac_eee_init: init EEE
268  * @priv: driver private structure
269  * Description:
270  *  If the EEE support has been enabled while configuring the driver,
271  *  if the GMAC actually supports the EEE (from the HW cap reg) and the
272  *  phy can also manage EEE, so enable the LPI state and start the timer
273  *  to verify if the tx path can enter in LPI state.
274  */
275 bool stmmac_eee_init(struct stmmac_priv *priv)
276 {
277         bool ret = false;
278
279         /* Using PCS we cannot dial with the phy registers at this stage
280          * so we do not support extra feature like EEE.
281          */
282         if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) ||
283             (priv->pcs == STMMAC_PCS_RTBI))
284                 goto out;
285
286         /* MAC core supports the EEE feature. */
287         if (priv->dma_cap.eee) {
288                 /* Check if the PHY supports EEE */
289                 if (phy_init_eee(priv->phydev, 1))
290                         goto out;
291
292                 if (!priv->eee_active) {
293                         priv->eee_active = 1;
294                         init_timer(&priv->eee_ctrl_timer);
295                         priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
296                         priv->eee_ctrl_timer.data = (unsigned long)priv;
297                         priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
298                         add_timer(&priv->eee_ctrl_timer);
299
300                         priv->hw->mac->set_eee_timer(priv->ioaddr,
301                                                      STMMAC_DEFAULT_LIT_LS,
302                                                      priv->tx_lpi_timer);
303                 } else
304                         /* Set HW EEE according to the speed */
305                         priv->hw->mac->set_eee_pls(priv->ioaddr,
306                                                    priv->phydev->link);
307
308                 pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
309
310                 ret = true;
311         }
312 out:
313         return ret;
314 }
315
316 /* stmmac_get_tx_hwtstamp: get HW TX timestamps
317  * @priv: driver private structure
318  * @entry : descriptor index to be used.
319  * @skb : the socket buffer
320  * Description :
321  * This function will read timestamp from the descriptor & pass it to stack.
322  * and also perform some sanity checks.
323  */
324 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
325                                    unsigned int entry, struct sk_buff *skb)
326 {
327         struct skb_shared_hwtstamps shhwtstamp;
328         u64 ns;
329         void *desc = NULL;
330
331         if (!priv->hwts_tx_en)
332                 return;
333
334         /* exit if skb doesn't support hw tstamp */
335         if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
336                 return;
337
338         if (priv->adv_ts)
339                 desc = (priv->dma_etx + entry);
340         else
341                 desc = (priv->dma_tx + entry);
342
343         /* check tx tstamp status */
344         if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc))
345                 return;
346
347         /* get the valid tstamp */
348         ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
349
350         memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
351         shhwtstamp.hwtstamp = ns_to_ktime(ns);
352         /* pass tstamp to stack */
353         skb_tstamp_tx(skb, &shhwtstamp);
354
355         return;
356 }
357
358 /* stmmac_get_rx_hwtstamp: get HW RX timestamps
359  * @priv: driver private structure
360  * @entry : descriptor index to be used.
361  * @skb : the socket buffer
362  * Description :
363  * This function will read received packet's timestamp from the descriptor
364  * and pass it to stack. It also perform some sanity checks.
365  */
366 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv,
367                                    unsigned int entry, struct sk_buff *skb)
368 {
369         struct skb_shared_hwtstamps *shhwtstamp = NULL;
370         u64 ns;
371         void *desc = NULL;
372
373         if (!priv->hwts_rx_en)
374                 return;
375
376         if (priv->adv_ts)
377                 desc = (priv->dma_erx + entry);
378         else
379                 desc = (priv->dma_rx + entry);
380
381         /* exit if rx tstamp is not valid */
382         if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts))
383                 return;
384
385         /* get valid tstamp */
386         ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
387         shhwtstamp = skb_hwtstamps(skb);
388         memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
389         shhwtstamp->hwtstamp = ns_to_ktime(ns);
390 }
391
/**
 *  stmmac_hwtstamp_ioctl - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specefic structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing(TX)
 *  and incoming(RX) packets time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec now;
	u64 temp = 0;
	/* PTP_TCR_* register fields, accumulated by the switch below and
	 * OR-ed together into 'value' when timestamping is enabled
	 */
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 value = 0;

	/* neither the basic nor the advanced timestamp unit is present */
	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(struct hwtstamp_config)))
		return -EFAULT;

	pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		 __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		/* map the requested RX filter onto the PTP_TCR_* control
		 * bits collected in the locals declared above
		 */
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		/* without the advanced unit no filter bits are programmed;
		 * any filtering request is reported back as PTP v1 events
		 */
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);

		priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value);

		/* program Sub Second Increment reg */
		priv->hw->ptp->config_sub_second_increment(priv->ioaddr);

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = STMMAC_SYSCLOCK/50MHz
		 * hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK;
		 * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to
		 *       achive 20ns accuracy.
		 *
		 * 2^x * y == (y << x), hence
		 * 2^32 * 50000000 ==> (50000000 << 32)
		 */
		temp = (u64) (50000000ULL << 32);
		priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK);
		priv->hw->ptp->config_addend(priv->ioaddr,
					     priv->default_addend);

		/* initialize system time */
		getnstimeofday(&now);
		priv->hw->ptp->init_systime(priv->ioaddr, now.tv_sec,
					    now.tv_nsec);
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}
612
613 /**
614  * stmmac_init_ptp: init PTP
615  * @priv: driver private structure
616  * Description: this is to verify if the HW supports the PTPv1 or v2.
617  * This is done by looking at the HW cap. register.
618  * Also it registers the ptp driver.
619  */
620 static int stmmac_init_ptp(struct stmmac_priv *priv)
621 {
622         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
623                 return -EOPNOTSUPP;
624
625         priv->adv_ts = 0;
626         if (priv->dma_cap.atime_stamp && priv->extend_desc)
627                 priv->adv_ts = 1;
628
629         if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
630                 pr_debug("IEEE 1588-2002 Time Stamp supported\n");
631
632         if (netif_msg_hw(priv) && priv->adv_ts)
633                 pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
634
635         priv->hw->ptp = &stmmac_ptp;
636         priv->hwts_tx_en = 0;
637         priv->hwts_rx_en = 0;
638
639         return stmmac_ptp_register(priv);
640 }
641
/* Undo stmmac_init_ptp(): unregister the PTP support for this device */
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	stmmac_ptp_unregister(priv);
}
646
647 /**
648  * stmmac_adjust_link
649  * @dev: net device structure
650  * Description: it adjusts the link parameters.
651  */
652 static void stmmac_adjust_link(struct net_device *dev)
653 {
654         struct stmmac_priv *priv = netdev_priv(dev);
655         struct phy_device *phydev = priv->phydev;
656         unsigned long flags;
657         int new_state = 0;
658         unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
659
660         if (phydev == NULL)
661                 return;
662
663         spin_lock_irqsave(&priv->lock, flags);
664
665         if (phydev->link) {
666                 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
667
668                 /* Now we make sure that we can be in full duplex mode.
669                  * If not, we operate in half-duplex mode. */
670                 if (phydev->duplex != priv->oldduplex) {
671                         new_state = 1;
672                         if (!(phydev->duplex))
673                                 ctrl &= ~priv->hw->link.duplex;
674                         else
675                                 ctrl |= priv->hw->link.duplex;
676                         priv->oldduplex = phydev->duplex;
677                 }
678                 /* Flow Control operation */
679                 if (phydev->pause)
680                         priv->hw->mac->flow_ctrl(priv->ioaddr, phydev->duplex,
681                                                  fc, pause_time);
682
683                 if (phydev->speed != priv->speed) {
684                         new_state = 1;
685                         switch (phydev->speed) {
686                         case 1000:
687                                 if (likely(priv->plat->has_gmac))
688                                         ctrl &= ~priv->hw->link.port;
689                                 stmmac_hw_fix_mac_speed(priv);
690                                 break;
691                         case 100:
692                         case 10:
693                                 if (priv->plat->has_gmac) {
694                                         ctrl |= priv->hw->link.port;
695                                         if (phydev->speed == SPEED_100) {
696                                                 ctrl |= priv->hw->link.speed;
697                                         } else {
698                                                 ctrl &= ~(priv->hw->link.speed);
699                                         }
700                                 } else {
701                                         ctrl &= ~priv->hw->link.port;
702                                 }
703                                 stmmac_hw_fix_mac_speed(priv);
704                                 break;
705                         default:
706                                 if (netif_msg_link(priv))
707                                         pr_warn("%s: Speed (%d) not 10/100\n",
708                                                 dev->name, phydev->speed);
709                                 break;
710                         }
711
712                         priv->speed = phydev->speed;
713                 }
714
715                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
716
717                 if (!priv->oldlink) {
718                         new_state = 1;
719                         priv->oldlink = 1;
720                 }
721         } else if (priv->oldlink) {
722                 new_state = 1;
723                 priv->oldlink = 0;
724                 priv->speed = 0;
725                 priv->oldduplex = -1;
726         }
727
728         if (new_state && netif_msg_link(priv))
729                 phy_print_status(phydev);
730
731         /* At this stage, it could be needed to setup the EEE or adjust some
732          * MAC related HW registers.
733          */
734         priv->eee_enabled = stmmac_eee_init(priv);
735
736         spin_unlock_irqrestore(&priv->lock, flags);
737 }
738
739 /**
740  * stmmac_check_pcs_mode: verify if RGMII/SGMII is supported
741  * @priv: driver private structure
742  * Description: this is to verify if the HW supports the PCS.
743  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
744  * configured for the TBI, RTBI, or SGMII PHY interface.
745  */
746 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
747 {
748         int interface = priv->plat->interface;
749
750         if (priv->dma_cap.pcs) {
751                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
752                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
753                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
754                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
755                         pr_debug("STMMAC: PCS RGMII support enable\n");
756                         priv->pcs = STMMAC_PCS_RGMII;
757                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
758                         pr_debug("STMMAC: PCS SGMII support enable\n");
759                         priv->pcs = STMMAC_PCS_SGMII;
760                 }
761         }
762 }
763
764 /**
765  * stmmac_init_phy - PHY initialization
766  * @dev: net device structure
767  * Description: it initializes the driver's PHY state, and attaches the PHY
768  * to the mac driver.
769  *  Return value:
770  *  0 on success
771  */
772 static int stmmac_init_phy(struct net_device *dev)
773 {
774         struct stmmac_priv *priv = netdev_priv(dev);
775         struct phy_device *phydev;
776         char phy_id_fmt[MII_BUS_ID_SIZE + 3];
777         char bus_id[MII_BUS_ID_SIZE];
778         int interface = priv->plat->interface;
779         priv->oldlink = 0;
780         priv->speed = 0;
781         priv->oldduplex = -1;
782
783         if (priv->plat->phy_bus_name)
784                 snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
785                          priv->plat->phy_bus_name, priv->plat->bus_id);
786         else
787                 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
788                          priv->plat->bus_id);
789
790         snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
791                  priv->plat->phy_addr);
792         pr_debug("stmmac_init_phy:  trying to attach to %s\n", phy_id_fmt);
793
794         phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface);
795
796         if (IS_ERR(phydev)) {
797                 pr_err("%s: Could not attach to PHY\n", dev->name);
798                 return PTR_ERR(phydev);
799         }
800
801         /* Stop Advertising 1000BASE Capability if interface is not GMII */
802         if ((interface == PHY_INTERFACE_MODE_MII) ||
803             (interface == PHY_INTERFACE_MODE_RMII))
804                 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
805                                          SUPPORTED_1000baseT_Full);
806
807         /*
808          * Broken HW is sometimes missing the pull-up resistor on the
809          * MDIO line, which results in reads to non-existent devices returning
810          * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
811          * device as well.
812          * Note: phydev->phy_id is the result of reading the UID PHY registers.
813          */
814         if (phydev->phy_id == 0) {
815                 phy_disconnect(phydev);
816                 return -ENODEV;
817         }
818         pr_debug("stmmac_init_phy:  %s: attached to PHY (UID 0x%x)"
819                  " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
820
821         priv->phydev = phydev;
822
823         return 0;
824 }
825
826 /**
827  * stmmac_display_ring: display ring
828  * @head: pointer to the head of the ring passed.
829  * @size: size of the ring.
830  * @extend_desc: to verify if extended descriptors are used.
831  * Description: display the control/status and buffer descriptors.
832  */
833 static void stmmac_display_ring(void *head, int size, int extend_desc)
834 {
835         int i;
836         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
837         struct dma_desc *p = (struct dma_desc *)head;
838
839         for (i = 0; i < size; i++) {
840                 u64 x;
841                 if (extend_desc) {
842                         x = *(u64 *) ep;
843                         pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
844                                 i, (unsigned int)virt_to_phys(ep),
845                                 (unsigned int)x, (unsigned int)(x >> 32),
846                                 ep->basic.des2, ep->basic.des3);
847                         ep++;
848                 } else {
849                         x = *(u64 *) p;
850                         pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
851                                 i, (unsigned int)virt_to_phys(p),
852                                 (unsigned int)x, (unsigned int)(x >> 32),
853                                 p->des2, p->des3);
854                         p++;
855                 }
856                 pr_info("\n");
857         }
858 }
859
860 static void stmmac_display_rings(struct stmmac_priv *priv)
861 {
862         unsigned int txsize = priv->dma_tx_size;
863         unsigned int rxsize = priv->dma_rx_size;
864
865         if (priv->extend_desc) {
866                 pr_info("Extended RX descriptor ring:\n");
867                 stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
868                 pr_info("Extended TX descriptor ring:\n");
869                 stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
870         } else {
871                 pr_info("RX descriptor ring:\n");
872                 stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
873                 pr_info("TX descriptor ring:\n");
874                 stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
875         }
876 }
877
878 static int stmmac_set_bfsize(int mtu, int bufsize)
879 {
880         int ret = bufsize;
881
882         if (mtu >= BUF_SIZE_4KiB)
883                 ret = BUF_SIZE_8KiB;
884         else if (mtu >= BUF_SIZE_2KiB)
885                 ret = BUF_SIZE_4KiB;
886         else if (mtu >= DMA_BUFFER_SIZE)
887                 ret = BUF_SIZE_2KiB;
888         else
889                 ret = DMA_BUFFER_SIZE;
890
891         return ret;
892 }
893
894 /**
895  * stmmac_clear_descriptors: clear descriptors
896  * @priv: driver private structure
897  * Description: this function is called to clear the tx and rx descriptors
898  * in case of both basic and extended descriptors are used.
899  */
900 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
901 {
902         int i;
903         unsigned int txsize = priv->dma_tx_size;
904         unsigned int rxsize = priv->dma_rx_size;
905
906         /* Clear the Rx/Tx descriptors */
907         for (i = 0; i < rxsize; i++)
908                 if (priv->extend_desc)
909                         priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
910                                                      priv->use_riwt, priv->mode,
911                                                      (i == rxsize - 1));
912                 else
913                         priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
914                                                      priv->use_riwt, priv->mode,
915                                                      (i == rxsize - 1));
916         for (i = 0; i < txsize; i++)
917                 if (priv->extend_desc)
918                         priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
919                                                      priv->mode,
920                                                      (i == txsize - 1));
921                 else
922                         priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
923                                                      priv->mode,
924                                                      (i == txsize - 1));
925 }
926
927 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
928                                   int i)
929 {
930         struct sk_buff *skb;
931
932         skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
933                                  GFP_KERNEL);
934         if (!skb) {
935                 pr_err("%s: Rx init fails; skb is NULL\n", __func__);
936                 return -ENOMEM;
937         }
938         skb_reserve(skb, NET_IP_ALIGN);
939         priv->rx_skbuff[i] = skb;
940         priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
941                                                 priv->dma_buf_sz,
942                                                 DMA_FROM_DEVICE);
943         if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
944                 pr_err("%s: DMA mapping error\n", __func__);
945                 dev_kfree_skb_any(skb);
946                 return -EINVAL;
947         }
948
949         p->des2 = priv->rx_skbuff_dma[i];
950
951         if ((priv->mode == STMMAC_RING_MODE) &&
952             (priv->dma_buf_sz == BUF_SIZE_16KiB))
953                 priv->hw->ring->init_desc3(p);
954
955         return 0;
956 }
957
958 static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
959 {
960         if (priv->rx_skbuff[i]) {
961                 dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
962                                  priv->dma_buf_sz, DMA_FROM_DEVICE);
963                 dev_kfree_skb_any(priv->rx_skbuff[i]);
964         }
965         priv->rx_skbuff[i] = NULL;
966 }
967
/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * Description:  this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 * Return: 0 on success; a negative errno on failure, after releasing
 * everything acquired so far via the cascading error labels at the end.
 */
static int init_dma_desc_rings(struct net_device *dev)
{
	int i;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int txsize = priv->dma_tx_size;
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int bfsize = 0;
	int ret = -ENOMEM;

	/* Set the max buffer size according to the DESC mode
	 * and the MTU. Note that RING mode allows 16KiB bsize.
	 */
	if (priv->mode == STMMAC_RING_MODE)
		bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	if (netif_msg_probe(priv))
		pr_debug("%s: txsize %d, rxsize %d, bfsize %d\n", __func__,
			 txsize, rxsize, bfsize);

	/* Allocate the coherent descriptor rings, extended or basic
	 * depending on the HW capabilities detected earlier.
	 */
	if (priv->extend_desc) {
		priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
						   sizeof(struct
							  dma_extended_desc),
						   &priv->dma_rx_phy,
						   GFP_KERNEL);
		if (!priv->dma_erx)
			goto err_dma;

		priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
						   sizeof(struct
							  dma_extended_desc),
						   &priv->dma_tx_phy,
						   GFP_KERNEL);
		if (!priv->dma_etx) {
			/* RX ring already allocated: free it here since the
			 * error labels below only handle later failures.
			 */
			dma_free_coherent(priv->device, priv->dma_rx_size *
					sizeof(struct dma_extended_desc),
					priv->dma_erx, priv->dma_rx_phy);
			goto err_dma;
		}
	} else {
		priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
						  sizeof(struct dma_desc),
						  &priv->dma_rx_phy,
						  GFP_KERNEL);
		if (!priv->dma_rx)
			goto err_dma;

		priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
						  sizeof(struct dma_desc),
						  &priv->dma_tx_phy,
						  GFP_KERNEL);
		if (!priv->dma_tx) {
			dma_free_coherent(priv->device, priv->dma_rx_size *
					sizeof(struct dma_desc),
					priv->dma_rx, priv->dma_rx_phy);
			goto err_dma;
		}
	}

	/* Bookkeeping arrays: one skb pointer and one DMA handle per slot */
	priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
					    GFP_KERNEL);
	if (!priv->rx_skbuff_dma)
		goto err_rx_skbuff_dma;

	priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
					GFP_KERNEL);
	if (!priv->rx_skbuff)
		goto err_rx_skbuff;

	priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
					    GFP_KERNEL);
	if (!priv->tx_skbuff_dma)
		goto err_tx_skbuff_dma;

	priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
					GFP_KERNEL);
	if (!priv->tx_skbuff)
		goto err_tx_skbuff;

	if (netif_msg_probe(priv)) {
		pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
			 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);

		/* RX INITIALIZATION */
		pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n");
	}
	/* Populate every RX slot with a mapped skb */
	for (i = 0; i < rxsize; i++) {
		struct dma_desc *p;
		if (priv->extend_desc)
			p = &((priv->dma_erx + i)->basic);
		else
			p = priv->dma_rx + i;

		ret = stmmac_init_rx_buffers(priv, p, i);
		if (ret)
			goto err_init_rx_buffers;

		if (netif_msg_probe(priv))
			pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
				 priv->rx_skbuff[i]->data,
				 (unsigned int)priv->rx_skbuff_dma[i]);
	}
	priv->cur_rx = 0;
	priv->dirty_rx = (unsigned int)(i - rxsize);
	priv->dma_buf_sz = bfsize;
	buf_sz = bfsize;

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc) {
			priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy,
					      rxsize, 1);
			priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy,
					      txsize, 1);
		} else {
			priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy,
					      rxsize, 0);
			priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy,
					      txsize, 0);
		}
	}

	/* TX INITIALIZATION: TX slots start empty, filled at xmit time */
	for (i = 0; i < txsize; i++) {
		struct dma_desc *p;
		if (priv->extend_desc)
			p = &((priv->dma_etx + i)->basic);
		else
			p = priv->dma_tx + i;
		p->des2 = 0;
		priv->tx_skbuff_dma[i] = 0;
		priv->tx_skbuff[i] = NULL;
	}

	priv->dirty_tx = 0;
	priv->cur_tx = 0;

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return 0;
/* Error unwind: each label releases one resource and falls through to
 * the previous one, undoing the allocations in reverse order.
 */
err_init_rx_buffers:
	while (--i >= 0)
		stmmac_free_rx_buffers(priv, i);
	kfree(priv->tx_skbuff);
err_tx_skbuff:
	kfree(priv->tx_skbuff_dma);
err_tx_skbuff_dma:
	kfree(priv->rx_skbuff);
err_rx_skbuff:
	kfree(priv->rx_skbuff_dma);
err_rx_skbuff_dma:
	if (priv->extend_desc) {
		dma_free_coherent(priv->device, priv->dma_tx_size *
				  sizeof(struct dma_extended_desc),
				  priv->dma_etx, priv->dma_tx_phy);
		dma_free_coherent(priv->device, priv->dma_rx_size *
				  sizeof(struct dma_extended_desc),
				  priv->dma_erx, priv->dma_rx_phy);
	} else {
		dma_free_coherent(priv->device,
				priv->dma_tx_size * sizeof(struct dma_desc),
				priv->dma_tx, priv->dma_tx_phy);
		dma_free_coherent(priv->device,
				priv->dma_rx_size * sizeof(struct dma_desc),
				priv->dma_rx, priv->dma_rx_phy);
	}
err_dma:
	return ret;
}
1150
1151 static void dma_free_rx_skbufs(struct stmmac_priv *priv)
1152 {
1153         int i;
1154
1155         for (i = 0; i < priv->dma_rx_size; i++)
1156                 stmmac_free_rx_buffers(priv, i);
1157 }
1158
1159 static void dma_free_tx_skbufs(struct stmmac_priv *priv)
1160 {
1161         int i;
1162
1163         for (i = 0; i < priv->dma_tx_size; i++) {
1164                 if (priv->tx_skbuff[i] != NULL) {
1165                         struct dma_desc *p;
1166                         if (priv->extend_desc)
1167                                 p = &((priv->dma_etx + i)->basic);
1168                         else
1169                                 p = priv->dma_tx + i;
1170
1171                         if (priv->tx_skbuff_dma[i])
1172                                 dma_unmap_single(priv->device,
1173                                                  priv->tx_skbuff_dma[i],
1174                                                  priv->hw->desc->get_tx_len(p),
1175                                                  DMA_TO_DEVICE);
1176                         dev_kfree_skb_any(priv->tx_skbuff[i]);
1177                         priv->tx_skbuff[i] = NULL;
1178                         priv->tx_skbuff_dma[i] = 0;
1179                 }
1180         }
1181 }
1182
1183 static void free_dma_desc_resources(struct stmmac_priv *priv)
1184 {
1185         /* Release the DMA TX/RX socket buffers */
1186         dma_free_rx_skbufs(priv);
1187         dma_free_tx_skbufs(priv);
1188
1189         /* Free DMA regions of consistent memory previously allocated */
1190         if (!priv->extend_desc) {
1191                 dma_free_coherent(priv->device,
1192                                   priv->dma_tx_size * sizeof(struct dma_desc),
1193                                   priv->dma_tx, priv->dma_tx_phy);
1194                 dma_free_coherent(priv->device,
1195                                   priv->dma_rx_size * sizeof(struct dma_desc),
1196                                   priv->dma_rx, priv->dma_rx_phy);
1197         } else {
1198                 dma_free_coherent(priv->device, priv->dma_tx_size *
1199                                   sizeof(struct dma_extended_desc),
1200                                   priv->dma_etx, priv->dma_tx_phy);
1201                 dma_free_coherent(priv->device, priv->dma_rx_size *
1202                                   sizeof(struct dma_extended_desc),
1203                                   priv->dma_erx, priv->dma_rx_phy);
1204         }
1205         kfree(priv->rx_skbuff_dma);
1206         kfree(priv->rx_skbuff);
1207         kfree(priv->tx_skbuff_dma);
1208         kfree(priv->tx_skbuff);
1209 }
1210
1211 /**
1212  *  stmmac_dma_operation_mode - HW DMA operation mode
1213  *  @priv: driver private structure
1214  *  Description: it sets the DMA operation mode: tx/rx DMA thresholds
1215  *  or Store-And-Forward capability.
1216  */
1217 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1218 {
1219         if (priv->plat->force_thresh_dma_mode)
1220                 priv->hw->dma->dma_mode(priv->ioaddr, tc, tc);
1221         else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1222                 /*
1223                  * In case of GMAC, SF mode can be enabled
1224                  * to perform the TX COE in HW. This depends on:
1225                  * 1) TX COE if actually supported
1226                  * 2) There is no bugged Jumbo frame support
1227                  *    that needs to not insert csum in the TDES.
1228                  */
1229                 priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE);
1230                 tc = SF_DMA_MODE;
1231         } else
1232                 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
1233 }
1234
/**
 * stmmac_tx_clean:
 * @priv: driver private structure
 * Description: it reclaims resources after transmission completes.
 * Runs under priv->tx_lock; walks the TX ring from the oldest pending
 * entry (dirty_tx) towards cur_tx, stopping at the first descriptor the
 * DMA engine still owns.
 */
static void stmmac_tx_clean(struct stmmac_priv *priv)
{
	unsigned int txsize = priv->dma_tx_size;

	spin_lock(&priv->tx_lock);

	priv->xstats.tx_clean++;

	while (priv->dirty_tx != priv->cur_tx) {
		int last;
		unsigned int entry = priv->dirty_tx % txsize;
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(priv->dma_etx + entry);
		else
			p = priv->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (priv->hw->desc->get_tx_owner(p))
			break;

		/* Verify tx error by looking at the last segment. */
		last = priv->hw->desc->get_tx_ls(p);
		if (likely(last)) {
			int tx_error =
			    priv->hw->desc->tx_status(&priv->dev->stats,
						      &priv->xstats, p,
						      priv->ioaddr);
			if (likely(tx_error == 0)) {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			} else
				priv->dev->stats.tx_errors++;

			stmmac_get_tx_hwtstamp(priv, entry, skb);
		}
		if (netif_msg_tx_done(priv))
			pr_debug("%s: curr %d, dirty %d\n", __func__,
				 priv->cur_tx, priv->dirty_tx);

		/* Undo the streaming mapping; zero marks the slot unmapped */
		if (likely(priv->tx_skbuff_dma[entry])) {
			dma_unmap_single(priv->device,
					 priv->tx_skbuff_dma[entry],
					 priv->hw->desc->get_tx_len(p),
					 DMA_TO_DEVICE);
			priv->tx_skbuff_dma[entry] = 0;
		}
		priv->hw->ring->clean_desc3(priv, p);

		if (likely(skb != NULL)) {
			dev_kfree_skb(skb);
			priv->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p, priv->mode);

		priv->dirty_tx++;
	}
	/* Wake the queue if it was stopped and enough descriptors were
	 * reclaimed; the condition is re-checked under netif_tx_lock to
	 * avoid racing with the xmit path.
	 */
	if (unlikely(netif_queue_stopped(priv->dev) &&
		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
		netif_tx_lock(priv->dev);
		if (netif_queue_stopped(priv->dev) &&
		    stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
			if (netif_msg_tx_done(priv))
				pr_debug("%s: restart transmit\n", __func__);
			netif_wake_queue(priv->dev);
		}
		netif_tx_unlock(priv->dev);
	}

	/* TX path idle again: try to (re)enter the EEE low-power state */
	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
	}
	spin_unlock(&priv->tx_lock);
}
1318
/* Re-enable the DMA interrupts (called when NAPI polling completes) */
static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
{
	priv->hw->dma->enable_dma_irq(priv->ioaddr);
}
1323
/* Mask the DMA interrupts (called before scheduling NAPI) */
static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
{
	priv->hw->dma->disable_dma_irq(priv->ioaddr);
}
1328
1329 /**
1330  * stmmac_tx_err: irq tx error mng function
1331  * @priv: driver private structure
1332  * Description: it cleans the descriptors and restarts the transmission
1333  * in case of errors.
1334  */
1335 static void stmmac_tx_err(struct stmmac_priv *priv)
1336 {
1337         int i;
1338         int txsize = priv->dma_tx_size;
1339         netif_stop_queue(priv->dev);
1340
1341         priv->hw->dma->stop_tx(priv->ioaddr);
1342         dma_free_tx_skbufs(priv);
1343         for (i = 0; i < txsize; i++)
1344                 if (priv->extend_desc)
1345                         priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
1346                                                      priv->mode,
1347                                                      (i == txsize - 1));
1348                 else
1349                         priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
1350                                                      priv->mode,
1351                                                      (i == txsize - 1));
1352         priv->dirty_tx = 0;
1353         priv->cur_tx = 0;
1354         priv->hw->dma->start_tx(priv->ioaddr);
1355
1356         priv->dev->stats.tx_errors++;
1357         netif_wake_queue(priv->dev);
1358 }
1359
/**
 * stmmac_dma_interrupt: DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine to understand which type of interrupt
 * happened. In case of there is a Normal interrupt and either TX or RX
 * interrupt happened so the NAPI is scheduled.
 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	int status;

	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
	if (likely((status & handle_rx)) || (status & handle_tx)) {
		/* Mask DMA irqs and defer the work to the NAPI poll loop */
		if (likely(napi_schedule_prep(&priv->napi))) {
			stmmac_disable_dma_irq(priv);
			__napi_schedule(&priv->napi);
		}
	}
	if (unlikely(status & tx_hard_error_bump_tc)) {
		/* Try to bump up the dma threshold on this failure */
		if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
			tc += 64;
			priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
			priv->xstats.threshold = tc;
		}
	} else if (unlikely(status == tx_hard_error))
		/* NOTE(review): this is an exact-match comparison, so the
		 * error recovery only runs when tx_hard_error is the sole
		 * bit set in status — confirm this is intended rather than
		 * (status & tx_hard_error).
		 */
		stmmac_tx_err(priv);
}
1389
1390 /**
1391  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
1392  * @priv: driver private structure
1393  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
1394  */
1395 static void stmmac_mmc_setup(struct stmmac_priv *priv)
1396 {
1397         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
1398             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1399
1400         dwmac_mmc_intr_all_mask(priv->ioaddr);
1401
1402         if (priv->dma_cap.rmon) {
1403                 dwmac_mmc_ctrl(priv->ioaddr, mode);
1404                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
1405         } else
1406                 pr_info(" No MAC Management Counters available\n");
1407 }
1408
1409 static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
1410 {
1411         u32 hwid = priv->hw->synopsys_uid;
1412
1413         /* Check Synopsys Id (not available on old chips) */
1414         if (likely(hwid)) {
1415                 u32 uid = ((hwid & 0x0000ff00) >> 8);
1416                 u32 synid = (hwid & 0x000000ff);
1417
1418                 pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
1419                         uid, synid);
1420
1421                 return synid;
1422         }
1423         return 0;
1424 }
1425
1426 /**
1427  * stmmac_selec_desc_mode: to select among: normal/alternate/extend descriptors
1428  * @priv: driver private structure
1429  * Description: select the Enhanced/Alternate or Normal descriptors.
1430  * In case of Enhanced/Alternate, it looks at the extended descriptors are
1431  * supported by the HW cap. register.
1432  */
1433 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
1434 {
1435         if (priv->plat->enh_desc) {
1436                 pr_info(" Enhanced/Alternate descriptors\n");
1437
1438                 /* GMAC older than 3.50 has no extended descriptors */
1439                 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
1440                         pr_info("\tEnabled extended descriptors\n");
1441                         priv->extend_desc = 1;
1442                 } else
1443                         pr_warn("Extended descriptors not supported\n");
1444
1445                 priv->hw->desc = &enh_desc_ops;
1446         } else {
1447                 pr_info(" Normal descriptors\n");
1448                 priv->hw->desc = &ndesc_ops;
1449         }
1450 }
1451
/**
 * stmmac_get_hw_features: get MAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional feature/functions.
 *  This can be also used to override the value passed through the
 *  platform and necessary for old MAC10/100 and GMAC chips.
 * Return: the raw HW feature register value, or 0 when the core does not
 * implement the capability register (priv->dma_cap is then left untouched).
 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
	u32 hw_cap = 0;

	if (priv->hw->dma->get_hw_feature) {
		hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);

		/* Each dma_cap field is a single capability bit shifted
		 * down to bit 0 (the shift counts mirror the register
		 * layout encoded in the DMA_HW_FEAT_* masks).
		 */
		priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
		priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
		priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
		priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
		priv->dma_cap.multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
		priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
		priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
		priv->dma_cap.pmt_remote_wake_up =
		    (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
		priv->dma_cap.pmt_magic_frame =
		    (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
		/* MMC */
		priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
		/* IEEE 1588-2002 */
		priv->dma_cap.time_stamp =
		    (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
		/* IEEE 1588-2008 */
		priv->dma_cap.atime_stamp =
		    (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
		/* 802.3az - Energy-Efficient Ethernet (EEE) */
		priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
		priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
		/* TX and RX csum */
		priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
		priv->dma_cap.rx_coe_type1 =
		    (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
		priv->dma_cap.rx_coe_type2 =
		    (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
		priv->dma_cap.rxfifo_over_2048 =
		    (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
		/* TX and RX number of channels */
		priv->dma_cap.number_rx_channel =
		    (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
		priv->dma_cap.number_tx_channel =
		    (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
		/* Alternate (enhanced) DESC mode */
		priv->dma_cap.enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
	}

	return hw_cap;
}
1509
1510 /**
1511  * stmmac_check_ether_addr: check if the MAC addr is valid
1512  * @priv: driver private structure
1513  * Description:
1514  * it is to verify if the MAC address is valid, in case of failures it
1515  * generates a random MAC address
1516  */
1517 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
1518 {
1519         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
1520                 priv->hw->mac->get_umac_addr((void __iomem *)
1521                                              priv->dev->base_addr,
1522                                              priv->dev->dev_addr, 0);
1523                 if (!is_valid_ether_addr(priv->dev->dev_addr))
1524                         eth_hw_addr_random(priv->dev);
1525         }
1526         pr_warn("%s: device MAC address %pM\n", priv->dev->name,
1527                 priv->dev->dev_addr);
1528 }
1529
1530 /**
1531  * stmmac_init_dma_engine: DMA init.
1532  * @priv: driver private structure
1533  * Description:
1534  * It inits the DMA invoking the specific MAC/GMAC callback.
1535  * Some DMA parameters can be passed from the platform;
1536  * in case of these are not passed a default is kept for the MAC or GMAC.
1537  */
1538 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
1539 {
1540         int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0;
1541         int mixed_burst = 0;
1542         int atds = 0;
1543
1544         if (priv->plat->dma_cfg) {
1545                 pbl = priv->plat->dma_cfg->pbl;
1546                 fixed_burst = priv->plat->dma_cfg->fixed_burst;
1547                 mixed_burst = priv->plat->dma_cfg->mixed_burst;
1548                 burst_len = priv->plat->dma_cfg->burst_len;
1549         }
1550
1551         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
1552                 atds = 1;
1553
1554         return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
1555                                    burst_len, priv->dma_tx_phy,
1556                                    priv->dma_rx_phy, atds);
1557 }
1558
1559 /**
1560  * stmmac_tx_timer: mitigation sw timer for tx.
1561  * @data: data pointer
1562  * Description:
1563  * This is the timer handler to directly invoke the stmmac_tx_clean.
1564  */
1565 static void stmmac_tx_timer(unsigned long data)
1566 {
1567         struct stmmac_priv *priv = (struct stmmac_priv *)data;
1568
1569         stmmac_tx_clean(priv);
1570 }
1571
1572 /**
1573  * stmmac_init_tx_coalesce: init tx mitigation options.
1574  * @priv: driver private structure
1575  * Description:
1576  * This inits the transmit coalesce parameters: i.e. timer rate,
1577  * timer handler and default threshold used for enabling the
1578  * interrupt on completion bit.
1579  */
1580 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
1581 {
1582         priv->tx_coal_frames = STMMAC_TX_FRAMES;
1583         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
1584         init_timer(&priv->txtimer);
1585         priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
1586         priv->txtimer.data = (unsigned long)priv;
1587         priv->txtimer.function = stmmac_tx_timer;
1588         add_timer(&priv->txtimer);
1589 }
1590
1591 /**
1592  *  stmmac_open - open entry point of the driver
1593  *  @dev : pointer to the device structure.
1594  *  Description:
1595  *  This function is the open entry point of the driver.
1596  *  Return value:
1597  *  0 on success and an appropriate (-)ve integer as defined in errno.h
1598  *  file on failure.
1599  */
1600 static int stmmac_open(struct net_device *dev)
1601 {
1602         struct stmmac_priv *priv = netdev_priv(dev);
1603         int ret;
1604
1605         clk_prepare_enable(priv->stmmac_clk);
1606
1607         stmmac_check_ether_addr(priv);
1608
1609         if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
1610             priv->pcs != STMMAC_PCS_RTBI) {
1611                 ret = stmmac_init_phy(dev);
1612                 if (ret) {
1613                         pr_err("%s: Cannot attach to PHY (error: %d)\n",
1614                                __func__, ret);
1615                         goto phy_error;
1616                 }
1617         }
1618
1619         /* Create and initialize the TX/RX descriptors chains. */
1620         priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
1621         priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
1622         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
1623
1624         ret = init_dma_desc_rings(dev);
1625         if (ret < 0) {
1626                 pr_err("%s: DMA descriptors initialization failed\n", __func__);
1627                 goto dma_desc_error;
1628         }
1629
1630         /* DMA initialization and SW reset */
1631         ret = stmmac_init_dma_engine(priv);
1632         if (ret < 0) {
1633                 pr_err("%s: DMA engine initialization failed\n", __func__);
1634                 goto init_error;
1635         }
1636
1637         /* Copy the MAC addr into the HW  */
1638         priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
1639
1640         /* If required, perform hw setup of the bus. */
1641         if (priv->plat->bus_setup)
1642                 priv->plat->bus_setup(priv->ioaddr);
1643
1644         /* Initialize the MAC Core */
1645         priv->hw->mac->core_init(priv->ioaddr);
1646
1647         /* Request the IRQ lines */
1648         ret = request_irq(dev->irq, stmmac_interrupt,
1649                           IRQF_SHARED, dev->name, dev);
1650         if (unlikely(ret < 0)) {
1651                 pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
1652                        __func__, dev->irq, ret);
1653                 goto init_error;
1654         }
1655
1656         /* Request the Wake IRQ in case of another line is used for WoL */
1657         if (priv->wol_irq != dev->irq) {
1658                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
1659                                   IRQF_SHARED, dev->name, dev);
1660                 if (unlikely(ret < 0)) {
1661                         pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n",
1662                                __func__, priv->wol_irq, ret);
1663                         goto wolirq_error;
1664                 }
1665         }
1666
1667         /* Request the IRQ lines */
1668         if (priv->lpi_irq != -ENXIO) {
1669                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
1670                                   dev->name, dev);
1671                 if (unlikely(ret < 0)) {
1672                         pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
1673                                __func__, priv->lpi_irq, ret);
1674                         goto lpiirq_error;
1675                 }
1676         }
1677
1678         /* Enable the MAC Rx/Tx */
1679         stmmac_set_mac(priv->ioaddr, true);
1680
1681         /* Set the HW DMA mode and the COE */
1682         stmmac_dma_operation_mode(priv);
1683
1684         /* Extra statistics */
1685         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
1686         priv->xstats.threshold = tc;
1687
1688         stmmac_mmc_setup(priv);
1689
1690         ret = stmmac_init_ptp(priv);
1691         if (ret)
1692                 pr_warn("%s: failed PTP initialisation\n", __func__);
1693
1694 #ifdef CONFIG_STMMAC_DEBUG_FS
1695         ret = stmmac_init_fs(dev);
1696         if (ret < 0)
1697                 pr_warn("%s: failed debugFS registration\n", __func__);
1698 #endif
1699         /* Start the ball rolling... */
1700         pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
1701         priv->hw->dma->start_tx(priv->ioaddr);
1702         priv->hw->dma->start_rx(priv->ioaddr);
1703
1704         /* Dump DMA/MAC registers */
1705         if (netif_msg_hw(priv)) {
1706                 priv->hw->mac->dump_regs(priv->ioaddr);
1707                 priv->hw->dma->dump_regs(priv->ioaddr);
1708         }
1709
1710         if (priv->phydev)
1711                 phy_start(priv->phydev);
1712
1713         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
1714
1715         priv->eee_enabled = stmmac_eee_init(priv);
1716
1717         stmmac_init_tx_coalesce(priv);
1718
1719         if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
1720                 priv->rx_riwt = MAX_DMA_RIWT;
1721                 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
1722         }
1723
1724         if (priv->pcs && priv->hw->mac->ctrl_ane)
1725                 priv->hw->mac->ctrl_ane(priv->ioaddr, 0);
1726
1727         napi_enable(&priv->napi);
1728         netif_start_queue(dev);
1729
1730         return 0;
1731
1732 lpiirq_error:
1733         if (priv->wol_irq != dev->irq)
1734                 free_irq(priv->wol_irq, dev);
1735 wolirq_error:
1736         free_irq(dev->irq, dev);
1737
1738 init_error:
1739         free_dma_desc_resources(priv);
1740 dma_desc_error:
1741         if (priv->phydev)
1742                 phy_disconnect(priv->phydev);
1743 phy_error:
1744         clk_disable_unprepare(priv->stmmac_clk);
1745
1746         return ret;
1747 }
1748
1749 /**
1750  *  stmmac_release - close entry point of the driver
1751  *  @dev : device pointer.
1752  *  Description:
1753  *  This is the stop entry point of the driver.
1754  */
1755 static int stmmac_release(struct net_device *dev)
1756 {
1757         struct stmmac_priv *priv = netdev_priv(dev);
1758
1759         if (priv->eee_enabled)
1760                 del_timer_sync(&priv->eee_ctrl_timer);
1761
1762         /* Stop and disconnect the PHY */
1763         if (priv->phydev) {
1764                 phy_stop(priv->phydev);
1765                 phy_disconnect(priv->phydev);
1766                 priv->phydev = NULL;
1767         }
1768
1769         netif_stop_queue(dev);
1770
1771         napi_disable(&priv->napi);
1772
1773         del_timer_sync(&priv->txtimer);
1774
1775         /* Free the IRQ lines */
1776         free_irq(dev->irq, dev);
1777         if (priv->wol_irq != dev->irq)
1778                 free_irq(priv->wol_irq, dev);
1779         if (priv->lpi_irq != -ENXIO)
1780                 free_irq(priv->lpi_irq, dev);
1781
1782         /* Stop TX/RX DMA and clear the descriptors */
1783         priv->hw->dma->stop_tx(priv->ioaddr);
1784         priv->hw->dma->stop_rx(priv->ioaddr);
1785
1786         /* Release and free the Rx/Tx resources */
1787         free_dma_desc_resources(priv);
1788
1789         /* Disable the MAC Rx/Tx */
1790         stmmac_set_mac(priv->ioaddr, false);
1791
1792         netif_carrier_off(dev);
1793
1794 #ifdef CONFIG_STMMAC_DEBUG_FS
1795         stmmac_exit_fs();
1796 #endif
1797         clk_disable_unprepare(priv->stmmac_clk);
1798
1799         stmmac_release_ptp(priv);
1800
1801         return 0;
1802 }
1803
1804 /**
1805  *  stmmac_xmit: Tx entry point of the driver
1806  *  @skb : the socket buffer
1807  *  @dev : device pointer
1808  *  Description : this is the tx entry point of the driver.
1809  *  It programs the chain or the ring and supports oversized frames
1810  *  and SG feature.
1811  */
1812 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1813 {
1814         struct stmmac_priv *priv = netdev_priv(dev);
1815         unsigned int txsize = priv->dma_tx_size;
1816         unsigned int entry;
1817         int i, csum_insertion = 0, is_jumbo = 0;
1818         int nfrags = skb_shinfo(skb)->nr_frags;
1819         struct dma_desc *desc, *first;
1820         unsigned int nopaged_len = skb_headlen(skb);
1821
1822         if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
1823                 if (!netif_queue_stopped(dev)) {
1824                         netif_stop_queue(dev);
1825                         /* This is a hard error, log it. */
1826                         pr_err("%s: Tx Ring full when queue awake\n", __func__);
1827                 }
1828                 return NETDEV_TX_BUSY;
1829         }
1830
1831         spin_lock(&priv->tx_lock);
1832
1833         if (priv->tx_path_in_lpi_mode)
1834                 stmmac_disable_eee_mode(priv);
1835
1836         entry = priv->cur_tx % txsize;
1837
1838         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
1839
1840         if (priv->extend_desc)
1841                 desc = (struct dma_desc *)(priv->dma_etx + entry);
1842         else
1843                 desc = priv->dma_tx + entry;
1844
1845         first = desc;
1846
1847         priv->tx_skbuff[entry] = skb;
1848
1849         /* To program the descriptors according to the size of the frame */
1850         if (priv->mode == STMMAC_RING_MODE) {
1851                 is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
1852                                                         priv->plat->enh_desc);
1853                 if (unlikely(is_jumbo))
1854                         entry = priv->hw->ring->jumbo_frm(priv, skb,
1855                                                           csum_insertion);
1856         } else {
1857                 is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len,
1858                                                          priv->plat->enh_desc);
1859                 if (unlikely(is_jumbo))
1860                         entry = priv->hw->chain->jumbo_frm(priv, skb,
1861                                                            csum_insertion);
1862         }
1863         if (likely(!is_jumbo)) {
1864                 desc->des2 = dma_map_single(priv->device, skb->data,
1865                                             nopaged_len, DMA_TO_DEVICE);
1866                 priv->tx_skbuff_dma[entry] = desc->des2;
1867                 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
1868                                                 csum_insertion, priv->mode);
1869         } else
1870                 desc = first;
1871
1872         for (i = 0; i < nfrags; i++) {
1873                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1874                 int len = skb_frag_size(frag);
1875
1876                 entry = (++priv->cur_tx) % txsize;
1877                 if (priv->extend_desc)
1878                         desc = (struct dma_desc *)(priv->dma_etx + entry);
1879                 else
1880                         desc = priv->dma_tx + entry;
1881
1882                 desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
1883                                               DMA_TO_DEVICE);
1884                 priv->tx_skbuff_dma[entry] = desc->des2;
1885                 priv->tx_skbuff[entry] = NULL;
1886                 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
1887                                                 priv->mode);
1888                 wmb();
1889                 priv->hw->desc->set_tx_owner(desc);
1890                 wmb();
1891         }
1892
1893         /* Finalize the latest segment. */
1894         priv->hw->desc->close_tx_desc(desc);
1895
1896         wmb();
1897         /* According to the coalesce parameter the IC bit for the latest
1898          * segment could be reset and the timer re-started to invoke the
1899          * stmmac_tx function. This approach takes care about the fragments.
1900          */
1901         priv->tx_count_frames += nfrags + 1;
1902         if (priv->tx_coal_frames > priv->tx_count_frames) {
1903                 priv->hw->desc->clear_tx_ic(desc);
1904                 priv->xstats.tx_reset_ic_bit++;
1905                 mod_timer(&priv->txtimer,
1906                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
1907         } else
1908                 priv->tx_count_frames = 0;
1909
1910         /* To avoid raise condition */
1911         priv->hw->desc->set_tx_owner(first);
1912         wmb();
1913
1914         priv->cur_tx++;
1915
1916         if (netif_msg_pktdata(priv)) {
1917                 pr_debug("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d",
1918                         __func__, (priv->cur_tx % txsize),
1919                         (priv->dirty_tx % txsize), entry, first, nfrags);
1920
1921                 if (priv->extend_desc)
1922                         stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
1923                 else
1924                         stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
1925
1926                 pr_debug(">>> frame to be transmitted: ");
1927                 print_pkt(skb->data, skb->len);
1928         }
1929         if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
1930                 if (netif_msg_hw(priv))
1931                         pr_debug("%s: stop transmitted packets\n", __func__);
1932                 netif_stop_queue(dev);
1933         }
1934
1935         dev->stats.tx_bytes += skb->len;
1936
1937         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1938                      priv->hwts_tx_en)) {
1939                 /* declare that device is doing timestamping */
1940                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1941                 priv->hw->desc->enable_tx_timestamp(first);
1942         }
1943
1944         if (!priv->hwts_tx_en)
1945                 skb_tx_timestamp(skb);
1946
1947         priv->hw->dma->enable_dma_transmission(priv->ioaddr);
1948
1949         spin_unlock(&priv->tx_lock);
1950
1951         return NETDEV_TX_OK;
1952 }
1953
1954 /**
1955  * stmmac_rx_refill: refill used skb preallocated buffers
1956  * @priv: driver private structure
1957  * Description : this is to reallocate the skb for the reception process
1958  * that is based on zero-copy.
1959  */
1960 static inline void stmmac_rx_refill(struct stmmac_priv *priv)
1961 {
1962         unsigned int rxsize = priv->dma_rx_size;
1963         int bfsize = priv->dma_buf_sz;
1964
1965         for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
1966                 unsigned int entry = priv->dirty_rx % rxsize;
1967                 struct dma_desc *p;
1968
1969                 if (priv->extend_desc)
1970                         p = (struct dma_desc *)(priv->dma_erx + entry);
1971                 else
1972                         p = priv->dma_rx + entry;
1973
1974                 if (likely(priv->rx_skbuff[entry] == NULL)) {
1975                         struct sk_buff *skb;
1976
1977                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
1978
1979                         if (unlikely(skb == NULL))
1980                                 break;
1981
1982                         priv->rx_skbuff[entry] = skb;
1983                         priv->rx_skbuff_dma[entry] =
1984                             dma_map_single(priv->device, skb->data, bfsize,
1985                                            DMA_FROM_DEVICE);
1986
1987                         p->des2 = priv->rx_skbuff_dma[entry];
1988
1989                         priv->hw->ring->refill_desc3(priv, p);
1990
1991                         if (netif_msg_rx_status(priv))
1992                                 pr_debug("\trefill entry #%d\n", entry);
1993                 }
1994                 wmb();
1995                 priv->hw->desc->set_rx_owner(p);
1996                 wmb();
1997         }
1998 }
1999
2000 /**
2001  * stmmac_rx_refill: refill used skb preallocated buffers
2002  * @priv: driver private structure
2003  * @limit: napi bugget.
2004  * Description :  this the function called by the napi poll method.
2005  * It gets all the frames inside the ring.
2006  */
2007 static int stmmac_rx(struct stmmac_priv *priv, int limit)
2008 {
2009         unsigned int rxsize = priv->dma_rx_size;
2010         unsigned int entry = priv->cur_rx % rxsize;
2011         unsigned int next_entry;
2012         unsigned int count = 0;
2013         int coe = priv->plat->rx_coe;
2014
2015         if (netif_msg_rx_status(priv)) {
2016                 pr_debug("%s: descriptor ring:\n", __func__);
2017                 if (priv->extend_desc)
2018                         stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
2019                 else
2020                         stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
2021         }
2022         while (count < limit) {
2023                 int status;
2024                 struct dma_desc *p;
2025
2026                 if (priv->extend_desc)
2027                         p = (struct dma_desc *)(priv->dma_erx + entry);
2028                 else
2029                         p = priv->dma_rx + entry;
2030
2031                 if (priv->hw->desc->get_rx_owner(p))
2032                         break;
2033
2034                 count++;
2035
2036                 next_entry = (++priv->cur_rx) % rxsize;
2037                 if (priv->extend_desc)
2038                         prefetch(priv->dma_erx + next_entry);
2039                 else
2040                         prefetch(priv->dma_rx + next_entry);
2041
2042                 /* read the status of the incoming frame */
2043                 status = priv->hw->desc->rx_status(&priv->dev->stats,
2044                                                    &priv->xstats, p);
2045                 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
2046                         priv->hw->desc->rx_extended_status(&priv->dev->stats,
2047                                                            &priv->xstats,
2048                                                            priv->dma_erx +
2049                                                            entry);
2050                 if (unlikely(status == discard_frame)) {
2051                         priv->dev->stats.rx_errors++;
2052                         if (priv->hwts_rx_en && !priv->extend_desc) {
2053                                 /* DESC2 & DESC3 will be overwitten by device
2054                                  * with timestamp value, hence reinitialize
2055                                  * them in stmmac_rx_refill() function so that
2056                                  * device can reuse it.
2057                                  */
2058                                 priv->rx_skbuff[entry] = NULL;
2059                                 dma_unmap_single(priv->device,
2060                                                  priv->rx_skbuff_dma[entry],
2061                                                  priv->dma_buf_sz,
2062                                                  DMA_FROM_DEVICE);
2063                         }
2064                 } else {
2065                         struct sk_buff *skb;
2066                         int frame_len;
2067
2068                         frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
2069
2070                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
2071                          * Type frames (LLC/LLC-SNAP)
2072                          */
2073                         if (unlikely(status != llc_snap))
2074                                 frame_len -= ETH_FCS_LEN;
2075
2076                         if (netif_msg_rx_status(priv)) {
2077                                 pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
2078                                          p, entry, p->des2);
2079                                 if (frame_len > ETH_FRAME_LEN)
2080                                         pr_debug("\tframe size %d, COE: %d\n",
2081                                                  frame_len, status);
2082                         }
2083                         skb = priv->rx_skbuff[entry];
2084                         if (unlikely(!skb)) {
2085                                 pr_err("%s: Inconsistent Rx descriptor chain\n",
2086                                        priv->dev->name);
2087                                 priv->dev->stats.rx_dropped++;
2088                                 break;
2089                         }
2090                         prefetch(skb->data - NET_IP_ALIGN);
2091                         priv->rx_skbuff[entry] = NULL;
2092
2093                         stmmac_get_rx_hwtstamp(priv, entry, skb);
2094
2095                         skb_put(skb, frame_len);
2096                         dma_unmap_single(priv->device,
2097                                          priv->rx_skbuff_dma[entry],
2098                                          priv->dma_buf_sz, DMA_FROM_DEVICE);
2099
2100                         if (netif_msg_pktdata(priv)) {
2101                                 pr_debug("frame received (%dbytes)", frame_len);
2102                                 print_pkt(skb->data, frame_len);
2103                         }
2104
2105                         skb->protocol = eth_type_trans(skb, priv->dev);
2106
2107                         if (unlikely(!coe))
2108                                 skb_checksum_none_assert(skb);
2109                         else
2110                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2111
2112                         napi_gro_receive(&priv->napi, skb);
2113
2114                         priv->dev->stats.rx_packets++;
2115                         priv->dev->stats.rx_bytes += frame_len;
2116                 }
2117                 entry = next_entry;
2118         }
2119
2120         stmmac_rx_refill(priv);
2121
2122         priv->xstats.rx_pkt_n += count;
2123
2124         return count;
2125 }
2126
2127 /**
2128  *  stmmac_poll - stmmac poll method (NAPI)
2129  *  @napi : pointer to the napi structure.
2130  *  @budget : maximum number of packets that the current CPU can receive from
2131  *            all interfaces.
2132  *  Description :
2133  *  To look at the incoming frames and clear the tx resources.
2134  */
2135 static int stmmac_poll(struct napi_struct *napi, int budget)
2136 {
2137         struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
2138         int work_done = 0;
2139
2140         priv->xstats.napi_poll++;
2141         stmmac_tx_clean(priv);
2142
2143         work_done = stmmac_rx(priv, budget);
2144         if (work_done < budget) {
2145                 napi_complete(napi);
2146                 stmmac_enable_dma_irq(priv);
2147         }
2148         return work_done;
2149 }
2150
2151 /**
2152  *  stmmac_tx_timeout
2153  *  @dev : Pointer to net device structure
2154  *  Description: this function is called when a packet transmission fails to
2155  *   complete within a reasonable time. The driver will mark the error in the
2156  *   netdev structure and arrange for the device to be reset to a sane state
2157  *   in order to transmit a new packet.
2158  */
2159 static void stmmac_tx_timeout(struct net_device *dev)
2160 {
2161         struct stmmac_priv *priv = netdev_priv(dev);
2162
2163         /* Clear Tx resources and restart transmitting again */
2164         stmmac_tx_err(priv);
2165 }
2166
/**
 *  stmmac_config - entry point for configuration changes (passed on by
 *  ifconfig)
 *  @dev : pointer to the device structure
 *  @map : requested hw configuration (I/O address, IRQ, ...)
 *  Description: rejects any attempt to change the I/O address or the IRQ;
 *  only callable while the interface is down.
 *  Return value: 0 when nothing changes, -EBUSY when the interface is up,
 *  -EOPNOTSUPP for unsupported changes.
 */
static int stmmac_config(struct net_device *dev, struct ifmap *map)
{
        if (dev->flags & IFF_UP)        /* can't act on a running interface */
                return -EBUSY;

        /* Don't allow changing the I/O address */
        if (map->base_addr != dev->base_addr) {
                pr_warn("%s: can't change I/O address\n", dev->name);
                return -EOPNOTSUPP;
        }

        /* Don't allow changing the IRQ */
        if (map->irq != dev->irq) {
                pr_warn("%s: not change IRQ number %d\n", dev->name, dev->irq);
                return -EOPNOTSUPP;
        }

        return 0;
}
2187
2188 /**
2189  *  stmmac_set_rx_mode - entry point for multicast addressing
2190  *  @dev : pointer to the device structure
2191  *  Description:
2192  *  This function is a driver entry point which gets called by the kernel
2193  *  whenever multicast addresses must be enabled/disabled.
2194  *  Return value:
2195  *  void.
2196  */
2197 static void stmmac_set_rx_mode(struct net_device *dev)
2198 {
2199         struct stmmac_priv *priv = netdev_priv(dev);
2200
2201         spin_lock(&priv->lock);
2202         priv->hw->mac->set_filter(dev, priv->synopsys_id);
2203         spin_unlock(&priv->lock);
2204 }
2205
2206 /**
2207  *  stmmac_change_mtu - entry point to change MTU size for the device.
2208  *  @dev : device pointer.
2209  *  @new_mtu : the new MTU size for the device.
2210  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
2211  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
2212  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
2213  *  Return value:
2214  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2215  *  file on failure.
2216  */
2217 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
2218 {
2219         struct stmmac_priv *priv = netdev_priv(dev);
2220         int max_mtu;
2221
2222         if (netif_running(dev)) {
2223                 pr_err("%s: must be stopped to change its MTU\n", dev->name);
2224                 return -EBUSY;
2225         }
2226
2227         if (priv->plat->enh_desc)
2228                 max_mtu = JUMBO_LEN;
2229         else
2230                 max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
2231
2232         if ((new_mtu < 46) || (new_mtu > max_mtu)) {
2233                 pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
2234                 return -EINVAL;
2235         }
2236
2237         dev->mtu = new_mtu;
2238         netdev_update_features(dev);
2239
2240         return 0;
2241 }
2242
2243 static netdev_features_t stmmac_fix_features(struct net_device *dev,
2244                                              netdev_features_t features)
2245 {
2246         struct stmmac_priv *priv = netdev_priv(dev);
2247
2248         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
2249                 features &= ~NETIF_F_RXCSUM;
2250         else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1)
2251                 features &= ~NETIF_F_IPV6_CSUM;
2252         if (!priv->plat->tx_coe)
2253                 features &= ~NETIF_F_ALL_CSUM;
2254
2255         /* Some GMAC devices have a bugged Jumbo frame support that
2256          * needs to have the Tx COE disabled for oversized frames
2257          * (due to limited buffer sizes). In this case we disable
2258          * the TX csum insertionin the TDES and not use SF.
2259          */
2260         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
2261                 features &= ~NETIF_F_ALL_CSUM;
2262
2263         return features;
2264 }
2265
2266 /**
2267  *  stmmac_interrupt - main ISR
2268  *  @irq: interrupt number.
2269  *  @dev_id: to pass the net device pointer.
2270  *  Description: this is the main driver interrupt service routine.
2271  *  It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI
2272  *  interrupts.
2273  */
2274 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2275 {
2276         struct net_device *dev = (struct net_device *)dev_id;
2277         struct stmmac_priv *priv = netdev_priv(dev);
2278
2279         if (unlikely(!dev)) {
2280                 pr_err("%s: invalid dev pointer\n", __func__);
2281                 return IRQ_NONE;
2282         }
2283
2284         /* To handle GMAC own interrupts */
2285         if (priv->plat->has_gmac) {
2286                 int status = priv->hw->mac->host_irq_status((void __iomem *)
2287                                                             dev->base_addr,
2288                                                             &priv->xstats);
2289                 if (unlikely(status)) {
2290                         /* For LPI we need to save the tx status */
2291                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
2292                                 priv->tx_path_in_lpi_mode = true;
2293                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
2294                                 priv->tx_path_in_lpi_mode = false;
2295                 }
2296         }
2297
2298         /* To handle DMA interrupts */
2299         stmmac_dma_interrupt(priv);
2300
2301         return IRQ_HANDLED;
2302 }
2303
2304 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 */
static void stmmac_poll_controller(struct net_device *dev)
{
	/* Mask the device line, invoke the ISR by hand, then unmask. */
	disable_irq(dev->irq);
	stmmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
2314 #endif
2315
/**
 *  stmmac_ioctl - Entry point for the Ioctl
 *  @dev: Device pointer.
 *  @rq: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd: IOCTL command
 *  Description:
 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 *  Returns 0 on success, a negative errno otherwise; unknown commands
 *  get -EOPNOTSUPP.
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	/* Only service ioctls while the interface is up. */
	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		/* MII register access is delegated to the attached PHY. */
		if (!priv->phydev)
			return -EINVAL;
		ret = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = stmmac_hwtstamp_ioctl(dev, rq);
		break;
	default:
		break;
	}

	return ret;
}
2350
2351 #ifdef CONFIG_STMMAC_DEBUG_FS
2352 static struct dentry *stmmac_fs_dir;
2353 static struct dentry *stmmac_rings_status;
2354 static struct dentry *stmmac_dma_cap;
2355
2356 static void sysfs_display_ring(void *head, int size, int extend_desc,
2357                                struct seq_file *seq)
2358 {
2359         int i;
2360         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
2361         struct dma_desc *p = (struct dma_desc *)head;
2362
2363         for (i = 0; i < size; i++) {
2364                 u64 x;
2365                 if (extend_desc) {
2366                         x = *(u64 *) ep;
2367                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2368                                    i, (unsigned int)virt_to_phys(ep),
2369                                    (unsigned int)x, (unsigned int)(x >> 32),
2370                                    ep->basic.des2, ep->basic.des3);
2371                         ep++;
2372                 } else {
2373                         x = *(u64 *) p;
2374                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2375                                    i, (unsigned int)virt_to_phys(ep),
2376                                    (unsigned int)x, (unsigned int)(x >> 32),
2377                                    p->des2, p->des3);
2378                         p++;
2379                 }
2380                 seq_printf(seq, "\n");
2381         }
2382 }
2383
2384 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
2385 {
2386         struct net_device *dev = seq->private;
2387         struct stmmac_priv *priv = netdev_priv(dev);
2388         unsigned int txsize = priv->dma_tx_size;
2389         unsigned int rxsize = priv->dma_rx_size;
2390
2391         if (priv->extend_desc) {
2392                 seq_printf(seq, "Extended RX descriptor ring:\n");
2393                 sysfs_display_ring((void *)priv->dma_erx, rxsize, 1, seq);
2394                 seq_printf(seq, "Extended TX descriptor ring:\n");
2395                 sysfs_display_ring((void *)priv->dma_etx, txsize, 1, seq);
2396         } else {
2397                 seq_printf(seq, "RX descriptor ring:\n");
2398                 sysfs_display_ring((void *)priv->dma_rx, rxsize, 0, seq);
2399                 seq_printf(seq, "TX descriptor ring:\n");
2400                 sysfs_display_ring((void *)priv->dma_tx, txsize, 0, seq);
2401         }
2402
2403         return 0;
2404 }
2405
/* Open hook: bind the single-shot seq_file reader to the net_device
 * stashed in i_private by debugfs_create_file().
 */
static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
}
2410
/* File operations for the "descriptors_status" debugfs entry. */
static const struct file_operations stmmac_rings_status_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
2418
2419 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
2420 {
2421         struct net_device *dev = seq->private;
2422         struct stmmac_priv *priv = netdev_priv(dev);
2423
2424         if (!priv->hw_cap_support) {
2425                 seq_printf(seq, "DMA HW features not supported\n");
2426                 return 0;
2427         }
2428
2429         seq_printf(seq, "==============================\n");
2430         seq_printf(seq, "\tDMA HW features\n");
2431         seq_printf(seq, "==============================\n");
2432
2433         seq_printf(seq, "\t10/100 Mbps %s\n",
2434                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
2435         seq_printf(seq, "\t1000 Mbps %s\n",
2436                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
2437         seq_printf(seq, "\tHalf duple %s\n",
2438                    (priv->dma_cap.half_duplex) ? "Y" : "N");
2439         seq_printf(seq, "\tHash Filter: %s\n",
2440                    (priv->dma_cap.hash_filter) ? "Y" : "N");
2441         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
2442                    (priv->dma_cap.multi_addr) ? "Y" : "N");
2443         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfatces): %s\n",
2444                    (priv->dma_cap.pcs) ? "Y" : "N");
2445         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
2446                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
2447         seq_printf(seq, "\tPMT Remote wake up: %s\n",
2448                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
2449         seq_printf(seq, "\tPMT Magic Frame: %s\n",
2450                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
2451         seq_printf(seq, "\tRMON module: %s\n",
2452                    (priv->dma_cap.rmon) ? "Y" : "N");
2453         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
2454                    (priv->dma_cap.time_stamp) ? "Y" : "N");
2455         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp:%s\n",
2456                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
2457         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE) %s\n",
2458                    (priv->dma_cap.eee) ? "Y" : "N");
2459         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
2460         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
2461                    (priv->dma_cap.tx_coe) ? "Y" : "N");
2462         seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
2463                    (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
2464         seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
2465                    (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
2466         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
2467                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
2468         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
2469                    priv->dma_cap.number_rx_channel);
2470         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
2471                    priv->dma_cap.number_tx_channel);
2472         seq_printf(seq, "\tEnhanced descriptors: %s\n",
2473                    (priv->dma_cap.enh_desc) ? "Y" : "N");
2474
2475         return 0;
2476 }
2477
/* Open hook: bind the single-shot seq_file reader to the net_device
 * stashed in i_private by debugfs_create_file().
 */
static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
}
2482
/* File operations for the "dma_cap" debugfs entry. */
static const struct file_operations stmmac_dma_cap_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_dma_cap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
2490
2491 static int stmmac_init_fs(struct net_device *dev)
2492 {
2493         /* Create debugfs entries */
2494         stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
2495
2496         if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
2497                 pr_err("ERROR %s, debugfs create directory failed\n",
2498                        STMMAC_RESOURCE_NAME);
2499
2500                 return -ENOMEM;
2501         }
2502
2503         /* Entry to report DMA RX/TX rings */
2504         stmmac_rings_status = debugfs_create_file("descriptors_status",
2505                                                   S_IRUGO, stmmac_fs_dir, dev,
2506                                                   &stmmac_rings_status_fops);
2507
2508         if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) {
2509                 pr_info("ERROR creating stmmac ring debugfs file\n");
2510                 debugfs_remove(stmmac_fs_dir);
2511
2512                 return -ENOMEM;
2513         }
2514
2515         /* Entry to report the DMA HW features */
2516         stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir,
2517                                              dev, &stmmac_dma_cap_fops);
2518
2519         if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) {
2520                 pr_info("ERROR creating stmmac MMC debugfs file\n");
2521                 debugfs_remove(stmmac_rings_status);
2522                 debugfs_remove(stmmac_fs_dir);
2523
2524                 return -ENOMEM;
2525         }
2526
2527         return 0;
2528 }
2529
/* Remove the debugfs entries created by stmmac_init_fs(). */
static void stmmac_exit_fs(void)
{
	debugfs_remove(stmmac_rings_status);
	debugfs_remove(stmmac_dma_cap);
	debugfs_remove(stmmac_fs_dir);
}
2536 #endif /* CONFIG_STMMAC_DEBUG_FS */
2537
/* Net device operations exported to the networking core. */
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_do_ioctl = stmmac_ioctl,
	.ndo_set_config = stmmac_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = eth_mac_addr,
};
2553
/**
 *  stmmac_hw_init - Init the MAC device
 *  @priv: driver private structure
 *  Description: this function detects which MAC device
 *  (GMAC/MAC10-100) has to be attached, checks the HW capability
 *  (if supported) and sets the driver's features (for example
 *  to use the ring or chain mode or support the normal/enh
 *  descriptor structure).
 *  Returns 0 on success, -ENOMEM if the MAC setup allocation fails.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;
	struct mac_device_info *mac;

	/* Identify the MAC HW device */
	if (priv->plat->has_gmac) {
		priv->dev->priv_flags |= IFF_UNICAST_FLT;
		mac = dwmac1000_setup(priv->ioaddr);
	} else {
		mac = dwmac100_setup(priv->ioaddr);
	}
	if (!mac)
		return -ENOMEM;

	priv->hw = mac;

	/* Get and dump the chip ID */
	priv->synopsys_id = stmmac_get_synopsys_id(priv);

	/* To use the chained or ring mode */
	if (chain_mode) {
		priv->hw->chain = &chain_mode_ops;
		pr_info(" Chain mode enabled\n");
		priv->mode = STMMAC_CHAIN_MODE;
	} else {
		priv->hw->ring = &ring_mode_ops;
		pr_info(" Ring mode enabled\n");
		priv->mode = STMMAC_RING_MODE;
	}

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		pr_info(" DMA HW capability register supported");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;

		priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* Prefer type2 RX checksum offload when both are present. */
		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else
		pr_info(" No HW DMA feature register supported");

	/* To use alternate (extended) or normal descriptor structures */
	stmmac_selec_desc_mode(priv);

	/* If RX IPC cannot be configured, fall back to no RX COE. */
	ret = priv->hw->mac->rx_ipc(priv->ioaddr);
	if (!ret) {
		pr_warn(" RX IPC Checksum Offload not configured.\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
	}

	if (priv->plat->rx_coe)
		pr_info(" RX Checksum Offload Engine supported (type %d)\n",
			priv->plat->rx_coe);
	if (priv->plat->tx_coe)
		pr_info(" TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		pr_info(" Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	return 0;
}
2639
/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @addr: iobase memory address
 * Description: this is the main probe function used to
 * call the alloc_etherdev, allocate the priv structure.
 * Returns the driver private structure on success, NULL on failure
 * (all partially-initialized resources are released).
 */
struct stmmac_priv *stmmac_dvr_probe(struct device *device,
				     struct plat_stmmacenet_data *plat_dat,
				     void __iomem *addr)
{
	int ret = 0;
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;

	ndev = alloc_etherdev(sizeof(struct stmmac_priv));
	if (!ndev)
		return NULL;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	ether_setup(ndev);

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = addr;
	priv->dev->base_addr = (unsigned long)addr;

	/* Verify driver arguments */
	stmmac_verify_args();

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_free_netdev;

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some case, for example on bugged HW this feature
	 * has to be disable and this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		pr_info(" Enable RX Mitigation via HW Watchdog Timer\n");
	}

	netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->tx_lock);

	ret = register_netdev(ndev);
	if (ret) {
		pr_err("%s: ERROR %i registering the device\n", __func__, ret);
		goto error_netdev_register;
	}

	priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME);
	if (IS_ERR(priv->stmmac_clk)) {
		pr_warn("%s: warning: cannot get CSR clock\n", __func__);
		goto error_clk_get;
	}

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Viceversa the driver'll try to
	 * set the MDC clock dynamically according to the csr actual
	 * clock input.
	 */
	if (!priv->plat->clk_csr)
		stmmac_clk_csr_set(priv);
	else
		priv->clk_csr = priv->plat->clk_csr;

	stmmac_check_pcs_mode(priv);

	if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
	    priv->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			pr_debug("%s: MDIO bus (id: %d) registration failed",
				 __func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	return priv;

	/* Error unwind: each label falls through to undo the steps that
	 * completed before the failure, in reverse order.
	 */
error_mdio_register:
	clk_put(priv->stmmac_clk);
error_clk_get:
	unregister_netdev(ndev);
error_netdev_register:
	netif_napi_del(&priv->napi);
error_free_netdev:
	free_netdev(ndev);

	return NULL;
}
2767
/**
 * stmmac_dvr_remove
 * @ndev: net device pointer
 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
 * changes the link status, releases the DMA descriptor rings.
 * Always returns 0.
 */
int stmmac_dvr_remove(struct net_device *ndev)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	pr_info("%s:\n\tremoving driver", __func__);

	/* Quiesce the DMA engines before tearing anything down. */
	priv->hw->dma->stop_rx(priv->ioaddr);
	priv->hw->dma->stop_tx(priv->ioaddr);

	stmmac_set_mac(priv->ioaddr, false);
	/* The MDIO bus is only registered when no PCS mode is in use
	 * (mirrors the registration condition in stmmac_dvr_probe).
	 */
	if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
	    priv->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);
	free_netdev(ndev);

	return 0;
}
2793
2794 #ifdef CONFIG_PM
/* Suspend the interface: stop the PHY and DMA, detach the netdev and
 * either arm the PMT wake-up logic or power the MAC/clock down.
 * Returns 0 (also when the interface is not running).
 */
int stmmac_suspend(struct net_device *ndev)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (!ndev || !netif_running(ndev))
		return 0;

	/* Stop the PHY outside the spinlock: phy_stop may sleep. */
	if (priv->phydev)
		phy_stop(priv->phydev);

	spin_lock_irqsave(&priv->lock, flags);

	netif_device_detach(ndev);
	netif_stop_queue(ndev);

	napi_disable(&priv->napi);

	/* Stop TX/RX DMA */
	priv->hw->dma->stop_tx(priv->ioaddr);
	priv->hw->dma->stop_rx(priv->ioaddr);

	stmmac_clear_descriptors(priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device))
		priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
	else {
		stmmac_set_mac(priv->ioaddr, false);
		/* Disable clock in case PMT is off (presumably "PWM" in the
		 * original comment was a typo for PMT — confirm).
		 */
		clk_disable_unprepare(priv->stmmac_clk);
	}
	spin_unlock_irqrestore(&priv->lock, flags);
	return 0;
}
2830
/* Resume the interface: undo stmmac_suspend() — clear the power-down
 * state or re-enable the clock, restart the MAC/DMA and the PHY.
 * Returns 0 (also when the interface is not running).
 */
int stmmac_resume(struct net_device *ndev)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (!netif_running(ndev))
		return 0;

	spin_lock_irqsave(&priv->lock, flags);

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from another devices (e.g. serial console).
	 */
	if (device_may_wakeup(priv->device))
		priv->hw->mac->pmt(priv->ioaddr, 0);
	else
		/* enable the clk previously disabled */
		clk_prepare_enable(priv->stmmac_clk);

	netif_device_attach(ndev);

	/* Enable the MAC and DMA */
	stmmac_set_mac(priv->ioaddr, true);
	priv->hw->dma->start_tx(priv->ioaddr);
	priv->hw->dma->start_rx(priv->ioaddr);

	napi_enable(&priv->napi);

	netif_start_queue(ndev);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Restart the PHY outside the spinlock: phy_start may sleep. */
	if (priv->phydev)
		phy_start(priv->phydev);

	return 0;
}
2871
/* Hibernation freeze hook: close the interface if it is running. */
int stmmac_freeze(struct net_device *ndev)
{
	if (ndev && netif_running(ndev))
		return stmmac_release(ndev);

	return 0;
}
2879
/* Hibernation restore hook: reopen the interface if it was running. */
int stmmac_restore(struct net_device *ndev)
{
	if (ndev && netif_running(ndev))
		return stmmac_open(ndev);

	return 0;
}
2887 #endif /* CONFIG_PM */
2888
2889 /* Driver can be configured w/ and w/ both PCI and Platf drivers
2890  * depending on the configuration selected.
2891  */
2892 static int __init stmmac_init(void)
2893 {
2894         int ret;
2895
2896         ret = stmmac_register_platform();
2897         if (ret)
2898                 goto err;
2899         ret = stmmac_register_pci();
2900         if (ret)
2901                 goto err_pci;
2902         return 0;
2903 err_pci:
2904         stmmac_unregister_platform();
2905 err:
2906         pr_err("stmmac: driver registration failed\n");
2907         return ret;
2908 }
2909
/* Module exit: unregister both the platform and the PCI glue. */
static void __exit stmmac_exit(void)
{
	stmmac_unregister_platform();
	stmmac_unregister_pci();
}
2915
2916 module_init(stmmac_init);
2917 module_exit(stmmac_exit);
2918
2919 #ifndef MODULE
2920 static int __init stmmac_cmdline_opt(char *str)
2921 {
2922         char *opt;
2923
2924         if (!str || !*str)
2925                 return -EINVAL;
2926         while ((opt = strsep(&str, ",")) != NULL) {
2927                 if (!strncmp(opt, "debug:", 6)) {
2928                         if (kstrtoint(opt + 6, 0, &debug))
2929                                 goto err;
2930                 } else if (!strncmp(opt, "phyaddr:", 8)) {
2931                         if (kstrtoint(opt + 8, 0, &phyaddr))
2932                                 goto err;
2933                 } else if (!strncmp(opt, "dma_txsize:", 11)) {
2934                         if (kstrtoint(opt + 11, 0, &dma_txsize))
2935                                 goto err;
2936                 } else if (!strncmp(opt, "dma_rxsize:", 11)) {
2937                         if (kstrtoint(opt + 11, 0, &dma_rxsize))
2938                                 goto err;
2939                 } else if (!strncmp(opt, "buf_sz:", 7)) {
2940                         if (kstrtoint(opt + 7, 0, &buf_sz))
2941                                 goto err;
2942                 } else if (!strncmp(opt, "tc:", 3)) {
2943                         if (kstrtoint(opt + 3, 0, &tc))
2944                                 goto err;
2945                 } else if (!strncmp(opt, "watchdog:", 9)) {
2946                         if (kstrtoint(opt + 9, 0, &watchdog))
2947                                 goto err;
2948                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
2949                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
2950                                 goto err;
2951                 } else if (!strncmp(opt, "pause:", 6)) {
2952                         if (kstrtoint(opt + 6, 0, &pause))
2953                                 goto err;
2954                 } else if (!strncmp(opt, "eee_timer:", 10)) {
2955                         if (kstrtoint(opt + 10, 0, &eee_timer))
2956                                 goto err;
2957                 } else if (!strncmp(opt, "chain_mode:", 11)) {
2958                         if (kstrtoint(opt + 11, 0, &chain_mode))
2959                                 goto err;
2960                 }
2961         }
2962         return 0;
2963
2964 err:
2965         pr_err("%s: ERROR broken module parameter conversion", __func__);
2966         return -EINVAL;
2967 }
2968
2969 __setup("stmmaceth=", stmmac_cmdline_opt);
2970 #endif /* MODULE */
2971
2972 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
2973 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
2974 MODULE_LICENSE("GPL");