drivers/net/ethernet/amd/amd8111e.c
1
2 /* Advanced  Micro Devices Inc. AMD8111E Linux Network Driver
3  * Copyright (C) 2004 Advanced Micro Devices
4  *
5  *
6  * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
7  * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c]
8  * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
9  * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
10  * Copyright 1993 United States Government as represented by the
11  *      Director, National Security Agency.[ pcnet32.c ]
12  * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
13  * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
14  *
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License as published by
18  * the Free Software Foundation; either version 2 of the License, or
19  * (at your option) any later version.
20  *
21  * This program is distributed in the hope that it will be useful,
22  * but WITHOUT ANY WARRANTY; without even the implied warranty of
23  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
24  * GNU General Public License for more details.
25  *
26  * You should have received a copy of the GNU General Public License
27  * along with this program; if not, see <http://www.gnu.org/licenses/>.
28
29 Module Name:
30
31         amd8111e.c
32
33 Abstract:
34
35          AMD8111 based 10/100 Ethernet Controller Driver.
36
37 Environment:
38
39         Kernel Mode
40
41 Revision History:
42         3.0.0
43            Initial Revision.
44         3.0.1
45          1. Dynamic interrupt coalescing.
46          2. Removed prev_stats.
47          3. MII support.
48          4. Dynamic IPG support
49         3.0.2  05/29/2003
50          1. Bug fix: Fixed failure to send jumbo packets larger than 4k.
51          2. Bug fix: Fixed VLAN support failure.
52          3. Bug fix: Fixed receive interrupt coalescing bug.
53          4. Dynamic IPG support is disabled by default.
54         3.0.3 06/05/2003
55          1. Bug fix: Fixed failure to close the interface if SMP is enabled.
56         3.0.4 12/09/2003
57          1. Added set_mac_address routine for bonding driver support.
58          2. Tested the driver for bonding support
59          3. Bug fix: Fixed mismatch in actual receive buffer length and length
60             indicated to the h/w.
61          4. Modified amd8111e_rx() routine to receive all the received packets
62             in the first interrupt.
63          5. Bug fix: Corrected  rx_errors  reported in get_stats() function.
64         3.0.5 03/22/2004
65          1. Added NAPI support
66
67 */
68
69
70 #include <linux/module.h>
71 #include <linux/kernel.h>
72 #include <linux/types.h>
73 #include <linux/compiler.h>
74 #include <linux/delay.h>
75 #include <linux/init.h>
76 #include <linux/interrupt.h>
77 #include <linux/ioport.h>
78 #include <linux/pci.h>
79 #include <linux/netdevice.h>
80 #include <linux/etherdevice.h>
81 #include <linux/skbuff.h>
82 #include <linux/ethtool.h>
83 #include <linux/mii.h>
84 #include <linux/if_vlan.h>
85 #include <linux/ctype.h>
86 #include <linux/crc32.h>
87 #include <linux/dma-mapping.h>
88
89 #include <asm/io.h>
90 #include <asm/byteorder.h>
91 #include <asm/uaccess.h>
92
93 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
94 #define AMD8111E_VLAN_TAG_USED 1
95 #else
96 #define AMD8111E_VLAN_TAG_USED 0
97 #endif
98
99 #include "amd8111e.h"
100 #define MODULE_NAME     "amd8111e"
101 #define MODULE_VERS     "3.0.7"
102 MODULE_AUTHOR("Advanced Micro Devices, Inc.");
103 MODULE_DESCRIPTION("AMD8111 based 10/100 Ethernet Controller. Driver Version " MODULE_VERS);
104 MODULE_LICENSE("GPL");
105 MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
106 module_param_array(speed_duplex, int, NULL, 0);
107 MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
108 module_param_array(coalesce, bool, NULL, 0);
109 MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
110 module_param_array(dynamic_ipg, bool, NULL, 0);
111 MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
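/* Illustrative usage (not part of the original source): the parameters above
 * are module parameter arrays, so a load such as
 *
 *     modprobe amd8111e speed_duplex=0 coalesce=1 dynamic_ipg=0
 *
 * requests autonegotiation with interrupt coalescing enabled and dynamic IPG
 * disabled for the first adapter.
 */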
112
113 static DEFINE_PCI_DEVICE_TABLE(amd8111e_pci_tbl) = {
114
115         { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
116          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
117         { 0, }
118
119 };
120 /*
121 This function will read the PHY registers.
122 */
123 static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val)
124 {
125         void __iomem *mmio = lp->mmio;
126         unsigned int reg_val;
127         unsigned int repeat= REPEAT_CNT;
128
129         reg_val = readl(mmio + PHY_ACCESS);
130         while (reg_val & PHY_CMD_ACTIVE)
131                 reg_val = readl( mmio + PHY_ACCESS );
132
133         writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
134                            ((reg & 0x1f) << 16),  mmio +PHY_ACCESS);
135         do{
136                 reg_val = readl(mmio + PHY_ACCESS);
137                 udelay(30);  /* It takes 30 us to read/write data */
138         } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
139         if(reg_val & PHY_RD_ERR)
140                 goto err_phy_read;
141
142         *val = reg_val & 0xffff;
143         return 0;
144 err_phy_read:
145         *val = 0;
146         return -EINVAL;
147
148 }
149
150 /*
151 This function will write into PHY registers.
152 */
153 static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val)
154 {
155         unsigned int repeat = REPEAT_CNT;
156         void __iomem *mmio = lp->mmio;
157         unsigned int reg_val;
158
159         reg_val = readl(mmio + PHY_ACCESS);
160         while (reg_val & PHY_CMD_ACTIVE)
161                 reg_val = readl( mmio + PHY_ACCESS );
162
163         writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
164                            ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
165
166         do{
167                 reg_val = readl(mmio + PHY_ACCESS);
168                 udelay(30);  /* It takes 30 us to read/write the data */
169         } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
170
171         if(reg_val & PHY_RD_ERR)
172                 goto err_phy_write;
173
174         return 0;
175
176 err_phy_write:
177         return -EINVAL;
178
179 }
180 /*
181 This is the mii register read function provided to the mii interface.
182 */
183 static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num)
184 {
185         struct amd8111e_priv* lp = netdev_priv(dev);
186         unsigned int reg_val;
187
188         amd8111e_read_phy(lp,phy_id,reg_num,&reg_val);
189         return reg_val;
190
191 }
192
193 /*
194 This is the mii register write function provided to the mii interface.
195 */
196 static void amd8111e_mdio_write(struct net_device * dev, int phy_id, int reg_num, int val)
197 {
198         struct amd8111e_priv* lp = netdev_priv(dev);
199
200         amd8111e_write_phy(lp, phy_id, reg_num, val);
201 }
202
203 /*
204 This function will set the PHY speed. During initialization, it sets the speed to 100 Mbps full duplex.
205 */
206 static void amd8111e_set_ext_phy(struct net_device *dev)
207 {
208         struct amd8111e_priv *lp = netdev_priv(dev);
209         u32 bmcr,advert,tmp;
210
211         /* Determine mii register values to set the speed */
212         advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
213         tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
214         switch (lp->ext_phy_option){
215
216                 default:
217                 case SPEED_AUTONEG: /* advertise all values */
218                         tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL|
219                                 ADVERTISE_100HALF|ADVERTISE_100FULL) ;
220                         break;
221                 case SPEED10_HALF:
222                         tmp |= ADVERTISE_10HALF;
223                         break;
224                 case SPEED10_FULL:
225                         tmp |= ADVERTISE_10FULL;
226                         break;
227                 case SPEED100_HALF:
228                         tmp |= ADVERTISE_100HALF;
229                         break;
230                 case SPEED100_FULL:
231                         tmp |= ADVERTISE_100FULL;
232                         break;
233         }
234
235         if(advert != tmp)
236                 amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp);
237         /* Restart auto negotiation */
238         bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR);
239         bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
240         amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr);
241
242 }
243
244 /*
245 This function will unmap skb->data space and will free
246 all transmit and receive skbuffs.
247 */
248 static int amd8111e_free_skbs(struct net_device *dev)
249 {
250         struct amd8111e_priv *lp = netdev_priv(dev);
251         struct sk_buff* rx_skbuff;
252         int i;
253
254         /* Freeing transmit skbs */
255         for(i = 0; i < NUM_TX_BUFFERS; i++){
256                 if(lp->tx_skbuff[i]){
257                         pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
                                        lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
258                         dev_kfree_skb (lp->tx_skbuff[i]);
259                         lp->tx_skbuff[i] = NULL;
260                         lp->tx_dma_addr[i] = 0;
261                 }
262         }
263         /* Freeing previously allocated receive buffers */
264         for (i = 0; i < NUM_RX_BUFFERS; i++){
265                 rx_skbuff = lp->rx_skbuff[i];
266                 if(rx_skbuff != NULL){
267                         pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[i],
268                                   lp->rx_buff_len - 2,PCI_DMA_FROMDEVICE);
269                         dev_kfree_skb(lp->rx_skbuff[i]);
270                         lp->rx_skbuff[i] = NULL;
271                         lp->rx_dma_addr[i] = 0;
272                 }
273         }
274
275         return 0;
276 }
277
278 /*
279 This will set the receive buffer length corresponding to the MTU size of the network interface.
280 */
281 static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
282 {
283         struct amd8111e_priv* lp = netdev_priv(dev);
284         unsigned int mtu = dev->mtu;
285
286         if (mtu > ETH_DATA_LEN){
287                 /* MTU + ethernet header + FCS
288                 + optional VLAN tag + skb reserve space 2 */
289
290                 lp->rx_buff_len = mtu + ETH_HLEN + 10;
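                /* With ETH_HLEN covering the header, the extra 10 bytes are
                 * presumably 4 (FCS) + 4 (VLAN tag) + 2 (skb reserve),
                 * matching the breakdown in the comment above. */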
291                 lp->options |= OPTION_JUMBO_ENABLE;
292         } else{
293                 lp->rx_buff_len = PKT_BUFF_SZ;
294                 lp->options &= ~OPTION_JUMBO_ENABLE;
295         }
296 }
297
298 /*
299 This function will free all the previously allocated buffers, determine the new receive buffer length and allocate new receive buffers. It also allocates and initializes both the transmit and receive hardware descriptors.
300  */
301 static int amd8111e_init_ring(struct net_device *dev)
302 {
303         struct amd8111e_priv *lp = netdev_priv(dev);
304         int i;
305
306         lp->rx_idx = lp->tx_idx = 0;
307         lp->tx_complete_idx = 0;
308         lp->tx_ring_idx = 0;
309
310
311         if(lp->opened)
312                 /* Free previously allocated transmit and receive skbs */
313                 amd8111e_free_skbs(dev);
314
315         else{
316                  /* allocate the tx and rx descriptors */
317                 if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
318                         sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
319                         &lp->tx_ring_dma_addr)) == NULL)
320
321                         goto err_no_mem;
322
323                 if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
324                         sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
325                         &lp->rx_ring_dma_addr)) == NULL)
326
327                         goto err_free_tx_ring;
328
329         }
330         /* Set new receive buff size */
331         amd8111e_set_rx_buff_len(dev);
332
333         /* Allocating receive  skbs */
334         for (i = 0; i < NUM_RX_BUFFERS; i++) {
335
336                 lp->rx_skbuff[i] = netdev_alloc_skb(dev, lp->rx_buff_len);
337                 if (!lp->rx_skbuff[i]) {
338                                 /* Release previously allocated skbs */
339                                 for(--i; i >= 0 ;i--)
340                                         dev_kfree_skb(lp->rx_skbuff[i]);
341                                 goto err_free_rx_ring;
342                 }
343                 skb_reserve(lp->rx_skbuff[i],2);
344         }
345         /* Initializing receive descriptors */
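        /* Note: buff_phy_addr and buff_count are written first, and the wmb()
         * below orders those stores before OWN_BIT is set, so the hardware
         * never sees a descriptor it owns with stale address/length fields. */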
346         for (i = 0; i < NUM_RX_BUFFERS; i++) {
347                 lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev,
348                         lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
349
350                 lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
351                 lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
352                 wmb();
353                 lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
354         }
355
356         /* Initializing transmit descriptors */
357         for (i = 0; i < NUM_TX_RING_DR; i++) {
358                 lp->tx_ring[i].buff_phy_addr = 0;
359                 lp->tx_ring[i].tx_flags = 0;
360                 lp->tx_ring[i].buff_count = 0;
361         }
362
363         return 0;
364
365 err_free_rx_ring:
366
367         pci_free_consistent(lp->pci_dev,
368                 sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring,
369                 lp->rx_ring_dma_addr);
370
371 err_free_tx_ring:
372
373         pci_free_consistent(lp->pci_dev,
374                  sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring,
375                  lp->tx_ring_dma_addr);
376
377 err_no_mem:
378         return -ENOMEM;
379 }
380 /* This function will set the interrupt coalescing according to the input arguments */
381 static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
382 {
383         unsigned int timeout;
384         unsigned int event_count;
385
386         struct amd8111e_priv *lp = netdev_priv(dev);
387         void __iomem *mmio = lp->mmio;
388         struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
389
390
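        /* As used below, the delay registers encode the event count in bits
         * 16 and up and the timeout (in DELAY_TIMER_CONV units) in the low
         * bits, alongside the DLY_INT_A_R0 / DLY_INT_B_T0 select bits. */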
391         switch(cmod)
392         {
393                 case RX_INTR_COAL :
394                         timeout = coal_conf->rx_timeout;
395                         event_count = coal_conf->rx_event_count;
396                         if( timeout > MAX_TIMEOUT ||
397                                         event_count > MAX_EVENT_COUNT )
398                                 return -EINVAL;
399
400                         timeout = timeout * DELAY_TIMER_CONV;
401                         writel(VAL0|STINTEN, mmio+INTEN0);
402                         writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout,
403                                                         mmio+DLY_INT_A);
404                         break;
405
406                 case TX_INTR_COAL :
407                         timeout = coal_conf->tx_timeout;
408                         event_count = coal_conf->tx_event_count;
409                         if( timeout > MAX_TIMEOUT ||
410                                         event_count > MAX_EVENT_COUNT )
411                                 return -EINVAL;
412
413
414                         timeout = timeout * DELAY_TIMER_CONV;
415                         writel(VAL0|STINTEN,mmio+INTEN0);
416                         writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout,
417                                                          mmio+DLY_INT_B);
418                         break;
419
420                 case DISABLE_COAL:
421                         writel(0,mmio+STVAL);
422                         writel(STINTEN, mmio+INTEN0);
423                         writel(0, mmio +DLY_INT_B);
424                         writel(0, mmio+DLY_INT_A);
425                         break;
426                  case ENABLE_COAL:
427                        /* Start the timer */
428                         writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /*  0.5 sec */
429                         writel(VAL0|STINTEN, mmio+INTEN0);
430                         break;
431                 default:
432                         break;
433
434    }
435         return 0;
436
437 }
438
439 /*
440 This function initializes the device registers  and starts the device.
441 */
442 static int amd8111e_restart(struct net_device *dev)
443 {
444         struct amd8111e_priv *lp = netdev_priv(dev);
445         void __iomem *mmio = lp->mmio;
446         int i,reg_val;
447
448         /* stop the chip */
449          writel(RUN, mmio + CMD0);
450
451         if(amd8111e_init_ring(dev))
452                 return -ENOMEM;
453
454         /* enable the port manager and set auto negotiation always */
455         writel((u32) VAL1|EN_PMGR, mmio + CMD3 );
456         writel((u32)XPHYANE|XPHYRST , mmio + CTRL2);
457
458         amd8111e_set_ext_phy(dev);
459
460         /* set control registers */
461         reg_val = readl(mmio + CTRL1);
462         reg_val &= ~XMTSP_MASK;
463         writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 );
464
465         /* enable interrupt */
466         writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
467                 APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
468                 SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);
469
470         writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);
471
472         /* initialize tx and rx ring base addresses */
473         writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0);
474         writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0);
475
476         writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
477         writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);
478
479         /* set default IPG to 96 */
480         writew((u32)DEFAULT_IPG,mmio+IPG);
481         writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1);
482
483         if(lp->options & OPTION_JUMBO_ENABLE){
484                 writel((u32)VAL2|JUMBO, mmio + CMD3);
485                 /* Reset REX_UFLO */
486                 writel( REX_UFLO, mmio + CMD2);
487                 /* Should not set REX_UFLO for jumbo frames */
488                 writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2);
489         }else{
490                 writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2);
491                 writel((u32)JUMBO, mmio + CMD3);
492         }
493
494 #if AMD8111E_VLAN_TAG_USED
495         writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3);
496 #endif
497         writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
498
499         /* Setting the MAC address to the device */
500         for (i = 0; i < ETH_ALEN; i++)
501                 writeb( dev->dev_addr[i], mmio + PADR + i );
502
503         /* Enable interrupt coalesce */
504         if(lp->options & OPTION_INTR_COAL_ENABLE){
505                 printk(KERN_INFO "%s: Interrupt Coalescing Enabled.\n",
506                                                                 dev->name);
507                 amd8111e_set_coalesce(dev,ENABLE_COAL);
508         }
509
510         /* set RUN bit to start the chip */
511         writel(VAL2 | RDMD0, mmio + CMD0);
512         writel(VAL0 | INTREN | RUN, mmio + CMD0);
513
514         /* To avoid PCI posting bug */
515         readl(mmio+CMD0);
516         return 0;
517 }
518 /*
519 This function clears the necessary device registers.
520 */
521 static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
522 {
523         unsigned int reg_val;
524         unsigned int logic_filter[2] ={0,};
525         void __iomem *mmio = lp->mmio;
526
527
528         /* stop the chip */
529         writel(RUN, mmio + CMD0);
530
531         /* AUTOPOLL0 Register. TBD: default value is 8100 in FPS */
532         writew( 0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0);
533
534         /* Clear RCV_RING_BASE_ADDR */
535         writel(0, mmio + RCV_RING_BASE_ADDR0);
536
537         /* Clear XMT_RING_BASE_ADDR */
538         writel(0, mmio + XMT_RING_BASE_ADDR0);
539         writel(0, mmio + XMT_RING_BASE_ADDR1);
540         writel(0, mmio + XMT_RING_BASE_ADDR2);
541         writel(0, mmio + XMT_RING_BASE_ADDR3);
542
543         /* Clear CMD0  */
544         writel(CMD0_CLEAR,mmio + CMD0);
545
546         /* Clear CMD2 */
547         writel(CMD2_CLEAR, mmio +CMD2);
548
549         /* Clear CMD7 */
550         writel(CMD7_CLEAR , mmio + CMD7);
551
552         /* Clear DLY_INT_A and DLY_INT_B */
553         writel(0x0, mmio + DLY_INT_A);
554         writel(0x0, mmio + DLY_INT_B);
555
556         /* Clear FLOW_CONTROL */
557         writel(0x0, mmio + FLOW_CONTROL);
558
559         /* Clear INT0  write 1 to clear register */
560         reg_val = readl(mmio + INT0);
561         writel(reg_val, mmio + INT0);
562
563         /* Clear STVAL */
564         writel(0x0, mmio + STVAL);
565
566         /* Clear INTEN0 */
567         writel( INTEN0_CLEAR, mmio + INTEN0);
568
569         /* Clear LADRF */
570         writel(0x0 , mmio + LADRF);
571
572         /* Set SRAM_SIZE & SRAM_BOUNDARY registers  */
573         writel( 0x80010,mmio + SRAM_SIZE);
574
575         /* Clear RCV_RING0_LEN */
576         writel(0x0, mmio +  RCV_RING_LEN0);
577
578         /* Clear XMT_RING0/1/2/3_LEN */
579         writel(0x0, mmio +  XMT_RING_LEN0);
580         writel(0x0, mmio +  XMT_RING_LEN1);
581         writel(0x0, mmio +  XMT_RING_LEN2);
582         writel(0x0, mmio +  XMT_RING_LEN3);
583
584         /* Clear XMT_RING_LIMIT */
585         writel(0x0, mmio + XMT_RING_LIMIT);
586
587         /* Clear MIB */
588         writew(MIB_CLEAR, mmio + MIB_ADDR);
589
590         /* Clear LADRF */
591         amd8111e_writeq(*(u64*)logic_filter,mmio+LADRF);
592
593         /* SRAM_SIZE register */
594         reg_val = readl(mmio + SRAM_SIZE);
595
596         if(lp->options & OPTION_JUMBO_ENABLE)
597                 writel( VAL2|JUMBO, mmio + CMD3);
598 #if AMD8111E_VLAN_TAG_USED
599         writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 );
600 #endif
601         /* Set default value to CTRL1 Register */
602         writel(CTRL1_DEFAULT, mmio + CTRL1);
603
604         /* To avoid PCI posting bug */
605         readl(mmio + CMD2);
606
607 }
608
609 /*
610 This function disables the interrupt and clears all the pending
611 interrupts in INT0
612  */
613 static void amd8111e_disable_interrupt(struct amd8111e_priv* lp)
614 {
615         u32 intr0;
616
617         /* Disable interrupt */
618         writel(INTREN, lp->mmio + CMD0);
619
620         /* Clear INT0 */
621         intr0 = readl(lp->mmio + INT0);
622         writel(intr0, lp->mmio + INT0);
623
624         /* To avoid PCI posting bug */
625         readl(lp->mmio + INT0);
626
627 }
628
629 /*
630 This function stops the chip.
631 */
632 static void amd8111e_stop_chip(struct amd8111e_priv* lp)
633 {
634         writel(RUN, lp->mmio + CMD0);
635
636         /* To avoid PCI posting bug */
637         readl(lp->mmio + CMD0);
638 }
639
640 /*
641 This function frees the transmit and receive descriptor rings.
642 */
643 static void amd8111e_free_ring(struct amd8111e_priv* lp)
644 {
645         /* Free transmit and receive descriptor rings */
646         if(lp->rx_ring){
647                 pci_free_consistent(lp->pci_dev,
648                         sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
649                         lp->rx_ring, lp->rx_ring_dma_addr);
650                 lp->rx_ring = NULL;
651         }
652
653         if(lp->tx_ring){
654                 pci_free_consistent(lp->pci_dev,
655                         sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
656                         lp->tx_ring, lp->tx_ring_dma_addr);
657
658                 lp->tx_ring = NULL;
659         }
660
661 }
662
663 /*
664 This function will free all the transmit skbs that have actually been transmitted by the device. It checks the ownership of the descriptor before freeing each skb.
665 */
666 static int amd8111e_tx(struct net_device *dev)
667 {
668         struct amd8111e_priv* lp = netdev_priv(dev);
669         int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
670         int status;
671         /* Complete all the transmit packet */
672         while (lp->tx_complete_idx != lp->tx_idx){
673                 tx_index =  lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
674                 status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);
675
676                 if(status & OWN_BIT)
677                         break;  /* It still hasn't been Txed */
678
679                 lp->tx_ring[tx_index].buff_phy_addr = 0;
680
681                 /* We must free the original skb */
682                 if (lp->tx_skbuff[tx_index]) {
683                         pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
684                                         lp->tx_skbuff[tx_index]->len,
685                                         PCI_DMA_TODEVICE);
686                         dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
687                         lp->tx_skbuff[tx_index] = NULL;
688                         lp->tx_dma_addr[tx_index] = 0;
689                 }
690                 lp->tx_complete_idx++;
691                 /*COAL update tx coalescing parameters */
692                 lp->coal_conf.tx_packets++;
693                 lp->coal_conf.tx_bytes +=
694                         le16_to_cpu(lp->tx_ring[tx_index].buff_count);
695
696                 if (netif_queue_stopped(dev) &&
697                         lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){
698                         /* The ring is no longer full, clear tbusy. */
699                         /* lp->tx_full = 0; */
700                         netif_wake_queue (dev);
701                 }
702         }
703         return 0;
704 }
705
706 /* This function handles the driver receive operation in polling mode */
707 static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
708 {
709         struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi);
710         struct net_device *dev = lp->amd8111e_net_dev;
711         int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
712         void __iomem *mmio = lp->mmio;
713         struct sk_buff *skb,*new_skb;
714         int min_pkt_len, status;
715         unsigned int intr0;
716         int num_rx_pkt = 0;
717         short pkt_len;
718 #if AMD8111E_VLAN_TAG_USED
719         short vtag;
720 #endif
721         int rx_pkt_limit = budget;
722         unsigned long flags;
723
724         do{
725                 /* process receive packets until we use the quota*/
726                 /* If we own the next entry, it's a new packet. Send it up. */
727                 while(1) {
728                         status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
729                         if (status & OWN_BIT)
730                                 break;
731
732                         /*
733                          * There is a tricky error noted by John Murphy,
734                          * <murf@perftech.com> to Russ Nelson: Even with
735                          * full-sized buffers it's possible for a
736                          * jabber packet to use two buffers, with only
737                          * the last correctly noting the error.
738                          */
739
740                         if(status & ERR_BIT) {
741                                 /* resetting flags */
742                                 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
743                                 goto err_next_pkt;
744                         }
745                         /* check for STP and ENP */
746                         if(!((status & STP_BIT) && (status & ENP_BIT))){
747                                 /* resetting flags */
748                                 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
749                                 goto err_next_pkt;
750                         }
751                         pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
752
753 #if AMD8111E_VLAN_TAG_USED
754                         vtag = status & TT_MASK;
755                         /*MAC will strip vlan tag*/
756                         if (vtag != 0)
757                                 min_pkt_len =MIN_PKT_LEN - 4;
758                         else
759 #endif
760                                 min_pkt_len =MIN_PKT_LEN;
761
762                         if (pkt_len < min_pkt_len) {
763                                 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
764                                 lp->drv_rx_errors++;
765                                 goto err_next_pkt;
766                         }
767                         if(--rx_pkt_limit < 0)
768                                 goto rx_not_empty;
769                         new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
770                         if (!new_skb) {
771                                 /* if allocation fails,
772                                    ignore that pkt and go to next one */
773                                 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
774                                 lp->drv_rx_errors++;
775                                 goto err_next_pkt;
776                         }
777
778                         skb_reserve(new_skb, 2);
779                         skb = lp->rx_skbuff[rx_index];
780                         pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
781                                          lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
782                         skb_put(skb, pkt_len);
783                         lp->rx_skbuff[rx_index] = new_skb;
784                         lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
785                                                                    new_skb->data,
786                                                                    lp->rx_buff_len-2,
787                                                                    PCI_DMA_FROMDEVICE);
788
789                         skb->protocol = eth_type_trans(skb, dev);
790
791 #if AMD8111E_VLAN_TAG_USED
792                         if (vtag == TT_VLAN_TAGGED){
793                                 u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
794                                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
795                         }
796 #endif
797                         netif_receive_skb(skb);
798                         /*COAL update rx coalescing parameters*/
799                         lp->coal_conf.rx_packets++;
800                         lp->coal_conf.rx_bytes += pkt_len;
801                         num_rx_pkt++;
802
803                 err_next_pkt:
804                         lp->rx_ring[rx_index].buff_phy_addr
805                                 = cpu_to_le32(lp->rx_dma_addr[rx_index]);
806                         lp->rx_ring[rx_index].buff_count =
807                                 cpu_to_le16(lp->rx_buff_len-2);
808                         wmb();
809                         lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
810                         rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
811                 }
812                 /* Check the interrupt status register for more packets in the
813                    meantime. Process them since we have not used up our quota. */
814
815                 intr0 = readl(mmio + INT0);
816                 /*Ack receive packets */
817                 writel(intr0 & RINT0,mmio + INT0);
818
819         } while(intr0 & RINT0);
820
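        /* Budget not exhausted: the receive ring has been drained, so complete
         * NAPI, re-enable the receive interrupt (RINTEN0) and poll receive
         * demand (RDMD0) so that the next incoming packet raises RINT0 again. */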
821         if (rx_pkt_limit > 0) {
822                 /* Receive descriptor is empty now */
823                 spin_lock_irqsave(&lp->lock, flags);
824                 __napi_complete(napi);
825                 writel(VAL0|RINTEN0, mmio + INTEN0);
826                 writel(VAL2 | RDMD0, mmio + CMD0);
827                 spin_unlock_irqrestore(&lp->lock, flags);
828         }
829
830 rx_not_empty:
831         return num_rx_pkt;
832 }
833
834 /*
835 This function will indicate the link status to the kernel.
836 */
837 static int amd8111e_link_change(struct net_device* dev)
838 {
839         struct amd8111e_priv *lp = netdev_priv(dev);
840         int status0,speed;
841
842         /* read the link change */
843         status0 = readl(lp->mmio + STAT0);
844
845         if(status0 & LINK_STATS){
846                 if(status0 & AUTONEG_COMPLETE)
847                         lp->link_config.autoneg = AUTONEG_ENABLE;
848                 else
849                         lp->link_config.autoneg = AUTONEG_DISABLE;
850
851                 if(status0 & FULL_DPLX)
852                         lp->link_config.duplex = DUPLEX_FULL;
853                 else
854                         lp->link_config.duplex = DUPLEX_HALF;
855                 speed = (status0 & SPEED_MASK) >> 7;
856                 if(speed == PHY_SPEED_10)
857                         lp->link_config.speed = SPEED_10;
858                 else if(speed == PHY_SPEED_100)
859                         lp->link_config.speed = SPEED_100;
860
861                 printk(KERN_INFO "%s: Link is Up. Speed is %s Mbps %s Duplex\n",
                       dev->name,
862                        (lp->link_config.speed == SPEED_100) ? "100": "10",
863                        (lp->link_config.duplex == DUPLEX_FULL)? "Full": "Half");
864                 netif_carrier_on(dev);
865         }
866         else{
867                 lp->link_config.speed = SPEED_INVALID;
868                 lp->link_config.duplex = DUPLEX_INVALID;
869                 lp->link_config.autoneg = AUTONEG_INVALID;
870                 printk(KERN_INFO "%s: Link is Down.\n",dev->name);
871                 netif_carrier_off(dev);
872         }
873
874         return 0;
875 }
876 /*
877 This function reads the mib counters.
878 */
879 static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
880 {
881         unsigned int  status;
882         unsigned  int data;
883         unsigned int repeat = REPEAT_CNT;
884
885         writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
886         do {
887                 status = readw(mmio + MIB_ADDR);
888                 udelay(2);      /* controller takes MAX 2 us to get mib data */
889         }
890         while (--repeat && (status & MIB_CMD_ACTIVE));
891
892         data = readl(mmio + MIB_DATA);
893         return data;
894 }
895
896 /*
897  * This function reads the mib registers and returns the hardware statistics.
898  * It updates previous internal driver statistics with new values.
899  */
900 static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
901 {
902         struct amd8111e_priv *lp = netdev_priv(dev);
903         void __iomem *mmio = lp->mmio;
904         unsigned long flags;
905         struct net_device_stats *new_stats = &dev->stats;
906
907         if (!lp->opened)
908                 return new_stats;
909         spin_lock_irqsave (&lp->lock, flags);
910
911         /* stats.rx_packets */
912         new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
913                                 amd8111e_read_mib(mmio, rcv_multicast_pkts)+
914                                 amd8111e_read_mib(mmio, rcv_unicast_pkts);
915
916         /* stats.tx_packets */
917         new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);
918
919         /*stats.rx_bytes */
920         new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);
921
922         /* stats.tx_bytes */
923         new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);
924
925         /* stats.rx_errors */
926         /* hw errors + errors driver reported */
927         new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+
928                                 amd8111e_read_mib(mmio, rcv_fragments)+
929                                 amd8111e_read_mib(mmio, rcv_jabbers)+
930                                 amd8111e_read_mib(mmio, rcv_alignment_errors)+
931                                 amd8111e_read_mib(mmio, rcv_fcs_errors)+
932                                 amd8111e_read_mib(mmio, rcv_miss_pkts)+
933                                 lp->drv_rx_errors;
934
935         /* stats.tx_errors */
936         new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
937
938         /* stats.rx_dropped*/
939         new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);
940
941         /* stats.tx_dropped*/
942         new_stats->tx_dropped = amd8111e_read_mib(mmio,  xmt_underrun_pkts);
943
944         /* stats.multicast*/
945         new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);
946
947         /* stats.collisions*/
948         new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);
949
950         /* stats.rx_length_errors*/
951         new_stats->rx_length_errors =
952                 amd8111e_read_mib(mmio, rcv_undersize_pkts)+
953                 amd8111e_read_mib(mmio, rcv_oversize_pkts);
954
955         /* stats.rx_over_errors*/
956         new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
957
958         /* stats.rx_crc_errors*/
959         new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);
960
961         /* stats.rx_frame_errors*/
962         new_stats->rx_frame_errors =
963                 amd8111e_read_mib(mmio, rcv_alignment_errors);
964
965         /* stats.rx_fifo_errors */
966         new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
967
968         /* stats.rx_missed_errors */
969         new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
970
971         /* stats.tx_aborted_errors*/
972         new_stats->tx_aborted_errors =
973                 amd8111e_read_mib(mmio, xmt_excessive_collision);
974
975         /* stats.tx_carrier_errors*/
976         new_stats->tx_carrier_errors =
977                 amd8111e_read_mib(mmio, xmt_loss_carrier);
978
979         /* stats.tx_fifo_errors*/
980         new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
981
982         /* stats.tx_window_errors*/
983         new_stats->tx_window_errors =
984                 amd8111e_read_mib(mmio, xmt_late_collision);
985
986         /* Reset the mibs for collecting new statistics */
987         /* writew(MIB_CLEAR, mmio + MIB_ADDR);*/
988
989         spin_unlock_irqrestore (&lp->lock, flags);
990
991         return new_stats;
992 }
993 /* This function recalculates the interrupt coalescing mode on every interrupt
994 according to the data rate and the packet rate.
995 */
996 static int amd8111e_calc_coalesce(struct net_device *dev)
997 {
998         struct amd8111e_priv *lp = netdev_priv(dev);
999         struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
1000         int tx_pkt_rate;
1001         int rx_pkt_rate;
1002         int tx_data_rate;
1003         int rx_data_rate;
1004         int rx_pkt_size;
1005         int tx_pkt_size;
1006
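        /* This runs from the soft timer interrupt (STINT, programmed to
         * roughly 0.5 s per the ENABLE_COAL comment above), so the deltas
         * below are per-interval packet and byte rates.  A low packet rate
         * disables coalescing; otherwise the average packet size selects the
         * LOW/MEDIUM/HIGH coalescing settings. */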
1007         tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
1008         coal_conf->tx_prev_packets =  coal_conf->tx_packets;
1009
1010         tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
1011         coal_conf->tx_prev_bytes =  coal_conf->tx_bytes;
1012
1013         rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
1014         coal_conf->rx_prev_packets =  coal_conf->rx_packets;
1015
1016         rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
1017         coal_conf->rx_prev_bytes =  coal_conf->rx_bytes;
1018
1019         if(rx_pkt_rate < 800){
1020                 if(coal_conf->rx_coal_type != NO_COALESCE){
1021
1022                         coal_conf->rx_timeout = 0x0;
1023                         coal_conf->rx_event_count = 0;
1024                         amd8111e_set_coalesce(dev,RX_INTR_COAL);
1025                         coal_conf->rx_coal_type = NO_COALESCE;
1026                 }
1027         }
1028         else{
1029
1030                 rx_pkt_size = rx_data_rate/rx_pkt_rate;
1031                 if (rx_pkt_size < 128){
1032                         if(coal_conf->rx_coal_type != NO_COALESCE){
1033
1034                                 coal_conf->rx_timeout = 0;
1035                                 coal_conf->rx_event_count = 0;
1036                                 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1037                                 coal_conf->rx_coal_type = NO_COALESCE;
1038                         }
1039
1040                 }
1041                 else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){
1042
1043                         if(coal_conf->rx_coal_type !=  LOW_COALESCE){
1044                                 coal_conf->rx_timeout = 1;
1045                                 coal_conf->rx_event_count = 4;
1046                                 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1047                                 coal_conf->rx_coal_type = LOW_COALESCE;
1048                         }
1049                 }
1050                 else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){
1051
1052                         if(coal_conf->rx_coal_type !=  MEDIUM_COALESCE){
1053                                 coal_conf->rx_timeout = 1;
1054                                 coal_conf->rx_event_count = 4;
1055                                 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1056                                 coal_conf->rx_coal_type = MEDIUM_COALESCE;
1057                         }
1058
1059                 }
1060                 else if(rx_pkt_size >= 1024){
1061                         if(coal_conf->rx_coal_type !=  HIGH_COALESCE){
1062                                 coal_conf->rx_timeout = 2;
1063                                 coal_conf->rx_event_count = 3;
1064                                 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1065                                 coal_conf->rx_coal_type = HIGH_COALESCE;
1066                         }
1067                 }
1068         }
1069         /* Now for TX interrupt coalescing */
1070         if(tx_pkt_rate < 800){
1071                 if(coal_conf->tx_coal_type != NO_COALESCE){
1072
1073                         coal_conf->tx_timeout = 0x0;
1074                         coal_conf->tx_event_count = 0;
1075                         amd8111e_set_coalesce(dev,TX_INTR_COAL);
1076                         coal_conf->tx_coal_type = NO_COALESCE;
1077                 }
1078         }
1079         else{
1080
1081                 tx_pkt_size = tx_data_rate/tx_pkt_rate;
1082                 if (tx_pkt_size < 128){
1083
1084                         if(coal_conf->tx_coal_type != NO_COALESCE){
1085
1086                                 coal_conf->tx_timeout = 0;
1087                                 coal_conf->tx_event_count = 0;
1088                                 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1089                                 coal_conf->tx_coal_type = NO_COALESCE;
1090                         }
1091
1092                 }
1093                 else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){
1094
1095                         if(coal_conf->tx_coal_type !=  LOW_COALESCE){
1096                                 coal_conf->tx_timeout = 1;
1097                                 coal_conf->tx_event_count = 2;
1098                                 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1099                                 coal_conf->tx_coal_type = LOW_COALESCE;
1100
1101                         }
1102                 }
1103                 else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){
1104
1105                         if(coal_conf->tx_coal_type !=  MEDIUM_COALESCE){
1106                                 coal_conf->tx_timeout = 2;
1107                                 coal_conf->tx_event_count = 5;
1108                                 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1109                                 coal_conf->tx_coal_type = MEDIUM_COALESCE;
1110                         }
1111
1112                 }
1113                 else if(tx_pkt_size >= 1024){
1114                         if(coal_conf->tx_coal_type !=  HIGH_COALESCE){
1115                                 coal_conf->tx_timeout = 4;
1116                                 coal_conf->tx_event_count = 8;
1117                                 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1118                                 coal_conf->tx_coal_type = HIGH_COALESCE;
1119                         }
1120                 }
1123         }
1124         return 0;
1125
1126 }
1127 /*
1128 This is the device interrupt function. It handles transmit, receive, link change and hardware timer interrupts.
1129 */
1130 static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
1131 {
1132
1133         struct net_device * dev = (struct net_device *) dev_id;
1134         struct amd8111e_priv *lp = netdev_priv(dev);
1135         void __iomem *mmio = lp->mmio;
1136         unsigned int intr0, intren0;
1137         unsigned int handled = 1;
1138
1139         if(unlikely(dev == NULL))
1140                 return IRQ_NONE;
1141
1142         spin_lock(&lp->lock);
1143
1144         /* disabling interrupt */
1145         writel(INTREN, mmio + CMD0);
1146
1147         /* Read interrupt status */
1148         intr0 = readl(mmio + INT0);
1149         intren0 = readl(mmio + INTEN0);
1150
1151         /* Process all the INT event until INTR bit is clear. */
1152
1153         if (!(intr0 & INTR)){
1154                 handled = 0;
1155                 goto err_no_interrupt;
1156         }
1157
1158         /* Current driver processes 4 interrupts : RINT,TINT,LCINT,STINT */
1159         writel(intr0, mmio + INT0);
1160
1161         /* Check if Receive Interrupt has occurred. */
1162         if (intr0 & RINT0) {
1163                 if (napi_schedule_prep(&lp->napi)) {
1164                         /* Disable receive interrupts */
1165                         writel(RINTEN0, mmio + INTEN0);
1166                         /* Schedule a polling routine */
1167                         __napi_schedule(&lp->napi);
1168                 } else if (intren0 & RINTEN0) {
1169                         printk("************Driver bug! interrupt while in poll\n");
1170                         /* Fix by disable receive interrupts */
1171                         writel(RINTEN0, mmio + INTEN0);
1172                 }
1173         }
1174
1175         /* Check if  Transmit Interrupt has occurred. */
1176         if (intr0 & TINT0)
1177                 amd8111e_tx(dev);
1178
1179         /* Check if  Link Change Interrupt has occurred. */
1180         if (intr0 & LCINT)
1181                 amd8111e_link_change(dev);
1182
1183         /* Check if Hardware Timer Interrupt has occurred. */
1184         if (intr0 & STINT)
1185                 amd8111e_calc_coalesce(dev);
1186
1187 err_no_interrupt:
1188         writel( VAL0 | INTREN,mmio + CMD0);
1189
1190         spin_unlock(&lp->lock);
1191
1192         return IRQ_RETVAL(handled);
1193 }
1194
1195 #ifdef CONFIG_NET_POLL_CONTROLLER
1196 static void amd8111e_poll(struct net_device *dev)
1197 {
1198         unsigned long flags;
1199         local_irq_save(flags);
1200         amd8111e_interrupt(0, dev);
1201         local_irq_restore(flags);
1202 }
1203 #endif
1204
1205
1206 /*
1207 This function closes the network interface and updates the statistics so that the most recent statistics will be available after the interface is down.
1208 */
1209 static int amd8111e_close(struct net_device * dev)
1210 {
1211         struct amd8111e_priv *lp = netdev_priv(dev);
1212         netif_stop_queue(dev);
1213
1214         napi_disable(&lp->napi);
1215
1216         spin_lock_irq(&lp->lock);
1217
1218         amd8111e_disable_interrupt(lp);
1219         amd8111e_stop_chip(lp);
1220
1221         /* Free transmit and receive skbs */
1222         amd8111e_free_skbs(lp->amd8111e_net_dev);
1223
1224         netif_carrier_off(lp->amd8111e_net_dev);
1225
1226         /* Delete ipg timer */
1227         if(lp->options & OPTION_DYN_IPG_ENABLE)
1228                 del_timer_sync(&lp->ipg_data.ipg_timer);
1229
1230         spin_unlock_irq(&lp->lock);
1231         free_irq(dev->irq, dev);
1232         amd8111e_free_ring(lp);
1233
1234         /* Update the statistics before closing */
1235         amd8111e_get_stats(dev);
1236         lp->opened = 0;
1237         return 0;
1238 }
1239 /* This function opens a new interface. It requests an IRQ for the device, initializes the device, buffers and descriptors, and starts the device.
1240 */
1241 static int amd8111e_open(struct net_device * dev )
1242 {
1243         struct amd8111e_priv *lp = netdev_priv(dev);
1244
1245         if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, IRQF_SHARED,
1246                                          dev->name, dev))
1247                 return -EAGAIN;
1248
1249         napi_enable(&lp->napi);
1250
1251         spin_lock_irq(&lp->lock);
1252
1253         amd8111e_init_hw_default(lp);
1254
1255         if(amd8111e_restart(dev)){
1256                 spin_unlock_irq(&lp->lock);
1257                 napi_disable(&lp->napi);
1258                 if (dev->irq)
1259                         free_irq(dev->irq, dev);
1260                 return -ENOMEM;
1261         }
1262         /* Start ipg timer */
1263         if(lp->options & OPTION_DYN_IPG_ENABLE){
1264                 add_timer(&lp->ipg_data.ipg_timer);
1265                 printk(KERN_INFO "%s: Dynamic IPG Enabled.\n",dev->name);
1266         }
1267
1268         lp->opened = 1;
1269
1270         spin_unlock_irq(&lp->lock);
1271
1272         netif_start_queue(dev);
1273
1274         return 0;
1275 }
1276 /*
1277 This function checks if there are any transmit descriptors available to queue more packets.
1278 */
1279 static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
1280 {
1281         int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
1282         if (lp->tx_skbuff[tx_index])
1283                 return -1;
1284         else
1285                 return 0;
1286
1287 }
1288 /*
1289 This function will queue the transmit packets to the descriptors and will trigger the send operation. It also initializes the transmit descriptors with the buffer physical address, byte count, ownership to the hardware, etc.
1290 */
1291
1292 static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
1293                                        struct net_device * dev)
1294 {
1295         struct amd8111e_priv *lp = netdev_priv(dev);
1296         int tx_index;
1297         unsigned long flags;
1298
1299         spin_lock_irqsave(&lp->lock, flags);
1300
1301         tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;
1302
1303         lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);
1304
1305         lp->tx_skbuff[tx_index] = skb;
1306         lp->tx_ring[tx_index].tx_flags = 0;
1307
1308 #if AMD8111E_VLAN_TAG_USED
1309         if (vlan_tx_tag_present(skb)) {
1310                 lp->tx_ring[tx_index].tag_ctrl_cmd |=
1311                                 cpu_to_le16(TCC_VLAN_INSERT);
1312                 lp->tx_ring[tx_index].tag_ctrl_info =
1313                                 cpu_to_le16(vlan_tx_tag_get(skb));
1314
1315         }
1316 #endif
1317         lp->tx_dma_addr[tx_index] =
1318             pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
1319         lp->tx_ring[tx_index].buff_phy_addr =
1320             cpu_to_le32(lp->tx_dma_addr[tx_index]);
1321
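        /* The wmb() below orders the buffer address and count stores above
         * before the flag write that hands the descriptor to the hardware. */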
1322         /*  Set FCS and LTINT bits */
1323         wmb();
1324         lp->tx_ring[tx_index].tx_flags |=
1325             cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT);
1326
1327         lp->tx_idx++;
1328
1329         /* Trigger an immediate send poll. */
1330         writel( VAL1 | TDMD0, lp->mmio + CMD0);
1331         writel( VAL2 | RDMD0,lp->mmio + CMD0);
1332
1333         if(amd8111e_tx_queue_avail(lp) < 0){
1334                 netif_stop_queue(dev);
1335         }
1336         spin_unlock_irqrestore(&lp->lock, flags);
1337         return NETDEV_TX_OK;
1338 }
1339 /*
1340 This function returns all the memory mapped registers of the device.
1341 */
1342 static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
1343 {
1344         void __iomem *mmio = lp->mmio;
1345         /* Read only necessary registers */
1346         buf[0] = readl(mmio + XMT_RING_BASE_ADDR0);
1347         buf[1] = readl(mmio + XMT_RING_LEN0);
1348         buf[2] = readl(mmio + RCV_RING_BASE_ADDR0);
1349         buf[3] = readl(mmio + RCV_RING_LEN0);
1350         buf[4] = readl(mmio + CMD0);
1351         buf[5] = readl(mmio + CMD2);
1352         buf[6] = readl(mmio + CMD3);
1353         buf[7] = readl(mmio + CMD7);
1354         buf[8] = readl(mmio + INT0);
1355         buf[9] = readl(mmio + INTEN0);
1356         buf[10] = readl(mmio + LADRF);
1357         buf[11] = readl(mmio + LADRF+4);
1358         buf[12] = readl(mmio + STAT0);
1359 }
1360
1361
1362 /*
1363 This function sets promiscuous mode, all-multi mode or the multicast address
1364 list to the device.
1365 */
1366 static void amd8111e_set_multicast_list(struct net_device *dev)
1367 {
1368         struct netdev_hw_addr *ha;
1369         struct amd8111e_priv *lp = netdev_priv(dev);
1370         u32 mc_filter[2] ;
1371         int bit_num;
1372
1373         if(dev->flags & IFF_PROMISC){
1374                 writel( VAL2 | PROM, lp->mmio + CMD2);
1375                 return;
1376         }
1377         else
1378                 writel( PROM, lp->mmio + CMD2);
1379         if (dev->flags & IFF_ALLMULTI ||
1380             netdev_mc_count(dev) > MAX_FILTER_SIZE) {
1381                 /* get all multicast packet */
1382                 mc_filter[1] = mc_filter[0] = 0xffffffff;
1383                 lp->options |= OPTION_MULTICAST_ENABLE;
1384                 amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
1385                 return;
1386         }
1387         if (netdev_mc_empty(dev)) {
1388                 /* get only own packets */
1389                 mc_filter[1] = mc_filter[0] = 0;
1390                 lp->options &= ~OPTION_MULTICAST_ENABLE;
1391                 amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
1392                 /* disable promiscuous mode */
1393                 writel(PROM, lp->mmio + CMD2);
1394                 return;
1395         }
1396         /* load all the multicast addresses in the logic filter */
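        /* Hash filter: the top 6 bits of the little-endian CRC-32 of each
         * address select one of the 64 LADRF bits (word = bit_num >> 5,
         * bit = bit_num & 31). */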
1397         lp->options |= OPTION_MULTICAST_ENABLE;
1398         mc_filter[1] = mc_filter[0] = 0;
1399         netdev_for_each_mc_addr(ha, dev) {
1400                 bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
1401                 mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
1402         }
1403         amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
1404
1405         /* To eliminate PCI posting bug */
1406         readl(lp->mmio + CMD2);
1407
1408 }
1409
1410 static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo *info)
1411 {
1412         struct amd8111e_priv *lp = netdev_priv(dev);
1413         struct pci_dev *pci_dev = lp->pci_dev;
1414         strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
1415         strlcpy(info->version, MODULE_VERS, sizeof(info->version));
1416         snprintf(info->fw_version, sizeof(info->fw_version),
1417                 "%u", chip_version);
1418         strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
1419 }
1420
1421 static int amd8111e_get_regs_len(struct net_device *dev)
1422 {
1423         return AMD8111E_REG_DUMP_LEN;
1424 }
1425
1426 static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
1427 {
1428         struct amd8111e_priv *lp = netdev_priv(dev);
1429         regs->version = 0;
1430         amd8111e_read_regs(lp, buf);
1431 }
1432
1433 static int amd8111e_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1434 {
1435         struct amd8111e_priv *lp = netdev_priv(dev);
1436         spin_lock_irq(&lp->lock);
1437         mii_ethtool_gset(&lp->mii_if, ecmd);
1438         spin_unlock_irq(&lp->lock);
1439         return 0;
1440 }
1441
1442 static int amd8111e_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1443 {
1444         struct amd8111e_priv *lp = netdev_priv(dev);
1445         int res;
1446         spin_lock_irq(&lp->lock);
1447         res = mii_ethtool_sset(&lp->mii_if, ecmd);
1448         spin_unlock_irq(&lp->lock);
1449         return res;
1450 }
1451
1452 static int amd8111e_nway_reset(struct net_device *dev)
1453 {
1454         struct amd8111e_priv *lp = netdev_priv(dev);
1455         return mii_nway_restart(&lp->mii_if);
1456 }
1457
1458 static u32 amd8111e_get_link(struct net_device *dev)
1459 {
1460         struct amd8111e_priv *lp = netdev_priv(dev);
1461         return mii_link_ok(&lp->mii_if);
1462 }
1463
1464 static void amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
1465 {
1466         struct amd8111e_priv *lp = netdev_priv(dev);
1467         wol_info->supported = WAKE_MAGIC|WAKE_PHY;
1468         if (lp->options & OPTION_WOL_ENABLE)
1469                 wol_info->wolopts = WAKE_MAGIC;
1470 }
1471
1472 static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
1473 {
1474         struct amd8111e_priv *lp = netdev_priv(dev);
1475         if (wol_info->wolopts & ~(WAKE_MAGIC|WAKE_PHY))
1476                 return -EINVAL;
1477         spin_lock_irq(&lp->lock);
1478         if (wol_info->wolopts & WAKE_MAGIC)
1479                 lp->options |=
1480                         (OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
1481         else if(wol_info->wolopts & WAKE_PHY)
1482                 lp->options |=
1483                         (OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
1484         else
1485                 lp->options &= ~OPTION_WOL_ENABLE;
1486         spin_unlock_irq(&lp->lock);
1487         return 0;
1488 }
1489
1490 static const struct ethtool_ops ops = {
1491         .get_drvinfo = amd8111e_get_drvinfo,
1492         .get_regs_len = amd8111e_get_regs_len,
1493         .get_regs = amd8111e_get_regs,
1494         .get_settings = amd8111e_get_settings,
1495         .set_settings = amd8111e_set_settings,
1496         .nway_reset = amd8111e_nway_reset,
1497         .get_link = amd8111e_get_link,
1498         .get_wol = amd8111e_get_wol,
1499         .set_wol = amd8111e_set_wol,
1500 };
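
/*
 * These operations back the userspace ethtool(8) interface.  For example
 * (interface name eth0 is hypothetical):
 *
 *	ethtool -i eth0                        ->  amd8111e_get_drvinfo()
 *	ethtool eth0                           ->  amd8111e_get_settings()/_get_link()
 *	ethtool -s eth0 speed 100 duplex full  ->  amd8111e_set_settings()
 *	ethtool -d eth0                        ->  amd8111e_get_regs_len()/_get_regs()
 *	ethtool -r eth0                        ->  amd8111e_nway_reset()
 *	ethtool -s eth0 wol g                  ->  amd8111e_set_wol() with WAKE_MAGIC
 */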
1501
1502 /* This function handles the MII ioctls: SIOCGMIIPHY returns the address of
1503  * the external PHY, and SIOCGMIIREG/SIOCSMIIREG read and write its
1504  * registers under the driver lock. */
1505
1506 static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd)
1507 {
1508         struct mii_ioctl_data *data = if_mii(ifr);
1509         struct amd8111e_priv *lp = netdev_priv(dev);
1510         int err;
1511         u32 mii_regval;
1512
1513         switch(cmd) {
1514         case SIOCGMIIPHY:
1515                 data->phy_id = lp->ext_phy_addr;
1516
1517         /* fallthru */
1518         case SIOCGMIIREG:
1519
1520                 spin_lock_irq(&lp->lock);
1521                 err = amd8111e_read_phy(lp, data->phy_id,
1522                         data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
1523                 spin_unlock_irq(&lp->lock);
1524
1525                 data->val_out = mii_regval;
1526                 return err;
1527
1528         case SIOCSMIIREG:
1529
1530                 spin_lock_irq(&lp->lock);
1531                 err = amd8111e_write_phy(lp, data->phy_id,
1532                         data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
1533                 spin_unlock_irq(&lp->lock);
1534
1535                 return err;
1536
1537         default:
1538                 /* do nothing */
1539                 break;
1540         }
1541         return -EOPNOTSUPP;
1542 }
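
/*
 * Minimal userspace sketch of the MII ioctl path above, in the usual
 * mii-tool style (interface name eth0 and the BMSR read are illustrative;
 * error handling omitted):
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/mii.h>
 *	#include <linux/sockios.h>
 *
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ifreq ifr;
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(s, SIOCGMIIPHY, &ifr);    // fills mii->phy_id
 *	mii->reg_num = MII_BMSR;
 *	ioctl(s, SIOCGMIIREG, &ifr);    // BMSR ends up in mii->val_out
 *
 * The link is up if BMSR_LSTATUS is set in mii->val_out.
 */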
1543 static int amd8111e_set_mac_address(struct net_device *dev, void *p)
1544 {
1545         struct amd8111e_priv *lp = netdev_priv(dev);
1546         int i;
1547         struct sockaddr *addr = p;
1548
1549         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1550         spin_lock_irq(&lp->lock);
1551         /* Write the new MAC address to the device */
1552         for (i = 0; i < ETH_ALEN; i++)
1553                 writeb( dev->dev_addr[i], lp->mmio + PADR + i );
1554
1555         spin_unlock_irq(&lp->lock);
1556
1557         return 0;
1558 }
1559
1560 /* This function changes the MTU of the device.  It restarts the device to
1561  * reinitialize the descriptors with receive buffers sized for the new MTU.
1562  */
1563 static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
1564 {
1565         struct amd8111e_priv *lp = netdev_priv(dev);
1566         int err;
1567
1568         if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU))
1569                 return -EINVAL;
1570
1571         if (!netif_running(dev)) {
1572                 /* new_mtu will be used
1573                  * when the device is started the next time */
1574                 dev->mtu = new_mtu;
1575                 return 0;
1576         }
1577
1578         spin_lock_irq(&lp->lock);
1579
1580         /* stop the chip */
1581         writel(RUN, lp->mmio + CMD0);
1582
1583         dev->mtu = new_mtu;
1584
1585         err = amd8111e_restart(dev);
1586         spin_unlock_irq(&lp->lock);
1587         if(!err)
1588                 netif_start_queue(dev);
1589         return err;
1590 }
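
/*
 * This handler is reached through dev_set_mtu(), e.g. when userspace runs
 * something like (interface name and size are hypothetical; the value must
 * lie between AMD8111E_MIN_MTU and AMD8111E_MAX_MTU):
 *
 *	ip link set dev eth0 mtu 4000
 *
 * For a running interface the chip is stopped, dev->mtu is updated and
 * amd8111e_restart() rebuilds the descriptor rings with receive buffers
 * sized for the new MTU.
 */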
1591
1592 static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
1593 {
1594         writel( VAL1|MPPLBA, lp->mmio + CMD3);
1595         writel( VAL0|MPEN_SW, lp->mmio + CMD7);
1596
1597         /* To eliminate PCI posting bug */
1598         readl(lp->mmio + CMD7);
1599         return 0;
1600 }
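
/*
 * Background: a Wake-on-LAN "magic packet" is a frame that contains six
 * 0xFF bytes followed by sixteen consecutive copies of the adapter's MAC
 * address.  The CMD3/CMD7 writes above arm the controller's magic packet
 * detection so that receiving such a frame can wake the system while it
 * is suspended.
 */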
1601
1602 static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
1603 {
1604
1605         /* The adapter is already stopped/suspended/interrupt-disabled */
1606         writel(VAL0|LCMODE_SW,lp->mmio + CMD7);
1607
1608         /* To eliminate PCI posting bug */
1609         readl(lp->mmio + CMD7);
1610         return 0;
1611 }
1612
1613 /*
1614  * This function is called when a packet transmission fails to complete
1615  * within a reasonable period, on the assumption that an interrupt has
1616  * failed or the interface is locked up. This function will reinitialize
1617  * the hardware.
1618  */
1619 static void amd8111e_tx_timeout(struct net_device *dev)
1620 {
1621         struct amd8111e_priv* lp = netdev_priv(dev);
1622         int err;
1623
1624         printk(KERN_ERR "%s: transmit timed out, resetting\n",
1625                                                       dev->name);
1626         spin_lock_irq(&lp->lock);
1627         err = amd8111e_restart(dev);
1628         spin_unlock_irq(&lp->lock);
1629         if(!err)
1630                 netif_wake_queue(dev);
1631 }
1632 static int amd8111e_suspend(struct pci_dev *pci_dev, pm_message_t state)
1633 {
1634         struct net_device *dev = pci_get_drvdata(pci_dev);
1635         struct amd8111e_priv *lp = netdev_priv(dev);
1636
1637         if (!netif_running(dev))
1638                 return 0;
1639
1640         /* disable the interrupt */
1641         spin_lock_irq(&lp->lock);
1642         amd8111e_disable_interrupt(lp);
1643         spin_unlock_irq(&lp->lock);
1644
1645         netif_device_detach(dev);
1646
1647         /* stop chip */
1648         spin_lock_irq(&lp->lock);
1649         if(lp->options & OPTION_DYN_IPG_ENABLE)
1650                 del_timer_sync(&lp->ipg_data.ipg_timer);
1651         amd8111e_stop_chip(lp);
1652         spin_unlock_irq(&lp->lock);
1653
1654         if(lp->options & OPTION_WOL_ENABLE){
1655                  /* enable wol */
1656                 if(lp->options & OPTION_WAKE_MAGIC_ENABLE)
1657                         amd8111e_enable_magicpkt(lp);
1658                 if(lp->options & OPTION_WAKE_PHY_ENABLE)
1659                         amd8111e_enable_link_change(lp);
1660
1661                 pci_enable_wake(pci_dev, PCI_D3hot, 1);
1662                 pci_enable_wake(pci_dev, PCI_D3cold, 1);
1663
1664         }
1665         else{
1666                 pci_enable_wake(pci_dev, PCI_D3hot, 0);
1667                 pci_enable_wake(pci_dev, PCI_D3cold, 0);
1668         }
1669
1670         pci_save_state(pci_dev);
1671         pci_set_power_state(pci_dev, PCI_D3hot);
1672
1673         return 0;
1674 }
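
/*
 * Note on the suspend sequence above: wake-up is armed for both D3hot and
 * D3cold so the configured WOL method survives the deeper sleep state, and
 * pci_save_state() is called before dropping to D3hot so the resume path
 * below can restore the configuration space with pci_restore_state() and
 * then restart the MAC.
 */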
1675 static int amd8111e_resume(struct pci_dev *pci_dev)
1676 {
1677         struct net_device *dev = pci_get_drvdata(pci_dev);
1678         struct amd8111e_priv *lp = netdev_priv(dev);
1679
1680         if (!netif_running(dev))
1681                 return 0;
1682
1683         pci_set_power_state(pci_dev, PCI_D0);
1684         pci_restore_state(pci_dev);
1685
1686         pci_enable_wake(pci_dev, PCI_D3hot, 0);
1687         pci_enable_wake(pci_dev, PCI_D3cold, 0); /* D3 cold */
1688
1689         netif_device_attach(dev);
1690
1691         spin_lock_irq(&lp->lock);
1692         amd8111e_restart(dev);
1693         /* Restart ipg timer */
1694         if(lp->options & OPTION_DYN_IPG_ENABLE)
1695                 mod_timer(&lp->ipg_data.ipg_timer,
1696                                 jiffies + IPG_CONVERGE_JIFFIES);
1697         spin_unlock_irq(&lp->lock);
1698
1699         return 0;
1700 }
1701
1702
1703 static void amd8111e_remove_one(struct pci_dev *pdev)
1704 {
1705         struct net_device *dev = pci_get_drvdata(pdev);
1706         if (dev) {
1707                 unregister_netdev(dev);
1708                 iounmap(((struct amd8111e_priv *)netdev_priv(dev))->mmio);
1709                 free_netdev(dev);
1710                 pci_release_regions(pdev);
1711                 pci_disable_device(pdev);
1712         }
1713 }
1714 static void amd8111e_config_ipg(struct net_device* dev)
1715 {
1716         struct amd8111e_priv *lp = netdev_priv(dev);
1717         struct ipg_info* ipg_data = &lp->ipg_data;
1718         void __iomem *mmio = lp->mmio;
1719         unsigned int prev_col_cnt = ipg_data->col_cnt;
1720         unsigned int total_col_cnt;
1721         unsigned int tmp_ipg;
1722
1723         if(lp->link_config.duplex == DUPLEX_FULL){
1724                 ipg_data->ipg = DEFAULT_IPG;
1725                 return;
1726         }
1727
1728         if(ipg_data->ipg_state == SSTATE){
1729
1730                 if(ipg_data->timer_tick == IPG_STABLE_TIME){
1731
1732                         ipg_data->timer_tick = 0;
1733                         ipg_data->ipg = MIN_IPG - IPG_STEP;
1734                         ipg_data->current_ipg = MIN_IPG;
1735                         ipg_data->diff_col_cnt = 0xFFFFFFFF;
1736                         ipg_data->ipg_state = CSTATE;
1737                 }
1738                 else
1739                         ipg_data->timer_tick++;
1740         }
1741
1742         if(ipg_data->ipg_state == CSTATE){
1743
1744                 /* Get the current collision count */
1745
1746                 total_col_cnt = ipg_data->col_cnt =
1747                                 amd8111e_read_mib(mmio, xmt_collisions);
1748
1749                 if ((total_col_cnt - prev_col_cnt) <
1750                                 (ipg_data->diff_col_cnt)){
1751
1752                         ipg_data->diff_col_cnt =
1753                                 total_col_cnt - prev_col_cnt ;
1754
1755                         ipg_data->ipg = ipg_data->current_ipg;
1756                 }
1757
1758                 ipg_data->current_ipg += IPG_STEP;
1759
1760                 if (ipg_data->current_ipg <= MAX_IPG)
1761                         tmp_ipg = ipg_data->current_ipg;
1762                 else{
1763                         tmp_ipg = ipg_data->ipg;
1764                         ipg_data->ipg_state = SSTATE;
1765                 }
1766                 writew((u32)tmp_ipg, mmio + IPG);
1767                 writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
1768         }
1769         mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
1771
1772 }
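
/*
 * Summary of the dynamic IPG heuristic above (used on half-duplex links
 * only; full duplex always uses DEFAULT_IPG): in CSTATE the driver sweeps
 * current_ipg from MIN_IPG to MAX_IPG in IPG_STEP increments, once per
 * IPG_CONVERGE_JIFFIES timer tick.  Each candidate is written to the
 * IPG/IFS1 registers and the growth of the transmit collision counter
 * since the previous tick is compared with the best seen so far; the
 * candidate with the smallest growth is remembered in ipg_data->ipg.
 * When the sweep passes MAX_IPG that best value is programmed and the
 * state machine rests in SSTATE for IPG_STABLE_TIME ticks before a new
 * sweep begins.
 */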
1773
1774 static void amd8111e_probe_ext_phy(struct net_device *dev)
1775 {
1776         struct amd8111e_priv *lp = netdev_priv(dev);
1777         int i;
1778
1779         for (i = 0x1e; i >= 0; i--) {
1780                 u32 id1, id2;
1781
1782                 if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1))
1783                         continue;
1784                 if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2))
1785                         continue;
1786                 lp->ext_phy_id = (id1 << 16) | id2;
1787                 lp->ext_phy_addr = i;
1788                 return;
1789         }
1790         lp->ext_phy_id = 0;
1791         lp->ext_phy_addr = 1;
1792 }
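
/*
 * MII_PHYSID1 and MII_PHYSID2 together form the 32-bit PHY identifier
 * (OUI plus model and revision), so a nonzero ext_phy_id records which
 * PHY answered and at which address.  The scan above runs from address
 * 0x1e down to 0 and falls back to address 1 with an id of 0 when no
 * PHY responds.
 */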
1793
1794 static const struct net_device_ops amd8111e_netdev_ops = {
1795         .ndo_open               = amd8111e_open,
1796         .ndo_stop               = amd8111e_close,
1797         .ndo_start_xmit         = amd8111e_start_xmit,
1798         .ndo_tx_timeout         = amd8111e_tx_timeout,
1799         .ndo_get_stats          = amd8111e_get_stats,
1800         .ndo_set_rx_mode        = amd8111e_set_multicast_list,
1801         .ndo_validate_addr      = eth_validate_addr,
1802         .ndo_set_mac_address    = amd8111e_set_mac_address,
1803         .ndo_do_ioctl           = amd8111e_ioctl,
1804         .ndo_change_mtu         = amd8111e_change_mtu,
1805 #ifdef CONFIG_NET_POLL_CONTROLLER
1806         .ndo_poll_controller     = amd8111e_poll,
1807 #endif
1808 };
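
/*
 * These entry points are invoked by the networking core: ndo_open/ndo_stop
 * when the interface is brought up or down, ndo_start_xmit from the core
 * transmit path, ndo_set_rx_mode whenever the interface flags or the
 * multicast list change, ndo_tx_timeout from the transmit watchdog when the
 * queue stalls for longer than watchdog_timeo, ndo_do_ioctl for the MII
 * ioctls and ndo_poll_controller for netpoll/netconsole.
 */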
1809
1810 static int amd8111e_probe_one(struct pci_dev *pdev,
1811                                   const struct pci_device_id *ent)
1812 {
1813         int err, i;
1814         unsigned long reg_addr,reg_len;
1815         struct amd8111e_priv* lp;
1816         struct net_device* dev;
1817
1818         err = pci_enable_device(pdev);
1819         if(err){
1820                 printk(KERN_ERR "amd8111e: Cannot enable new PCI device, "
1821                         "exiting.\n");
1822                 return err;
1823         }
1824
1825         if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){
1826                 printk(KERN_ERR "amd8111e: Cannot find PCI base address, "
1827                        "exiting.\n");
1828                 err = -ENODEV;
1829                 goto err_disable_pdev;
1830         }
1831
1832         err = pci_request_regions(pdev, MODULE_NAME);
1833         if(err){
1834                 printk(KERN_ERR "amd8111e: Cannot obtain PCI resources, "
1835                        "exiting.\n");
1836                 goto err_disable_pdev;
1837         }
1838
1839         pci_set_master(pdev);
1840
1841         /* Check for the PCI power-management capability. */
1842         if (!pdev->pm_cap) {
1843                 printk(KERN_ERR "amd8111e: No Power Management capability, "
1844                        "exiting.\n");
1845                 err = -ENODEV;
1846                 goto err_free_reg;
1847         }
1848
1849         /* Initialize DMA */
1850         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) {
1851                 printk(KERN_ERR "amd8111e: DMA not supported,"
1852                         "exiting.\n");
1853                 err = -ENODEV;
1854                 goto err_free_reg;
1855         }
1856
1857         reg_addr = pci_resource_start(pdev, 0);
1858         reg_len = pci_resource_len(pdev, 0);
1859
1860         dev = alloc_etherdev(sizeof(struct amd8111e_priv));
1861         if (!dev) {
1862                 err = -ENOMEM;
1863                 goto err_free_reg;
1864         }
1865
1866         SET_NETDEV_DEV(dev, &pdev->dev);
1867
1868 #if AMD8111E_VLAN_TAG_USED
1869         dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1870 #endif
1871
1872         lp = netdev_priv(dev);
1873         lp->pci_dev = pdev;
1874         lp->amd8111e_net_dev = dev;
1875         lp->pm_cap = pdev->pm_cap;
1876
1877         spin_lock_init(&lp->lock);
1878
1879         lp->mmio = ioremap(reg_addr, reg_len);
1880         if (!lp->mmio) {
1881                 printk(KERN_ERR "amd8111e: Cannot map device registers, "
1882                        "exiting\n");
1883                 err = -ENOMEM;
1884                 goto err_free_dev;
1885         }
1886
1887         /* Initializing MAC address */
1888         for (i = 0; i < ETH_ALEN; i++)
1889                 dev->dev_addr[i] = readb(lp->mmio + PADR + i);
1890
1891         /* Setting user-defined parameters */
1892         lp->ext_phy_option = speed_duplex[card_idx];
1893         if(coalesce[card_idx])
1894                 lp->options |= OPTION_INTR_COAL_ENABLE;
1895         if(dynamic_ipg[card_idx++])
1896                 lp->options |= OPTION_DYN_IPG_ENABLE;
1897
1898
1899         /* Initialize driver entry points */
1900         dev->netdev_ops = &amd8111e_netdev_ops;
1901         SET_ETHTOOL_OPS(dev, &ops);
1902         dev->irq = pdev->irq;
1903         dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
1904         netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
1905
1909         /* Probe the external PHY */
1910         amd8111e_probe_ext_phy(dev);
1911
1912         /* setting mii default values */
1913         lp->mii_if.dev = dev;
1914         lp->mii_if.mdio_read = amd8111e_mdio_read;
1915         lp->mii_if.mdio_write = amd8111e_mdio_write;
1916         lp->mii_if.phy_id = lp->ext_phy_addr;
1917
1918         /* Set receive buffer length and set jumbo option*/
1919         amd8111e_set_rx_buff_len(dev);
1920
1921
1922         err = register_netdev(dev);
1923         if (err) {
1924                 printk(KERN_ERR "amd8111e: Cannot register net device, "
1925                        "exiting.\n");
1926                 goto err_iounmap;
1927         }
1928
1929         pci_set_drvdata(pdev, dev);
1930
1931         /* Initialize software ipg timer */
1932         if(lp->options & OPTION_DYN_IPG_ENABLE){
1933                 init_timer(&lp->ipg_data.ipg_timer);
1934                 lp->ipg_data.ipg_timer.data = (unsigned long) dev;
1935                 lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg;
1936                 lp->ipg_data.ipg_timer.expires = jiffies +
1937                                                  IPG_CONVERGE_JIFFIES;
1938                 lp->ipg_data.ipg = DEFAULT_IPG;
1939                 lp->ipg_data.ipg_state = CSTATE;
1940         }
1941
1942         /*  display driver and device information */
1943
1944         chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
1945         printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n",
1946                dev->name,MODULE_VERS);
1947         printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
1948                dev->name, chip_version, dev->dev_addr);
1949         if (lp->ext_phy_id)
1950                 printk(KERN_INFO "%s: Found MII PHY ID 0x%08x at address 0x%02x\n",
1951                        dev->name, lp->ext_phy_id, lp->ext_phy_addr);
1952         else
1953                 printk(KERN_INFO "%s: Couldn't detect MII PHY, assuming address 0x01\n",
1954                        dev->name);
1955         return 0;
1956 err_iounmap:
1957         iounmap(lp->mmio);
1958
1959 err_free_dev:
1960         free_netdev(dev);
1961
1962 err_free_reg:
1963         pci_release_regions(pdev);
1964
1965 err_disable_pdev:
1966         pci_disable_device(pdev);
1967         return err;
1968
1969 }
1970
1971 static struct pci_driver amd8111e_driver = {
1972         .name           = MODULE_NAME,
1973         .id_table       = amd8111e_pci_tbl,
1974         .probe          = amd8111e_probe_one,
1975         .remove         = amd8111e_remove_one,
1976         .suspend        = amd8111e_suspend,
1977         .resume         = amd8111e_resume
1978 };
1979
1980 module_pci_driver(amd8111e_driver);
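
/*
 * module_pci_driver() expands to the module_init()/module_exit() pair that
 * calls pci_register_driver() and pci_unregister_driver() on amd8111e_driver,
 * so amd8111e_probe_one()/amd8111e_remove_one() run when the PCI core binds
 * or unbinds a device listed in amd8111e_pci_tbl.
 */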