tg3: Update copyright
[linux.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2012 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47
48 #include <net/checksum.h>
49 #include <net/ip.h>
50
51 #include <asm/system.h>
52 #include <linux/io.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
55
56 #ifdef CONFIG_SPARC
57 #include <asm/idprom.h>
58 #include <asm/prom.h>
59 #endif
60
61 #define BAR_0   0
62 #define BAR_2   2
63
64 #include "tg3.h"
65
66 /* Functions & macros to verify TG3_FLAGS types */
67
68 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
69 {
70         return test_bit(flag, bits);
71 }
72
73 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
74 {
75         set_bit(flag, bits);
76 }
77
78 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80         clear_bit(flag, bits);
81 }
82
83 #define tg3_flag(tp, flag)                              \
84         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag)                          \
86         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag)                        \
88         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89
90 #define DRV_MODULE_NAME         "tg3"
91 #define TG3_MAJ_NUM                     3
92 #define TG3_MIN_NUM                     122
93 #define DRV_MODULE_VERSION      \
94         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE      "December 7, 2011"
96
97 #define RESET_KIND_SHUTDOWN     0
98 #define RESET_KIND_INIT         1
99 #define RESET_KIND_SUSPEND      2
100
101 #define TG3_DEF_RX_MODE         0
102 #define TG3_DEF_TX_MODE         0
103 #define TG3_DEF_MSG_ENABLE        \
104         (NETIF_MSG_DRV          | \
105          NETIF_MSG_PROBE        | \
106          NETIF_MSG_LINK         | \
107          NETIF_MSG_TIMER        | \
108          NETIF_MSG_IFDOWN       | \
109          NETIF_MSG_IFUP         | \
110          NETIF_MSG_RX_ERR       | \
111          NETIF_MSG_TX_ERR)
112
113 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
114
115 /* length of time before we decide the hardware is borked,
116  * and dev->tx_timeout() should be called to fix the problem
117  */
118
119 #define TG3_TX_TIMEOUT                  (5 * HZ)
120
121 /* hardware minimum and maximum for a single frame's data payload */
122 #define TG3_MIN_MTU                     60
123 #define TG3_MAX_MTU(tp) \
124         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
125
126 /* These numbers seem to be hard coded in the NIC firmware somehow.
127  * You can't change the ring sizes, but you can change where you place
128  * them in the NIC onboard memory.
129  */
130 #define TG3_RX_STD_RING_SIZE(tp) \
131         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
132          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
133 #define TG3_DEF_RX_RING_PENDING         200
134 #define TG3_RX_JMB_RING_SIZE(tp) \
135         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
136          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
137 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
138
139 /* Do not place this n-ring entries value into the tp struct itself,
140  * we really want to expose these constants to GCC so that modulo et
141  * al.  operations are done with shifts and masks instead of with
142  * hw multiply/modulo instructions.  Another solution would be to
143  * replace things like '% foo' with '& (foo - 1)'.
144  */
145
146 #define TG3_TX_RING_SIZE                512
147 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
148
149 #define TG3_RX_STD_RING_BYTES(tp) \
150         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
151 #define TG3_RX_JMB_RING_BYTES(tp) \
152         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
153 #define TG3_RX_RCB_RING_BYTES(tp) \
154         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
155 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
156                                  TG3_TX_RING_SIZE)
157 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
158
159 #define TG3_DMA_BYTE_ENAB               64
160
161 #define TG3_RX_STD_DMA_SZ               1536
162 #define TG3_RX_JMB_DMA_SZ               9046
163
164 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
165
166 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
167 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
168
169 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
170         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
171
172 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
173         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
174
175 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
176  * that are at least dword aligned when used in PCIX mode.  The driver
177  * works around this bug by double copying the packet.  This workaround
178  * is built into the normal double copy length check for efficiency.
179  *
180  * However, the double copy is only necessary on those architectures
181  * where unaligned memory accesses are inefficient.  For those architectures
182  * where unaligned memory accesses incur little penalty, we can reintegrate
183  * the 5701 in the normal rx path.  Doing so saves a device structure
184  * dereference by hardcoding the double copy threshold in place.
185  */
186 #define TG3_RX_COPY_THRESHOLD           256
187 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
188         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
189 #else
190         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
191 #endif
192
193 #if (NET_IP_ALIGN != 0)
194 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
195 #else
196 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
197 #endif
198
199 /* minimum number of free TX descriptors required to wake up TX process */
200 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
201 #define TG3_TX_BD_DMA_MAX_2K            2048
202 #define TG3_TX_BD_DMA_MAX_4K            4096
203
204 #define TG3_RAW_IP_ALIGN 2
205
206 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
207
208 #define FIRMWARE_TG3            "tigon/tg3.bin"
209 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
210 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
211
212 static char version[] __devinitdata =
213         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
214
215 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
216 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
217 MODULE_LICENSE("GPL");
218 MODULE_VERSION(DRV_MODULE_VERSION);
219 MODULE_FIRMWARE(FIRMWARE_TG3);
220 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
221 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
222
223 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
224 module_param(tg3_debug, int, 0);
225 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
226
227 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
228         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
229         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
230         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
231         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
232         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
233         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
234         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
235         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
236         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
237         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
257         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
258         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
259         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
260         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
261         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
263         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
264         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
265         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
272         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
278         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
285         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
286         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
287         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
288         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
289         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
290         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
291         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
292         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
293         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
294         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
295         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
296         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
297         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
298         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
299         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
300         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
301         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
302         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
303         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
304         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
305         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
306         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
307         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
308         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
309         {}
310 };
311
312 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
313
314 static const struct {
315         const char string[ETH_GSTRING_LEN];
316 } ethtool_stats_keys[] = {
317         { "rx_octets" },
318         { "rx_fragments" },
319         { "rx_ucast_packets" },
320         { "rx_mcast_packets" },
321         { "rx_bcast_packets" },
322         { "rx_fcs_errors" },
323         { "rx_align_errors" },
324         { "rx_xon_pause_rcvd" },
325         { "rx_xoff_pause_rcvd" },
326         { "rx_mac_ctrl_rcvd" },
327         { "rx_xoff_entered" },
328         { "rx_frame_too_long_errors" },
329         { "rx_jabbers" },
330         { "rx_undersize_packets" },
331         { "rx_in_length_errors" },
332         { "rx_out_length_errors" },
333         { "rx_64_or_less_octet_packets" },
334         { "rx_65_to_127_octet_packets" },
335         { "rx_128_to_255_octet_packets" },
336         { "rx_256_to_511_octet_packets" },
337         { "rx_512_to_1023_octet_packets" },
338         { "rx_1024_to_1522_octet_packets" },
339         { "rx_1523_to_2047_octet_packets" },
340         { "rx_2048_to_4095_octet_packets" },
341         { "rx_4096_to_8191_octet_packets" },
342         { "rx_8192_to_9022_octet_packets" },
343
344         { "tx_octets" },
345         { "tx_collisions" },
346
347         { "tx_xon_sent" },
348         { "tx_xoff_sent" },
349         { "tx_flow_control" },
350         { "tx_mac_errors" },
351         { "tx_single_collisions" },
352         { "tx_mult_collisions" },
353         { "tx_deferred" },
354         { "tx_excessive_collisions" },
355         { "tx_late_collisions" },
356         { "tx_collide_2times" },
357         { "tx_collide_3times" },
358         { "tx_collide_4times" },
359         { "tx_collide_5times" },
360         { "tx_collide_6times" },
361         { "tx_collide_7times" },
362         { "tx_collide_8times" },
363         { "tx_collide_9times" },
364         { "tx_collide_10times" },
365         { "tx_collide_11times" },
366         { "tx_collide_12times" },
367         { "tx_collide_13times" },
368         { "tx_collide_14times" },
369         { "tx_collide_15times" },
370         { "tx_ucast_packets" },
371         { "tx_mcast_packets" },
372         { "tx_bcast_packets" },
373         { "tx_carrier_sense_errors" },
374         { "tx_discards" },
375         { "tx_errors" },
376
377         { "dma_writeq_full" },
378         { "dma_write_prioq_full" },
379         { "rxbds_empty" },
380         { "rx_discards" },
381         { "rx_errors" },
382         { "rx_threshold_hit" },
383
384         { "dma_readq_full" },
385         { "dma_read_prioq_full" },
386         { "tx_comp_queue_full" },
387
388         { "ring_set_send_prod_index" },
389         { "ring_status_update" },
390         { "nic_irqs" },
391         { "nic_avoided_irqs" },
392         { "nic_tx_threshold_hit" },
393
394         { "mbuf_lwm_thresh_hit" },
395 };
396
397 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
398
399
400 static const struct {
401         const char string[ETH_GSTRING_LEN];
402 } ethtool_test_keys[] = {
403         { "nvram test        (online) " },
404         { "link test         (online) " },
405         { "register test     (offline)" },
406         { "memory test       (offline)" },
407         { "mac loopback test (offline)" },
408         { "phy loopback test (offline)" },
409         { "ext loopback test (offline)" },
410         { "interrupt test    (offline)" },
411 };
412
413 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
414
415
416 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
417 {
418         writel(val, tp->regs + off);
419 }
420
421 static u32 tg3_read32(struct tg3 *tp, u32 off)
422 {
423         return readl(tp->regs + off);
424 }
425
426 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
427 {
428         writel(val, tp->aperegs + off);
429 }
430
431 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
432 {
433         return readl(tp->aperegs + off);
434 }
435
436 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
437 {
438         unsigned long flags;
439
440         spin_lock_irqsave(&tp->indirect_lock, flags);
441         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
442         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
443         spin_unlock_irqrestore(&tp->indirect_lock, flags);
444 }
445
446 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
447 {
448         writel(val, tp->regs + off);
449         readl(tp->regs + off);
450 }
451
452 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
453 {
454         unsigned long flags;
455         u32 val;
456
457         spin_lock_irqsave(&tp->indirect_lock, flags);
458         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
459         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
460         spin_unlock_irqrestore(&tp->indirect_lock, flags);
461         return val;
462 }
463
464 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
465 {
466         unsigned long flags;
467
468         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
469                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
470                                        TG3_64BIT_REG_LOW, val);
471                 return;
472         }
473         if (off == TG3_RX_STD_PROD_IDX_REG) {
474                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
475                                        TG3_64BIT_REG_LOW, val);
476                 return;
477         }
478
479         spin_lock_irqsave(&tp->indirect_lock, flags);
480         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
481         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
482         spin_unlock_irqrestore(&tp->indirect_lock, flags);
483
484         /* In indirect mode when disabling interrupts, we also need
485          * to clear the interrupt bit in the GRC local ctrl register.
486          */
487         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
488             (val == 0x1)) {
489                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
490                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
491         }
492 }
493
494 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
495 {
496         unsigned long flags;
497         u32 val;
498
499         spin_lock_irqsave(&tp->indirect_lock, flags);
500         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
501         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
502         spin_unlock_irqrestore(&tp->indirect_lock, flags);
503         return val;
504 }
505
506 /* usec_wait specifies the wait time in usec when writing to certain registers
507  * where it is unsafe to read back the register without some delay.
508  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
509  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
510  */
511 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
512 {
513         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
514                 /* Non-posted methods */
515                 tp->write32(tp, off, val);
516         else {
517                 /* Posted method */
518                 tg3_write32(tp, off, val);
519                 if (usec_wait)
520                         udelay(usec_wait);
521                 tp->read32(tp, off);
522         }
523         /* Wait again after the read for the posted method to guarantee that
524          * the wait time is met.
525          */
526         if (usec_wait)
527                 udelay(usec_wait);
528 }
529
530 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
531 {
532         tp->write32_mbox(tp, off, val);
533         if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
534                 tp->read32_mbox(tp, off);
535 }
536
537 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
538 {
539         void __iomem *mbox = tp->regs + off;
540         writel(val, mbox);
541         if (tg3_flag(tp, TXD_MBOX_HWBUG))
542                 writel(val, mbox);
543         if (tg3_flag(tp, MBOX_WRITE_REORDER))
544                 readl(mbox);
545 }
546
547 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
548 {
549         return readl(tp->regs + off + GRCMBOX_BASE);
550 }
551
552 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
553 {
554         writel(val, tp->regs + off + GRCMBOX_BASE);
555 }
556
557 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
558 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
559 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
560 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
561 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
562
563 #define tw32(reg, val)                  tp->write32(tp, reg, val)
564 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
565 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
566 #define tr32(reg)                       tp->read32(tp, reg)
567
568 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
569 {
570         unsigned long flags;
571
572         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
573             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
574                 return;
575
576         spin_lock_irqsave(&tp->indirect_lock, flags);
577         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
578                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
579                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
580
581                 /* Always leave this as zero. */
582                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
583         } else {
584                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
585                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
586
587                 /* Always leave this as zero. */
588                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
589         }
590         spin_unlock_irqrestore(&tp->indirect_lock, flags);
591 }
592
593 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
594 {
595         unsigned long flags;
596
597         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
598             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
599                 *val = 0;
600                 return;
601         }
602
603         spin_lock_irqsave(&tp->indirect_lock, flags);
604         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
605                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
606                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
607
608                 /* Always leave this as zero. */
609                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
610         } else {
611                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
612                 *val = tr32(TG3PCI_MEM_WIN_DATA);
613
614                 /* Always leave this as zero. */
615                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
616         }
617         spin_unlock_irqrestore(&tp->indirect_lock, flags);
618 }
619
620 static void tg3_ape_lock_init(struct tg3 *tp)
621 {
622         int i;
623         u32 regbase, bit;
624
625         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
626                 regbase = TG3_APE_LOCK_GRANT;
627         else
628                 regbase = TG3_APE_PER_LOCK_GRANT;
629
630         /* Make sure the driver hasn't any stale locks. */
631         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
632                 switch (i) {
633                 case TG3_APE_LOCK_PHY0:
634                 case TG3_APE_LOCK_PHY1:
635                 case TG3_APE_LOCK_PHY2:
636                 case TG3_APE_LOCK_PHY3:
637                         bit = APE_LOCK_GRANT_DRIVER;
638                         break;
639                 default:
640                         if (!tp->pci_fn)
641                                 bit = APE_LOCK_GRANT_DRIVER;
642                         else
643                                 bit = 1 << tp->pci_fn;
644                 }
645                 tg3_ape_write32(tp, regbase + 4 * i, bit);
646         }
647
648 }
649
650 static int tg3_ape_lock(struct tg3 *tp, int locknum)
651 {
652         int i, off;
653         int ret = 0;
654         u32 status, req, gnt, bit;
655
656         if (!tg3_flag(tp, ENABLE_APE))
657                 return 0;
658
659         switch (locknum) {
660         case TG3_APE_LOCK_GPIO:
661                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
662                         return 0;
663         case TG3_APE_LOCK_GRC:
664         case TG3_APE_LOCK_MEM:
665                 if (!tp->pci_fn)
666                         bit = APE_LOCK_REQ_DRIVER;
667                 else
668                         bit = 1 << tp->pci_fn;
669                 break;
670         default:
671                 return -EINVAL;
672         }
673
674         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
675                 req = TG3_APE_LOCK_REQ;
676                 gnt = TG3_APE_LOCK_GRANT;
677         } else {
678                 req = TG3_APE_PER_LOCK_REQ;
679                 gnt = TG3_APE_PER_LOCK_GRANT;
680         }
681
682         off = 4 * locknum;
683
684         tg3_ape_write32(tp, req + off, bit);
685
686         /* Wait for up to 1 millisecond to acquire lock. */
687         for (i = 0; i < 100; i++) {
688                 status = tg3_ape_read32(tp, gnt + off);
689                 if (status == bit)
690                         break;
691                 udelay(10);
692         }
693
694         if (status != bit) {
695                 /* Revoke the lock request. */
696                 tg3_ape_write32(tp, gnt + off, bit);
697                 ret = -EBUSY;
698         }
699
700         return ret;
701 }
702
703 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
704 {
705         u32 gnt, bit;
706
707         if (!tg3_flag(tp, ENABLE_APE))
708                 return;
709
710         switch (locknum) {
711         case TG3_APE_LOCK_GPIO:
712                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
713                         return;
714         case TG3_APE_LOCK_GRC:
715         case TG3_APE_LOCK_MEM:
716                 if (!tp->pci_fn)
717                         bit = APE_LOCK_GRANT_DRIVER;
718                 else
719                         bit = 1 << tp->pci_fn;
720                 break;
721         default:
722                 return;
723         }
724
725         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
726                 gnt = TG3_APE_LOCK_GRANT;
727         else
728                 gnt = TG3_APE_PER_LOCK_GRANT;
729
730         tg3_ape_write32(tp, gnt + 4 * locknum, bit);
731 }
732
733 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
734 {
735         int i;
736         u32 apedata;
737
738         /* NCSI does not support APE events */
739         if (tg3_flag(tp, APE_HAS_NCSI))
740                 return;
741
742         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
743         if (apedata != APE_SEG_SIG_MAGIC)
744                 return;
745
746         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
747         if (!(apedata & APE_FW_STATUS_READY))
748                 return;
749
750         /* Wait for up to 1 millisecond for APE to service previous event. */
751         for (i = 0; i < 10; i++) {
752                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
753                         return;
754
755                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
756
757                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
758                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
759                                         event | APE_EVENT_STATUS_EVENT_PENDING);
760
761                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
762
763                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
764                         break;
765
766                 udelay(100);
767         }
768
769         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
770                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
771 }
772
773 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
774 {
775         u32 event;
776         u32 apedata;
777
778         if (!tg3_flag(tp, ENABLE_APE))
779                 return;
780
781         switch (kind) {
782         case RESET_KIND_INIT:
783                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
784                                 APE_HOST_SEG_SIG_MAGIC);
785                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
786                                 APE_HOST_SEG_LEN_MAGIC);
787                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
788                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
789                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
790                         APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
791                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
792                                 APE_HOST_BEHAV_NO_PHYLOCK);
793                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
794                                     TG3_APE_HOST_DRVR_STATE_START);
795
796                 event = APE_EVENT_STATUS_STATE_START;
797                 break;
798         case RESET_KIND_SHUTDOWN:
799                 /* With the interface we are currently using,
800                  * APE does not track driver state.  Wiping
801                  * out the HOST SEGMENT SIGNATURE forces
802                  * the APE to assume OS absent status.
803                  */
804                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
805
806                 if (device_may_wakeup(&tp->pdev->dev) &&
807                     tg3_flag(tp, WOL_ENABLE)) {
808                         tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
809                                             TG3_APE_HOST_WOL_SPEED_AUTO);
810                         apedata = TG3_APE_HOST_DRVR_STATE_WOL;
811                 } else
812                         apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
813
814                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
815
816                 event = APE_EVENT_STATUS_STATE_UNLOAD;
817                 break;
818         case RESET_KIND_SUSPEND:
819                 event = APE_EVENT_STATUS_STATE_SUSPEND;
820                 break;
821         default:
822                 return;
823         }
824
825         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
826
827         tg3_ape_send_event(tp, event);
828 }
829
830 static void tg3_disable_ints(struct tg3 *tp)
831 {
832         int i;
833
834         tw32(TG3PCI_MISC_HOST_CTRL,
835              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
836         for (i = 0; i < tp->irq_max; i++)
837                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
838 }
839
840 static void tg3_enable_ints(struct tg3 *tp)
841 {
842         int i;
843
844         tp->irq_sync = 0;
845         wmb();
846
847         tw32(TG3PCI_MISC_HOST_CTRL,
848              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
849
850         tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
851         for (i = 0; i < tp->irq_cnt; i++) {
852                 struct tg3_napi *tnapi = &tp->napi[i];
853
854                 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
855                 if (tg3_flag(tp, 1SHOT_MSI))
856                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
857
858                 tp->coal_now |= tnapi->coal_now;
859         }
860
861         /* Force an initial interrupt */
862         if (!tg3_flag(tp, TAGGED_STATUS) &&
863             (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
864                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
865         else
866                 tw32(HOSTCC_MODE, tp->coal_now);
867
868         tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
869 }
870
871 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
872 {
873         struct tg3 *tp = tnapi->tp;
874         struct tg3_hw_status *sblk = tnapi->hw_status;
875         unsigned int work_exists = 0;
876
877         /* check for phy events */
878         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
879                 if (sblk->status & SD_STATUS_LINK_CHG)
880                         work_exists = 1;
881         }
882         /* check for RX/TX work to do */
883         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
884             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
885                 work_exists = 1;
886
887         return work_exists;
888 }
889
890 /* tg3_int_reenable
891  *  similar to tg3_enable_ints, but it accurately determines whether there
892  *  is new work pending and can return without flushing the PIO write
893  *  which reenables interrupts
894  */
895 static void tg3_int_reenable(struct tg3_napi *tnapi)
896 {
897         struct tg3 *tp = tnapi->tp;
898
899         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
900         mmiowb();
901
902         /* When doing tagged status, this work check is unnecessary.
903          * The last_tag we write above tells the chip which piece of
904          * work we've completed.
905          */
906         if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
907                 tw32(HOSTCC_MODE, tp->coalesce_mode |
908                      HOSTCC_MODE_ENABLE | tnapi->coal_now);
909 }
910
911 static void tg3_switch_clocks(struct tg3 *tp)
912 {
913         u32 clock_ctrl;
914         u32 orig_clock_ctrl;
915
916         if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
917                 return;
918
919         clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
920
921         orig_clock_ctrl = clock_ctrl;
922         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
923                        CLOCK_CTRL_CLKRUN_OENABLE |
924                        0x1f);
925         tp->pci_clock_ctrl = clock_ctrl;
926
927         if (tg3_flag(tp, 5705_PLUS)) {
928                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
929                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
930                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
931                 }
932         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
933                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
934                             clock_ctrl |
935                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
936                             40);
937                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
938                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
939                             40);
940         }
941         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
942 }
943
944 #define PHY_BUSY_LOOPS  5000
945
946 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
947 {
948         u32 frame_val;
949         unsigned int loops;
950         int ret;
951
952         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
953                 tw32_f(MAC_MI_MODE,
954                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
955                 udelay(80);
956         }
957
958         *val = 0x0;
959
960         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
961                       MI_COM_PHY_ADDR_MASK);
962         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
963                       MI_COM_REG_ADDR_MASK);
964         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
965
966         tw32_f(MAC_MI_COM, frame_val);
967
968         loops = PHY_BUSY_LOOPS;
969         while (loops != 0) {
970                 udelay(10);
971                 frame_val = tr32(MAC_MI_COM);
972
973                 if ((frame_val & MI_COM_BUSY) == 0) {
974                         udelay(5);
975                         frame_val = tr32(MAC_MI_COM);
976                         break;
977                 }
978                 loops -= 1;
979         }
980
981         ret = -EBUSY;
982         if (loops != 0) {
983                 *val = frame_val & MI_COM_DATA_MASK;
984                 ret = 0;
985         }
986
987         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
988                 tw32_f(MAC_MI_MODE, tp->mi_mode);
989                 udelay(80);
990         }
991
992         return ret;
993 }
994
995 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
996 {
997         u32 frame_val;
998         unsigned int loops;
999         int ret;
1000
1001         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1002             (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1003                 return 0;
1004
1005         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1006                 tw32_f(MAC_MI_MODE,
1007                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1008                 udelay(80);
1009         }
1010
1011         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1012                       MI_COM_PHY_ADDR_MASK);
1013         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1014                       MI_COM_REG_ADDR_MASK);
1015         frame_val |= (val & MI_COM_DATA_MASK);
1016         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1017
1018         tw32_f(MAC_MI_COM, frame_val);
1019
1020         loops = PHY_BUSY_LOOPS;
1021         while (loops != 0) {
1022                 udelay(10);
1023                 frame_val = tr32(MAC_MI_COM);
1024                 if ((frame_val & MI_COM_BUSY) == 0) {
1025                         udelay(5);
1026                         frame_val = tr32(MAC_MI_COM);
1027                         break;
1028                 }
1029                 loops -= 1;
1030         }
1031
1032         ret = -EBUSY;
1033         if (loops != 0)
1034                 ret = 0;
1035
1036         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1037                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1038                 udelay(80);
1039         }
1040
1041         return ret;
1042 }
1043
1044 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1045 {
1046         int err;
1047
1048         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1049         if (err)
1050                 goto done;
1051
1052         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1053         if (err)
1054                 goto done;
1055
1056         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1057                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1058         if (err)
1059                 goto done;
1060
1061         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1062
1063 done:
1064         return err;
1065 }
1066
1067 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1068 {
1069         int err;
1070
1071         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1072         if (err)
1073                 goto done;
1074
1075         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1076         if (err)
1077                 goto done;
1078
1079         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1080                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1081         if (err)
1082                 goto done;
1083
1084         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1085
1086 done:
1087         return err;
1088 }
1089
1090 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1091 {
1092         int err;
1093
1094         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1095         if (!err)
1096                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1097
1098         return err;
1099 }
1100
1101 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1102 {
1103         int err;
1104
1105         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1106         if (!err)
1107                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1108
1109         return err;
1110 }
1111
1112 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1113 {
1114         int err;
1115
1116         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1117                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1118                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1119         if (!err)
1120                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1121
1122         return err;
1123 }
1124
1125 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1126 {
1127         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1128                 set |= MII_TG3_AUXCTL_MISC_WREN;
1129
1130         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1131 }
1132
1133 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1134         tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1135                              MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1136                              MII_TG3_AUXCTL_ACTL_TX_6DB)
1137
1138 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1139         tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1140                              MII_TG3_AUXCTL_ACTL_TX_6DB);
1141
1142 static int tg3_bmcr_reset(struct tg3 *tp)
1143 {
1144         u32 phy_control;
1145         int limit, err;
1146
1147         /* OK, reset it, and poll the BMCR_RESET bit until it
1148          * clears or we time out.
1149          */
1150         phy_control = BMCR_RESET;
1151         err = tg3_writephy(tp, MII_BMCR, phy_control);
1152         if (err != 0)
1153                 return -EBUSY;
1154
1155         limit = 5000;
1156         while (limit--) {
1157                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1158                 if (err != 0)
1159                         return -EBUSY;
1160
1161                 if ((phy_control & BMCR_RESET) == 0) {
1162                         udelay(40);
1163                         break;
1164                 }
1165                 udelay(10);
1166         }
1167         if (limit < 0)
1168                 return -EBUSY;
1169
1170         return 0;
1171 }
1172
1173 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1174 {
1175         struct tg3 *tp = bp->priv;
1176         u32 val;
1177
1178         spin_lock_bh(&tp->lock);
1179
1180         if (tg3_readphy(tp, reg, &val))
1181                 val = -EIO;
1182
1183         spin_unlock_bh(&tp->lock);
1184
1185         return val;
1186 }
1187
1188 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1189 {
1190         struct tg3 *tp = bp->priv;
1191         u32 ret = 0;
1192
1193         spin_lock_bh(&tp->lock);
1194
1195         if (tg3_writephy(tp, reg, val))
1196                 ret = -EIO;
1197
1198         spin_unlock_bh(&tp->lock);
1199
1200         return ret;
1201 }
1202
1203 static int tg3_mdio_reset(struct mii_bus *bp)
1204 {
1205         return 0;
1206 }
1207
1208 static void tg3_mdio_config_5785(struct tg3 *tp)
1209 {
1210         u32 val;
1211         struct phy_device *phydev;
1212
1213         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1214         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1215         case PHY_ID_BCM50610:
1216         case PHY_ID_BCM50610M:
1217                 val = MAC_PHYCFG2_50610_LED_MODES;
1218                 break;
1219         case PHY_ID_BCMAC131:
1220                 val = MAC_PHYCFG2_AC131_LED_MODES;
1221                 break;
1222         case PHY_ID_RTL8211C:
1223                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1224                 break;
1225         case PHY_ID_RTL8201E:
1226                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1227                 break;
1228         default:
1229                 return;
1230         }
1231
1232         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1233                 tw32(MAC_PHYCFG2, val);
1234
1235                 val = tr32(MAC_PHYCFG1);
1236                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1237                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1238                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1239                 tw32(MAC_PHYCFG1, val);
1240
1241                 return;
1242         }
1243
1244         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1245                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1246                        MAC_PHYCFG2_FMODE_MASK_MASK |
1247                        MAC_PHYCFG2_GMODE_MASK_MASK |
1248                        MAC_PHYCFG2_ACT_MASK_MASK   |
1249                        MAC_PHYCFG2_QUAL_MASK_MASK |
1250                        MAC_PHYCFG2_INBAND_ENABLE;
1251
1252         tw32(MAC_PHYCFG2, val);
1253
1254         val = tr32(MAC_PHYCFG1);
1255         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1256                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1257         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1258                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1259                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1260                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1261                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1262         }
1263         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1264                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1265         tw32(MAC_PHYCFG1, val);
1266
1267         val = tr32(MAC_EXT_RGMII_MODE);
1268         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1269                  MAC_RGMII_MODE_RX_QUALITY |
1270                  MAC_RGMII_MODE_RX_ACTIVITY |
1271                  MAC_RGMII_MODE_RX_ENG_DET |
1272                  MAC_RGMII_MODE_TX_ENABLE |
1273                  MAC_RGMII_MODE_TX_LOWPWR |
1274                  MAC_RGMII_MODE_TX_RESET);
1275         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1276                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1277                         val |= MAC_RGMII_MODE_RX_INT_B |
1278                                MAC_RGMII_MODE_RX_QUALITY |
1279                                MAC_RGMII_MODE_RX_ACTIVITY |
1280                                MAC_RGMII_MODE_RX_ENG_DET;
1281                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1282                         val |= MAC_RGMII_MODE_TX_ENABLE |
1283                                MAC_RGMII_MODE_TX_LOWPWR |
1284                                MAC_RGMII_MODE_TX_RESET;
1285         }
1286         tw32(MAC_EXT_RGMII_MODE, val);
1287 }
1288
1289 static void tg3_mdio_start(struct tg3 *tp)
1290 {
1291         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1292         tw32_f(MAC_MI_MODE, tp->mi_mode);
1293         udelay(80);
1294
1295         if (tg3_flag(tp, MDIOBUS_INITED) &&
1296             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1297                 tg3_mdio_config_5785(tp);
1298 }
1299
1300 static int tg3_mdio_init(struct tg3 *tp)
1301 {
1302         int i;
1303         u32 reg;
1304         struct phy_device *phydev;
1305
1306         if (tg3_flag(tp, 5717_PLUS)) {
1307                 u32 is_serdes;
1308
1309                 tp->phy_addr = tp->pci_fn + 1;
1310
1311                 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1312                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1313                 else
1314                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1315                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1316                 if (is_serdes)
1317                         tp->phy_addr += 7;
1318         } else
1319                 tp->phy_addr = TG3_PHY_MII_ADDR;
1320
1321         tg3_mdio_start(tp);
1322
1323         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1324                 return 0;
1325
1326         tp->mdio_bus = mdiobus_alloc();
1327         if (tp->mdio_bus == NULL)
1328                 return -ENOMEM;
1329
1330         tp->mdio_bus->name     = "tg3 mdio bus";
1331         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1332                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1333         tp->mdio_bus->priv     = tp;
1334         tp->mdio_bus->parent   = &tp->pdev->dev;
1335         tp->mdio_bus->read     = &tg3_mdio_read;
1336         tp->mdio_bus->write    = &tg3_mdio_write;
1337         tp->mdio_bus->reset    = &tg3_mdio_reset;
1338         tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1339         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1340
1341         for (i = 0; i < PHY_MAX_ADDR; i++)
1342                 tp->mdio_bus->irq[i] = PHY_POLL;
1343
1344         /* The bus registration will look for all the PHYs on the mdio bus.
1345          * Unfortunately, it does not ensure the PHY is powered up before
1346          * accessing the PHY ID registers.  A chip reset is the
1347          * quickest way to bring the device back to an operational state..
1348          */
1349         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1350                 tg3_bmcr_reset(tp);
1351
1352         i = mdiobus_register(tp->mdio_bus);
1353         if (i) {
1354                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1355                 mdiobus_free(tp->mdio_bus);
1356                 return i;
1357         }
1358
1359         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1360
1361         if (!phydev || !phydev->drv) {
1362                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1363                 mdiobus_unregister(tp->mdio_bus);
1364                 mdiobus_free(tp->mdio_bus);
1365                 return -ENODEV;
1366         }
1367
1368         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1369         case PHY_ID_BCM57780:
1370                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1371                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1372                 break;
1373         case PHY_ID_BCM50610:
1374         case PHY_ID_BCM50610M:
1375                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1376                                      PHY_BRCM_RX_REFCLK_UNUSED |
1377                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1378                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1379                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1380                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1381                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1382                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1383                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1384                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1385                 /* fallthru */
1386         case PHY_ID_RTL8211C:
1387                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1388                 break;
1389         case PHY_ID_RTL8201E:
1390         case PHY_ID_BCMAC131:
1391                 phydev->interface = PHY_INTERFACE_MODE_MII;
1392                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1393                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1394                 break;
1395         }
1396
1397         tg3_flag_set(tp, MDIOBUS_INITED);
1398
1399         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1400                 tg3_mdio_config_5785(tp);
1401
1402         return 0;
1403 }
1404
1405 static void tg3_mdio_fini(struct tg3 *tp)
1406 {
1407         if (tg3_flag(tp, MDIOBUS_INITED)) {
1408                 tg3_flag_clear(tp, MDIOBUS_INITED);
1409                 mdiobus_unregister(tp->mdio_bus);
1410                 mdiobus_free(tp->mdio_bus);
1411         }
1412 }
1413
1414 /* tp->lock is held. */
1415 static inline void tg3_generate_fw_event(struct tg3 *tp)
1416 {
1417         u32 val;
1418
1419         val = tr32(GRC_RX_CPU_EVENT);
1420         val |= GRC_RX_CPU_DRIVER_EVENT;
1421         tw32_f(GRC_RX_CPU_EVENT, val);
1422
1423         tp->last_event_jiffies = jiffies;
1424 }
1425
1426 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1427
1428 /* tp->lock is held. */
1429 static void tg3_wait_for_event_ack(struct tg3 *tp)
1430 {
1431         int i;
1432         unsigned int delay_cnt;
1433         long time_remain;
1434
1435         /* If enough time has passed, no wait is necessary. */
1436         time_remain = (long)(tp->last_event_jiffies + 1 +
1437                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1438                       (long)jiffies;
1439         if (time_remain < 0)
1440                 return;
1441
1442         /* Check if we can shorten the wait time. */
1443         delay_cnt = jiffies_to_usecs(time_remain);
1444         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1445                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1446         delay_cnt = (delay_cnt >> 3) + 1;
1447
1448         for (i = 0; i < delay_cnt; i++) {
1449                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1450                         break;
1451                 udelay(8);
1452         }
1453 }
1454
1455 /* tp->lock is held. */
1456 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1457 {
1458         u32 reg, val;
1459
1460         val = 0;
1461         if (!tg3_readphy(tp, MII_BMCR, &reg))
1462                 val = reg << 16;
1463         if (!tg3_readphy(tp, MII_BMSR, &reg))
1464                 val |= (reg & 0xffff);
1465         *data++ = val;
1466
1467         val = 0;
1468         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1469                 val = reg << 16;
1470         if (!tg3_readphy(tp, MII_LPA, &reg))
1471                 val |= (reg & 0xffff);
1472         *data++ = val;
1473
1474         val = 0;
1475         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1476                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1477                         val = reg << 16;
1478                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1479                         val |= (reg & 0xffff);
1480         }
1481         *data++ = val;
1482
1483         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1484                 val = reg << 16;
1485         else
1486                 val = 0;
1487         *data++ = val;
1488 }
1489
1490 /* tp->lock is held. */
1491 static void tg3_ump_link_report(struct tg3 *tp)
1492 {
1493         u32 data[4];
1494
1495         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1496                 return;
1497
1498         tg3_phy_gather_ump_data(tp, data);
1499
1500         tg3_wait_for_event_ack(tp);
1501
1502         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1503         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1504         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1505         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1506         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1507         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1508
1509         tg3_generate_fw_event(tp);
1510 }
1511
1512 /* tp->lock is held. */
1513 static void tg3_stop_fw(struct tg3 *tp)
1514 {
1515         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1516                 /* Wait for RX cpu to ACK the previous event. */
1517                 tg3_wait_for_event_ack(tp);
1518
1519                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1520
1521                 tg3_generate_fw_event(tp);
1522
1523                 /* Wait for RX cpu to ACK this event. */
1524                 tg3_wait_for_event_ack(tp);
1525         }
1526 }
1527
1528 /* tp->lock is held. */
1529 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1530 {
1531         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1532                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1533
1534         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1535                 switch (kind) {
1536                 case RESET_KIND_INIT:
1537                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1538                                       DRV_STATE_START);
1539                         break;
1540
1541                 case RESET_KIND_SHUTDOWN:
1542                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1543                                       DRV_STATE_UNLOAD);
1544                         break;
1545
1546                 case RESET_KIND_SUSPEND:
1547                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1548                                       DRV_STATE_SUSPEND);
1549                         break;
1550
1551                 default:
1552                         break;
1553                 }
1554         }
1555
1556         if (kind == RESET_KIND_INIT ||
1557             kind == RESET_KIND_SUSPEND)
1558                 tg3_ape_driver_state_change(tp, kind);
1559 }
1560
1561 /* tp->lock is held. */
1562 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1563 {
1564         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1565                 switch (kind) {
1566                 case RESET_KIND_INIT:
1567                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1568                                       DRV_STATE_START_DONE);
1569                         break;
1570
1571                 case RESET_KIND_SHUTDOWN:
1572                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1573                                       DRV_STATE_UNLOAD_DONE);
1574                         break;
1575
1576                 default:
1577                         break;
1578                 }
1579         }
1580
1581         if (kind == RESET_KIND_SHUTDOWN)
1582                 tg3_ape_driver_state_change(tp, kind);
1583 }
1584
1585 /* tp->lock is held. */
1586 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1587 {
1588         if (tg3_flag(tp, ENABLE_ASF)) {
1589                 switch (kind) {
1590                 case RESET_KIND_INIT:
1591                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1592                                       DRV_STATE_START);
1593                         break;
1594
1595                 case RESET_KIND_SHUTDOWN:
1596                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1597                                       DRV_STATE_UNLOAD);
1598                         break;
1599
1600                 case RESET_KIND_SUSPEND:
1601                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1602                                       DRV_STATE_SUSPEND);
1603                         break;
1604
1605                 default:
1606                         break;
1607                 }
1608         }
1609 }
1610
1611 static int tg3_poll_fw(struct tg3 *tp)
1612 {
1613         int i;
1614         u32 val;
1615
1616         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1617                 /* Wait up to 20ms for init done. */
1618                 for (i = 0; i < 200; i++) {
1619                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1620                                 return 0;
1621                         udelay(100);
1622                 }
1623                 return -ENODEV;
1624         }
1625
1626         /* Wait for firmware initialization to complete. */
1627         for (i = 0; i < 100000; i++) {
1628                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1629                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1630                         break;
1631                 udelay(10);
1632         }
1633
1634         /* Chip might not be fitted with firmware.  Some Sun onboard
1635          * parts are configured like that.  So don't signal the timeout
1636          * of the above loop as an error, but do report the lack of
1637          * running firmware once.
1638          */
1639         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1640                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1641
1642                 netdev_info(tp->dev, "No firmware running\n");
1643         }
1644
1645         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1646                 /* The 57765 A0 needs a little more
1647                  * time to do some important work.
1648                  */
1649                 mdelay(10);
1650         }
1651
1652         return 0;
1653 }
1654
1655 static void tg3_link_report(struct tg3 *tp)
1656 {
1657         if (!netif_carrier_ok(tp->dev)) {
1658                 netif_info(tp, link, tp->dev, "Link is down\n");
1659                 tg3_ump_link_report(tp);
1660         } else if (netif_msg_link(tp)) {
1661                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1662                             (tp->link_config.active_speed == SPEED_1000 ?
1663                              1000 :
1664                              (tp->link_config.active_speed == SPEED_100 ?
1665                               100 : 10)),
1666                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1667                              "full" : "half"));
1668
1669                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1670                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1671                             "on" : "off",
1672                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1673                             "on" : "off");
1674
1675                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1676                         netdev_info(tp->dev, "EEE is %s\n",
1677                                     tp->setlpicnt ? "enabled" : "disabled");
1678
1679                 tg3_ump_link_report(tp);
1680         }
1681 }
1682
1683 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1684 {
1685         u16 miireg;
1686
1687         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1688                 miireg = ADVERTISE_1000XPAUSE;
1689         else if (flow_ctrl & FLOW_CTRL_TX)
1690                 miireg = ADVERTISE_1000XPSE_ASYM;
1691         else if (flow_ctrl & FLOW_CTRL_RX)
1692                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1693         else
1694                 miireg = 0;
1695
1696         return miireg;
1697 }
1698
1699 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1700 {
1701         u8 cap = 0;
1702
1703         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1704                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1705         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1706                 if (lcladv & ADVERTISE_1000XPAUSE)
1707                         cap = FLOW_CTRL_RX;
1708                 if (rmtadv & ADVERTISE_1000XPAUSE)
1709                         cap = FLOW_CTRL_TX;
1710         }
1711
1712         return cap;
1713 }
1714
1715 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1716 {
1717         u8 autoneg;
1718         u8 flowctrl = 0;
1719         u32 old_rx_mode = tp->rx_mode;
1720         u32 old_tx_mode = tp->tx_mode;
1721
1722         if (tg3_flag(tp, USE_PHYLIB))
1723                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1724         else
1725                 autoneg = tp->link_config.autoneg;
1726
1727         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1728                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1729                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1730                 else
1731                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1732         } else
1733                 flowctrl = tp->link_config.flowctrl;
1734
1735         tp->link_config.active_flowctrl = flowctrl;
1736
1737         if (flowctrl & FLOW_CTRL_RX)
1738                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1739         else
1740                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1741
1742         if (old_rx_mode != tp->rx_mode)
1743                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1744
1745         if (flowctrl & FLOW_CTRL_TX)
1746                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1747         else
1748                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1749
1750         if (old_tx_mode != tp->tx_mode)
1751                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1752 }
1753
1754 static void tg3_adjust_link(struct net_device *dev)
1755 {
1756         u8 oldflowctrl, linkmesg = 0;
1757         u32 mac_mode, lcl_adv, rmt_adv;
1758         struct tg3 *tp = netdev_priv(dev);
1759         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1760
1761         spin_lock_bh(&tp->lock);
1762
1763         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1764                                     MAC_MODE_HALF_DUPLEX);
1765
1766         oldflowctrl = tp->link_config.active_flowctrl;
1767
1768         if (phydev->link) {
1769                 lcl_adv = 0;
1770                 rmt_adv = 0;
1771
1772                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1773                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1774                 else if (phydev->speed == SPEED_1000 ||
1775                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1776                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1777                 else
1778                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1779
1780                 if (phydev->duplex == DUPLEX_HALF)
1781                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1782                 else {
1783                         lcl_adv = mii_advertise_flowctrl(
1784                                   tp->link_config.flowctrl);
1785
1786                         if (phydev->pause)
1787                                 rmt_adv = LPA_PAUSE_CAP;
1788                         if (phydev->asym_pause)
1789                                 rmt_adv |= LPA_PAUSE_ASYM;
1790                 }
1791
1792                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1793         } else
1794                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1795
1796         if (mac_mode != tp->mac_mode) {
1797                 tp->mac_mode = mac_mode;
1798                 tw32_f(MAC_MODE, tp->mac_mode);
1799                 udelay(40);
1800         }
1801
1802         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1803                 if (phydev->speed == SPEED_10)
1804                         tw32(MAC_MI_STAT,
1805                              MAC_MI_STAT_10MBPS_MODE |
1806                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1807                 else
1808                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1809         }
1810
1811         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1812                 tw32(MAC_TX_LENGTHS,
1813                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1814                       (6 << TX_LENGTHS_IPG_SHIFT) |
1815                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1816         else
1817                 tw32(MAC_TX_LENGTHS,
1818                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1819                       (6 << TX_LENGTHS_IPG_SHIFT) |
1820                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1821
1822         if ((phydev->link && tp->link_config.active_speed == SPEED_UNKNOWN) ||
1823             (!phydev->link && tp->link_config.active_speed != SPEED_UNKNOWN) ||
1824             phydev->speed != tp->link_config.active_speed ||
1825             phydev->duplex != tp->link_config.active_duplex ||
1826             oldflowctrl != tp->link_config.active_flowctrl)
1827                 linkmesg = 1;
1828
1829         tp->link_config.active_speed = phydev->speed;
1830         tp->link_config.active_duplex = phydev->duplex;
1831
1832         spin_unlock_bh(&tp->lock);
1833
1834         if (linkmesg)
1835                 tg3_link_report(tp);
1836 }
1837
1838 static int tg3_phy_init(struct tg3 *tp)
1839 {
1840         struct phy_device *phydev;
1841
1842         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1843                 return 0;
1844
1845         /* Bring the PHY back to a known state. */
1846         tg3_bmcr_reset(tp);
1847
1848         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1849
1850         /* Attach the MAC to the PHY. */
1851         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1852                              phydev->dev_flags, phydev->interface);
1853         if (IS_ERR(phydev)) {
1854                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1855                 return PTR_ERR(phydev);
1856         }
1857
1858         /* Mask with MAC supported features. */
1859         switch (phydev->interface) {
1860         case PHY_INTERFACE_MODE_GMII:
1861         case PHY_INTERFACE_MODE_RGMII:
1862                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1863                         phydev->supported &= (PHY_GBIT_FEATURES |
1864                                               SUPPORTED_Pause |
1865                                               SUPPORTED_Asym_Pause);
1866                         break;
1867                 }
1868                 /* fallthru */
1869         case PHY_INTERFACE_MODE_MII:
1870                 phydev->supported &= (PHY_BASIC_FEATURES |
1871                                       SUPPORTED_Pause |
1872                                       SUPPORTED_Asym_Pause);
1873                 break;
1874         default:
1875                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1876                 return -EINVAL;
1877         }
1878
1879         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1880
1881         phydev->advertising = phydev->supported;
1882
1883         return 0;
1884 }
1885
1886 static void tg3_phy_start(struct tg3 *tp)
1887 {
1888         struct phy_device *phydev;
1889
1890         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1891                 return;
1892
1893         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1894
1895         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1896                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1897                 phydev->speed = tp->link_config.speed;
1898                 phydev->duplex = tp->link_config.duplex;
1899                 phydev->autoneg = tp->link_config.autoneg;
1900                 phydev->advertising = tp->link_config.advertising;
1901         }
1902
1903         phy_start(phydev);
1904
1905         phy_start_aneg(phydev);
1906 }
1907
1908 static void tg3_phy_stop(struct tg3 *tp)
1909 {
1910         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1911                 return;
1912
1913         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1914 }
1915
1916 static void tg3_phy_fini(struct tg3 *tp)
1917 {
1918         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1919                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1920                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1921         }
1922 }
1923
1924 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1925 {
1926         int err;
1927         u32 val;
1928
1929         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1930                 return 0;
1931
1932         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1933                 /* Cannot do read-modify-write on 5401 */
1934                 err = tg3_phy_auxctl_write(tp,
1935                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1936                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1937                                            0x4c20);
1938                 goto done;
1939         }
1940
1941         err = tg3_phy_auxctl_read(tp,
1942                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1943         if (err)
1944                 return err;
1945
1946         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1947         err = tg3_phy_auxctl_write(tp,
1948                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1949
1950 done:
1951         return err;
1952 }
1953
1954 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1955 {
1956         u32 phytest;
1957
1958         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1959                 u32 phy;
1960
1961                 tg3_writephy(tp, MII_TG3_FET_TEST,
1962                              phytest | MII_TG3_FET_SHADOW_EN);
1963                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1964                         if (enable)
1965                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1966                         else
1967                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1968                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1969                 }
1970                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1971         }
1972 }
1973
1974 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1975 {
1976         u32 reg;
1977
1978         if (!tg3_flag(tp, 5705_PLUS) ||
1979             (tg3_flag(tp, 5717_PLUS) &&
1980              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1981                 return;
1982
1983         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1984                 tg3_phy_fet_toggle_apd(tp, enable);
1985                 return;
1986         }
1987
1988         reg = MII_TG3_MISC_SHDW_WREN |
1989               MII_TG3_MISC_SHDW_SCR5_SEL |
1990               MII_TG3_MISC_SHDW_SCR5_LPED |
1991               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1992               MII_TG3_MISC_SHDW_SCR5_SDTL |
1993               MII_TG3_MISC_SHDW_SCR5_C125OE;
1994         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1995                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1996
1997         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1998
1999
2000         reg = MII_TG3_MISC_SHDW_WREN |
2001               MII_TG3_MISC_SHDW_APD_SEL |
2002               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2003         if (enable)
2004                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2005
2006         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2007 }
2008
2009 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2010 {
2011         u32 phy;
2012
2013         if (!tg3_flag(tp, 5705_PLUS) ||
2014             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2015                 return;
2016
2017         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2018                 u32 ephy;
2019
2020                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2021                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2022
2023                         tg3_writephy(tp, MII_TG3_FET_TEST,
2024                                      ephy | MII_TG3_FET_SHADOW_EN);
2025                         if (!tg3_readphy(tp, reg, &phy)) {
2026                                 if (enable)
2027                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2028                                 else
2029                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2030                                 tg3_writephy(tp, reg, phy);
2031                         }
2032                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2033                 }
2034         } else {
2035                 int ret;
2036
2037                 ret = tg3_phy_auxctl_read(tp,
2038                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2039                 if (!ret) {
2040                         if (enable)
2041                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2042                         else
2043                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2044                         tg3_phy_auxctl_write(tp,
2045                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2046                 }
2047         }
2048 }
2049
2050 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2051 {
2052         int ret;
2053         u32 val;
2054
2055         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2056                 return;
2057
2058         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2059         if (!ret)
2060                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2061                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2062 }
2063
2064 static void tg3_phy_apply_otp(struct tg3 *tp)
2065 {
2066         u32 otp, phy;
2067
2068         if (!tp->phy_otp)
2069                 return;
2070
2071         otp = tp->phy_otp;
2072
2073         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2074                 return;
2075
2076         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2077         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2078         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2079
2080         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2081               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2082         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2083
2084         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2085         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2086         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2087
2088         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2089         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2090
2091         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2092         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2093
2094         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2095               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2096         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2097
2098         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2099 }
2100
2101 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2102 {
2103         u32 val;
2104
2105         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2106                 return;
2107
2108         tp->setlpicnt = 0;
2109
2110         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2111             current_link_up == 1 &&
2112             tp->link_config.active_duplex == DUPLEX_FULL &&
2113             (tp->link_config.active_speed == SPEED_100 ||
2114              tp->link_config.active_speed == SPEED_1000)) {
2115                 u32 eeectl;
2116
2117                 if (tp->link_config.active_speed == SPEED_1000)
2118                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2119                 else
2120                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2121
2122                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2123
2124                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2125                                   TG3_CL45_D7_EEERES_STAT, &val);
2126
2127                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2128                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2129                         tp->setlpicnt = 2;
2130         }
2131
2132         if (!tp->setlpicnt) {
2133                 if (current_link_up == 1 &&
2134                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2135                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2136                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2137                 }
2138
2139                 val = tr32(TG3_CPMU_EEE_MODE);
2140                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2141         }
2142 }
2143
2144 static void tg3_phy_eee_enable(struct tg3 *tp)
2145 {
2146         u32 val;
2147
2148         if (tp->link_config.active_speed == SPEED_1000 &&
2149             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2150              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2151              tg3_flag(tp, 57765_CLASS)) &&
2152             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2153                 val = MII_TG3_DSP_TAP26_ALNOKO |
2154                       MII_TG3_DSP_TAP26_RMRXSTO;
2155                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2156                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2157         }
2158
2159         val = tr32(TG3_CPMU_EEE_MODE);
2160         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2161 }
2162
2163 static int tg3_wait_macro_done(struct tg3 *tp)
2164 {
2165         int limit = 100;
2166
2167         while (limit--) {
2168                 u32 tmp32;
2169
2170                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2171                         if ((tmp32 & 0x1000) == 0)
2172                                 break;
2173                 }
2174         }
2175         if (limit < 0)
2176                 return -EBUSY;
2177
2178         return 0;
2179 }
2180
2181 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2182 {
2183         static const u32 test_pat[4][6] = {
2184         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2185         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2186         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2187         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2188         };
2189         int chan;
2190
2191         for (chan = 0; chan < 4; chan++) {
2192                 int i;
2193
2194                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2195                              (chan * 0x2000) | 0x0200);
2196                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2197
2198                 for (i = 0; i < 6; i++)
2199                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2200                                      test_pat[chan][i]);
2201
2202                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2203                 if (tg3_wait_macro_done(tp)) {
2204                         *resetp = 1;
2205                         return -EBUSY;
2206                 }
2207
2208                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2209                              (chan * 0x2000) | 0x0200);
2210                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2211                 if (tg3_wait_macro_done(tp)) {
2212                         *resetp = 1;
2213                         return -EBUSY;
2214                 }
2215
2216                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2217                 if (tg3_wait_macro_done(tp)) {
2218                         *resetp = 1;
2219                         return -EBUSY;
2220                 }
2221
2222                 for (i = 0; i < 6; i += 2) {
2223                         u32 low, high;
2224
2225                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2226                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2227                             tg3_wait_macro_done(tp)) {
2228                                 *resetp = 1;
2229                                 return -EBUSY;
2230                         }
2231                         low &= 0x7fff;
2232                         high &= 0x000f;
2233                         if (low != test_pat[chan][i] ||
2234                             high != test_pat[chan][i+1]) {
2235                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2236                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2237                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2238
2239                                 return -EBUSY;
2240                         }
2241                 }
2242         }
2243
2244         return 0;
2245 }
2246
2247 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2248 {
2249         int chan;
2250
2251         for (chan = 0; chan < 4; chan++) {
2252                 int i;
2253
2254                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2255                              (chan * 0x2000) | 0x0200);
2256                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2257                 for (i = 0; i < 6; i++)
2258                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2259                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2260                 if (tg3_wait_macro_done(tp))
2261                         return -EBUSY;
2262         }
2263
2264         return 0;
2265 }
2266
2267 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2268 {
2269         u32 reg32, phy9_orig;
2270         int retries, do_phy_reset, err;
2271
2272         retries = 10;
2273         do_phy_reset = 1;
2274         do {
2275                 if (do_phy_reset) {
2276                         err = tg3_bmcr_reset(tp);
2277                         if (err)
2278                                 return err;
2279                         do_phy_reset = 0;
2280                 }
2281
2282                 /* Disable transmitter and interrupt.  */
2283                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2284                         continue;
2285
2286                 reg32 |= 0x3000;
2287                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2288
2289                 /* Set full-duplex, 1000 mbps.  */
2290                 tg3_writephy(tp, MII_BMCR,
2291                              BMCR_FULLDPLX | BMCR_SPEED1000);
2292
2293                 /* Set to master mode.  */
2294                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2295                         continue;
2296
2297                 tg3_writephy(tp, MII_CTRL1000,
2298                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2299
2300                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2301                 if (err)
2302                         return err;
2303
2304                 /* Block the PHY control access.  */
2305                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2306
2307                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2308                 if (!err)
2309                         break;
2310         } while (--retries);
2311
2312         err = tg3_phy_reset_chanpat(tp);
2313         if (err)
2314                 return err;
2315
2316         tg3_phydsp_write(tp, 0x8005, 0x0000);
2317
2318         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2319         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2320
2321         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2322
2323         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2324
2325         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2326                 reg32 &= ~0x3000;
2327                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2328         } else if (!err)
2329                 err = -EBUSY;
2330
2331         return err;
2332 }
2333
2334 /* This will reset the tigon3 PHY if there is no valid
2335  * link unless the FORCE argument is non-zero.
2336  */
2337 static int tg3_phy_reset(struct tg3 *tp)
2338 {
2339         u32 val, cpmuctrl;
2340         int err;
2341
2342         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2343                 val = tr32(GRC_MISC_CFG);
2344                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2345                 udelay(40);
2346         }
2347         err  = tg3_readphy(tp, MII_BMSR, &val);
2348         err |= tg3_readphy(tp, MII_BMSR, &val);
2349         if (err != 0)
2350                 return -EBUSY;
2351
2352         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2353                 netif_carrier_off(tp->dev);
2354                 tg3_link_report(tp);
2355         }
2356
2357         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2358             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2359             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2360                 err = tg3_phy_reset_5703_4_5(tp);
2361                 if (err)
2362                         return err;
2363                 goto out;
2364         }
2365
2366         cpmuctrl = 0;
2367         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2368             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2369                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2370                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2371                         tw32(TG3_CPMU_CTRL,
2372                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2373         }
2374
2375         err = tg3_bmcr_reset(tp);
2376         if (err)
2377                 return err;
2378
2379         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2380                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2381                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2382
2383                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2384         }
2385
2386         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2387             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2388                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2389                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2390                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2391                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2392                         udelay(40);
2393                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2394                 }
2395         }
2396
2397         if (tg3_flag(tp, 5717_PLUS) &&
2398             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2399                 return 0;
2400
2401         tg3_phy_apply_otp(tp);
2402
2403         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2404                 tg3_phy_toggle_apd(tp, true);
2405         else
2406                 tg3_phy_toggle_apd(tp, false);
2407
2408 out:
2409         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2410             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2411                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2412                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2413                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2414         }
2415
2416         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2417                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2418                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2419         }
2420
2421         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2422                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2423                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2424                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2425                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2426                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2427                 }
2428         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2429                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2430                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2431                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2432                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2433                                 tg3_writephy(tp, MII_TG3_TEST1,
2434                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2435                         } else
2436                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2437
2438                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2439                 }
2440         }
2441
2442         /* Set Extended packet length bit (bit 14) on all chips that */
2443         /* support jumbo frames */
2444         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2445                 /* Cannot do read-modify-write on 5401 */
2446                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2447         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2448                 /* Set bit 14 with read-modify-write to preserve other bits */
2449                 err = tg3_phy_auxctl_read(tp,
2450                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2451                 if (!err)
2452                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2453                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2454         }
2455
2456         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2457          * jumbo frames transmission.
2458          */
2459         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2460                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2461                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2462                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2463         }
2464
2465         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2466                 /* adjust output voltage */
2467                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2468         }
2469
2470         tg3_phy_toggle_automdix(tp, 1);
2471         tg3_phy_set_wirespeed(tp);
2472         return 0;
2473 }
2474
2475 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2476 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2477 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2478                                           TG3_GPIO_MSG_NEED_VAUX)
2479 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2480         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2481          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2482          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2483          (TG3_GPIO_MSG_DRVR_PRES << 12))
2484
2485 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2486         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2487          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2488          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2489          (TG3_GPIO_MSG_NEED_VAUX << 12))
2490
2491 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2492 {
2493         u32 status, shift;
2494
2495         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2496             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2497                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2498         else
2499                 status = tr32(TG3_CPMU_DRV_STATUS);
2500
2501         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2502         status &= ~(TG3_GPIO_MSG_MASK << shift);
2503         status |= (newstat << shift);
2504
2505         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2506             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2507                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2508         else
2509                 tw32(TG3_CPMU_DRV_STATUS, status);
2510
2511         return status >> TG3_APE_GPIO_MSG_SHIFT;
2512 }
2513
2514 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2515 {
2516         if (!tg3_flag(tp, IS_NIC))
2517                 return 0;
2518
2519         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2520             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2521             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2522                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2523                         return -EIO;
2524
2525                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2526
2527                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2528                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2529
2530                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2531         } else {
2532                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2533                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2534         }
2535
2536         return 0;
2537 }
2538
2539 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2540 {
2541         u32 grc_local_ctrl;
2542
2543         if (!tg3_flag(tp, IS_NIC) ||
2544             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2545             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2546                 return;
2547
2548         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2549
2550         tw32_wait_f(GRC_LOCAL_CTRL,
2551                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2552                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2553
2554         tw32_wait_f(GRC_LOCAL_CTRL,
2555                     grc_local_ctrl,
2556                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2557
2558         tw32_wait_f(GRC_LOCAL_CTRL,
2559                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2560                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2561 }
2562
2563 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2564 {
2565         if (!tg3_flag(tp, IS_NIC))
2566                 return;
2567
2568         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2569             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2570                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2571                             (GRC_LCLCTRL_GPIO_OE0 |
2572                              GRC_LCLCTRL_GPIO_OE1 |
2573                              GRC_LCLCTRL_GPIO_OE2 |
2574                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2575                              GRC_LCLCTRL_GPIO_OUTPUT1),
2576                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2577         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2578                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2579                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2580                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2581                                      GRC_LCLCTRL_GPIO_OE1 |
2582                                      GRC_LCLCTRL_GPIO_OE2 |
2583                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2584                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2585                                      tp->grc_local_ctrl;
2586                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2587                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2588
2589                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2590                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2591                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2592
2593                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2594                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2595                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2596         } else {
2597                 u32 no_gpio2;
2598                 u32 grc_local_ctrl = 0;
2599
2600                 /* Workaround to prevent overdrawing Amps. */
2601                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2602                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2603                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2604                                     grc_local_ctrl,
2605                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2606                 }
2607
2608                 /* On 5753 and variants, GPIO2 cannot be used. */
2609                 no_gpio2 = tp->nic_sram_data_cfg &
2610                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2611
2612                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2613                                   GRC_LCLCTRL_GPIO_OE1 |
2614                                   GRC_LCLCTRL_GPIO_OE2 |
2615                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2616                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2617                 if (no_gpio2) {
2618                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2619                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2620                 }
2621                 tw32_wait_f(GRC_LOCAL_CTRL,
2622                             tp->grc_local_ctrl | grc_local_ctrl,
2623                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2624
2625                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2626
2627                 tw32_wait_f(GRC_LOCAL_CTRL,
2628                             tp->grc_local_ctrl | grc_local_ctrl,
2629                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2630
2631                 if (!no_gpio2) {
2632                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2633                         tw32_wait_f(GRC_LOCAL_CTRL,
2634                                     tp->grc_local_ctrl | grc_local_ctrl,
2635                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2636                 }
2637         }
2638 }
2639
2640 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2641 {
2642         u32 msg = 0;
2643
2644         /* Serialize power state transitions */
2645         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2646                 return;
2647
2648         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2649                 msg = TG3_GPIO_MSG_NEED_VAUX;
2650
2651         msg = tg3_set_function_status(tp, msg);
2652
2653         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2654                 goto done;
2655
2656         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2657                 tg3_pwrsrc_switch_to_vaux(tp);
2658         else
2659                 tg3_pwrsrc_die_with_vmain(tp);
2660
2661 done:
2662         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2663 }
2664
2665 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2666 {
2667         bool need_vaux = false;
2668
2669         /* The GPIOs do something completely different on 57765. */
2670         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2671                 return;
2672
2673         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2674             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2675             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2676                 tg3_frob_aux_power_5717(tp, include_wol ?
2677                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2678                 return;
2679         }
2680
2681         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2682                 struct net_device *dev_peer;
2683
2684                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2685
2686                 /* remove_one() may have been run on the peer. */
2687                 if (dev_peer) {
2688                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2689
2690                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2691                                 return;
2692
2693                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2694                             tg3_flag(tp_peer, ENABLE_ASF))
2695                                 need_vaux = true;
2696                 }
2697         }
2698
2699         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2700             tg3_flag(tp, ENABLE_ASF))
2701                 need_vaux = true;
2702
2703         if (need_vaux)
2704                 tg3_pwrsrc_switch_to_vaux(tp);
2705         else
2706                 tg3_pwrsrc_die_with_vmain(tp);
2707 }
2708
2709 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2710 {
2711         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2712                 return 1;
2713         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2714                 if (speed != SPEED_10)
2715                         return 1;
2716         } else if (speed == SPEED_10)
2717                 return 1;
2718
2719         return 0;
2720 }
2721
2722 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2723 {
2724         u32 val;
2725
2726         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2727                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2728                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2729                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2730
2731                         sg_dig_ctrl |=
2732                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2733                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2734                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2735                 }
2736                 return;
2737         }
2738
2739         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2740                 tg3_bmcr_reset(tp);
2741                 val = tr32(GRC_MISC_CFG);
2742                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2743                 udelay(40);
2744                 return;
2745         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2746                 u32 phytest;
2747                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2748                         u32 phy;
2749
2750                         tg3_writephy(tp, MII_ADVERTISE, 0);
2751                         tg3_writephy(tp, MII_BMCR,
2752                                      BMCR_ANENABLE | BMCR_ANRESTART);
2753
2754                         tg3_writephy(tp, MII_TG3_FET_TEST,
2755                                      phytest | MII_TG3_FET_SHADOW_EN);
2756                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2757                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2758                                 tg3_writephy(tp,
2759                                              MII_TG3_FET_SHDW_AUXMODE4,
2760                                              phy);
2761                         }
2762                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2763                 }
2764                 return;
2765         } else if (do_low_power) {
2766                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2767                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2768
2769                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2770                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2771                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2772                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2773         }
2774
2775         /* The PHY should not be powered down on some chips because
2776          * of bugs.
2777          */
2778         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2779             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2780             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2781              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2782                 return;
2783
2784         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2785             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2786                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2787                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2788                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2789                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2790         }
2791
2792         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2793 }
2794
2795 /* tp->lock is held. */
2796 static int tg3_nvram_lock(struct tg3 *tp)
2797 {
2798         if (tg3_flag(tp, NVRAM)) {
2799                 int i;
2800
2801                 if (tp->nvram_lock_cnt == 0) {
2802                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2803                         for (i = 0; i < 8000; i++) {
2804                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2805                                         break;
2806                                 udelay(20);
2807                         }
2808                         if (i == 8000) {
2809                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2810                                 return -ENODEV;
2811                         }
2812                 }
2813                 tp->nvram_lock_cnt++;
2814         }
2815         return 0;
2816 }
2817
2818 /* tp->lock is held. */
2819 static void tg3_nvram_unlock(struct tg3 *tp)
2820 {
2821         if (tg3_flag(tp, NVRAM)) {
2822                 if (tp->nvram_lock_cnt > 0)
2823                         tp->nvram_lock_cnt--;
2824                 if (tp->nvram_lock_cnt == 0)
2825                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2826         }
2827 }
2828
2829 /* tp->lock is held. */
2830 static void tg3_enable_nvram_access(struct tg3 *tp)
2831 {
2832         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2833                 u32 nvaccess = tr32(NVRAM_ACCESS);
2834
2835                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2836         }
2837 }
2838
2839 /* tp->lock is held. */
2840 static void tg3_disable_nvram_access(struct tg3 *tp)
2841 {
2842         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2843                 u32 nvaccess = tr32(NVRAM_ACCESS);
2844
2845                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2846         }
2847 }
2848
2849 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2850                                         u32 offset, u32 *val)
2851 {
2852         u32 tmp;
2853         int i;
2854
2855         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2856                 return -EINVAL;
2857
2858         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2859                                         EEPROM_ADDR_DEVID_MASK |
2860                                         EEPROM_ADDR_READ);
2861         tw32(GRC_EEPROM_ADDR,
2862              tmp |
2863              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2864              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2865               EEPROM_ADDR_ADDR_MASK) |
2866              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2867
2868         for (i = 0; i < 1000; i++) {
2869                 tmp = tr32(GRC_EEPROM_ADDR);
2870
2871                 if (tmp & EEPROM_ADDR_COMPLETE)
2872                         break;
2873                 msleep(1);
2874         }
2875         if (!(tmp & EEPROM_ADDR_COMPLETE))
2876                 return -EBUSY;
2877
2878         tmp = tr32(GRC_EEPROM_DATA);
2879
2880         /*
2881          * The data will always be opposite the native endian
2882          * format.  Perform a blind byteswap to compensate.
2883          */
2884         *val = swab32(tmp);
2885
2886         return 0;
2887 }
2888
2889 #define NVRAM_CMD_TIMEOUT 10000
2890
2891 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2892 {
2893         int i;
2894
2895         tw32(NVRAM_CMD, nvram_cmd);
2896         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2897                 udelay(10);
2898                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2899                         udelay(10);
2900                         break;
2901                 }
2902         }
2903
2904         if (i == NVRAM_CMD_TIMEOUT)
2905                 return -EBUSY;
2906
2907         return 0;
2908 }
2909
2910 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2911 {
2912         if (tg3_flag(tp, NVRAM) &&
2913             tg3_flag(tp, NVRAM_BUFFERED) &&
2914             tg3_flag(tp, FLASH) &&
2915             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2916             (tp->nvram_jedecnum == JEDEC_ATMEL))
2917
2918                 addr = ((addr / tp->nvram_pagesize) <<
2919                         ATMEL_AT45DB0X1B_PAGE_POS) +
2920                        (addr % tp->nvram_pagesize);
2921
2922         return addr;
2923 }
2924
2925 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2926 {
2927         if (tg3_flag(tp, NVRAM) &&
2928             tg3_flag(tp, NVRAM_BUFFERED) &&
2929             tg3_flag(tp, FLASH) &&
2930             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2931             (tp->nvram_jedecnum == JEDEC_ATMEL))
2932
2933                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2934                         tp->nvram_pagesize) +
2935                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2936
2937         return addr;
2938 }
2939
2940 /* NOTE: Data read in from NVRAM is byteswapped according to
2941  * the byteswapping settings for all other register accesses.
2942  * tg3 devices are BE devices, so on a BE machine, the data
2943  * returned will be exactly as it is seen in NVRAM.  On a LE
2944  * machine, the 32-bit value will be byteswapped.
2945  */
2946 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2947 {
2948         int ret;
2949
2950         if (!tg3_flag(tp, NVRAM))
2951                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2952
2953         offset = tg3_nvram_phys_addr(tp, offset);
2954
2955         if (offset > NVRAM_ADDR_MSK)
2956                 return -EINVAL;
2957
2958         ret = tg3_nvram_lock(tp);
2959         if (ret)
2960                 return ret;
2961
2962         tg3_enable_nvram_access(tp);
2963
2964         tw32(NVRAM_ADDR, offset);
2965         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2966                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2967
2968         if (ret == 0)
2969                 *val = tr32(NVRAM_RDDATA);
2970
2971         tg3_disable_nvram_access(tp);
2972
2973         tg3_nvram_unlock(tp);
2974
2975         return ret;
2976 }
2977
2978 /* Ensures NVRAM data is in bytestream format. */
2979 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2980 {
2981         u32 v;
2982         int res = tg3_nvram_read(tp, offset, &v);
2983         if (!res)
2984                 *val = cpu_to_be32(v);
2985         return res;
2986 }
2987
2988 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
2989                                     u32 offset, u32 len, u8 *buf)
2990 {
2991         int i, j, rc = 0;
2992         u32 val;
2993
2994         for (i = 0; i < len; i += 4) {
2995                 u32 addr;
2996                 __be32 data;
2997
2998                 addr = offset + i;
2999
3000                 memcpy(&data, buf + i, 4);
3001
3002                 /*
3003                  * The SEEPROM interface expects the data to always be opposite
3004                  * the native endian format.  We accomplish this by reversing
3005                  * all the operations that would have been performed on the
3006                  * data from a call to tg3_nvram_read_be32().
3007                  */
3008                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3009
3010                 val = tr32(GRC_EEPROM_ADDR);
3011                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3012
3013                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3014                         EEPROM_ADDR_READ);
3015                 tw32(GRC_EEPROM_ADDR, val |
3016                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3017                         (addr & EEPROM_ADDR_ADDR_MASK) |
3018                         EEPROM_ADDR_START |
3019                         EEPROM_ADDR_WRITE);
3020
3021                 for (j = 0; j < 1000; j++) {
3022                         val = tr32(GRC_EEPROM_ADDR);
3023
3024                         if (val & EEPROM_ADDR_COMPLETE)
3025                                 break;
3026                         msleep(1);
3027                 }
3028                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3029                         rc = -EBUSY;
3030                         break;
3031                 }
3032         }
3033
3034         return rc;
3035 }
3036
3037 /* offset and length are dword aligned */
3038 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3039                 u8 *buf)
3040 {
3041         int ret = 0;
3042         u32 pagesize = tp->nvram_pagesize;
3043         u32 pagemask = pagesize - 1;
3044         u32 nvram_cmd;
3045         u8 *tmp;
3046
3047         tmp = kmalloc(pagesize, GFP_KERNEL);
3048         if (tmp == NULL)
3049                 return -ENOMEM;
3050
3051         while (len) {
3052                 int j;
3053                 u32 phy_addr, page_off, size;
3054
3055                 phy_addr = offset & ~pagemask;
3056
3057                 for (j = 0; j < pagesize; j += 4) {
3058                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3059                                                   (__be32 *) (tmp + j));
3060                         if (ret)
3061                                 break;
3062                 }
3063                 if (ret)
3064                         break;
3065
3066                 page_off = offset & pagemask;
3067                 size = pagesize;
3068                 if (len < size)
3069                         size = len;
3070
3071                 len -= size;
3072
3073                 memcpy(tmp + page_off, buf, size);
3074
3075                 offset = offset + (pagesize - page_off);
3076
3077                 tg3_enable_nvram_access(tp);
3078
3079                 /*
3080                  * Before we can erase the flash page, we need
3081                  * to issue a special "write enable" command.
3082                  */
3083                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3084
3085                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3086                         break;
3087
3088                 /* Erase the target page */
3089                 tw32(NVRAM_ADDR, phy_addr);
3090
3091                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3092                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3093
3094                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3095                         break;
3096
3097                 /* Issue another write enable to start the write. */
3098                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3099
3100                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3101                         break;
3102
3103                 for (j = 0; j < pagesize; j += 4) {
3104                         __be32 data;
3105
3106                         data = *((__be32 *) (tmp + j));
3107
3108                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3109
3110                         tw32(NVRAM_ADDR, phy_addr + j);
3111
3112                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3113                                 NVRAM_CMD_WR;
3114
3115                         if (j == 0)
3116                                 nvram_cmd |= NVRAM_CMD_FIRST;
3117                         else if (j == (pagesize - 4))
3118                                 nvram_cmd |= NVRAM_CMD_LAST;
3119
3120                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3121                         if (ret)
3122                                 break;
3123                 }
3124                 if (ret)
3125                         break;
3126         }
3127
3128         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3129         tg3_nvram_exec_cmd(tp, nvram_cmd);
3130
3131         kfree(tmp);
3132
3133         return ret;
3134 }
3135
3136 /* offset and length are dword aligned */
3137 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3138                 u8 *buf)
3139 {
3140         int i, ret = 0;
3141
3142         for (i = 0; i < len; i += 4, offset += 4) {
3143                 u32 page_off, phy_addr, nvram_cmd;
3144                 __be32 data;
3145
3146                 memcpy(&data, buf + i, 4);
3147                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3148
3149                 page_off = offset % tp->nvram_pagesize;
3150
3151                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3152
3153                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3154
3155                 if (page_off == 0 || i == 0)
3156                         nvram_cmd |= NVRAM_CMD_FIRST;
3157                 if (page_off == (tp->nvram_pagesize - 4))
3158                         nvram_cmd |= NVRAM_CMD_LAST;
3159
3160                 if (i == (len - 4))
3161                         nvram_cmd |= NVRAM_CMD_LAST;
3162
3163                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3164                     !tg3_flag(tp, FLASH) ||
3165                     !tg3_flag(tp, 57765_PLUS))
3166                         tw32(NVRAM_ADDR, phy_addr);
3167
3168                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3169                     !tg3_flag(tp, 5755_PLUS) &&
3170                     (tp->nvram_jedecnum == JEDEC_ST) &&
3171                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3172                         u32 cmd;
3173
3174                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3175                         ret = tg3_nvram_exec_cmd(tp, cmd);
3176                         if (ret)
3177                                 break;
3178                 }
3179                 if (!tg3_flag(tp, FLASH)) {
3180                         /* We always do complete word writes to eeprom. */
3181                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3182                 }
3183
3184                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3185                 if (ret)
3186                         break;
3187         }
3188         return ret;
3189 }
3190
3191 /* offset and length are dword aligned */
3192 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3193 {
3194         int ret;
3195
3196         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3197                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3198                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3199                 udelay(40);
3200         }
3201
3202         if (!tg3_flag(tp, NVRAM)) {
3203                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3204         } else {
3205                 u32 grc_mode;
3206
3207                 ret = tg3_nvram_lock(tp);
3208                 if (ret)
3209                         return ret;
3210
3211                 tg3_enable_nvram_access(tp);
3212                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3213                         tw32(NVRAM_WRITE1, 0x406);
3214
3215                 grc_mode = tr32(GRC_MODE);
3216                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3217
3218                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3219                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3220                                 buf);
3221                 } else {
3222                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3223                                 buf);
3224                 }
3225
3226                 grc_mode = tr32(GRC_MODE);
3227                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3228
3229                 tg3_disable_nvram_access(tp);
3230                 tg3_nvram_unlock(tp);
3231         }
3232
3233         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3234                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3235                 udelay(40);
3236         }
3237
3238         return ret;
3239 }
3240
3241 #define RX_CPU_SCRATCH_BASE     0x30000
3242 #define RX_CPU_SCRATCH_SIZE     0x04000
3243 #define TX_CPU_SCRATCH_BASE     0x34000
3244 #define TX_CPU_SCRATCH_SIZE     0x04000
3245
3246 /* tp->lock is held. */
3247 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3248 {
3249         int i;
3250
3251         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3252
3253         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3254                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3255
3256                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3257                 return 0;
3258         }
3259         if (offset == RX_CPU_BASE) {
3260                 for (i = 0; i < 10000; i++) {
3261                         tw32(offset + CPU_STATE, 0xffffffff);
3262                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3263                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3264                                 break;
3265                 }
3266
3267                 tw32(offset + CPU_STATE, 0xffffffff);
3268                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3269                 udelay(10);
3270         } else {
3271                 for (i = 0; i < 10000; i++) {
3272                         tw32(offset + CPU_STATE, 0xffffffff);
3273                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3274                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3275                                 break;
3276                 }
3277         }
3278
3279         if (i >= 10000) {
3280                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3281                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3282                 return -ENODEV;
3283         }
3284
3285         /* Clear firmware's nvram arbitration. */
3286         if (tg3_flag(tp, NVRAM))
3287                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3288         return 0;
3289 }
3290
3291 struct fw_info {
3292         unsigned int fw_base;
3293         unsigned int fw_len;
3294         const __be32 *fw_data;
3295 };
3296
3297 /* tp->lock is held. */
3298 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3299                                  u32 cpu_scratch_base, int cpu_scratch_size,
3300                                  struct fw_info *info)
3301 {
3302         int err, lock_err, i;
3303         void (*write_op)(struct tg3 *, u32, u32);
3304
3305         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3306                 netdev_err(tp->dev,
3307                            "%s: Trying to load TX cpu firmware which is 5705\n",
3308                            __func__);
3309                 return -EINVAL;
3310         }
3311
3312         if (tg3_flag(tp, 5705_PLUS))
3313                 write_op = tg3_write_mem;
3314         else
3315                 write_op = tg3_write_indirect_reg32;
3316
3317         /* It is possible that bootcode is still loading at this point.
3318          * Get the nvram lock first before halting the cpu.
3319          */
3320         lock_err = tg3_nvram_lock(tp);
3321         err = tg3_halt_cpu(tp, cpu_base);
3322         if (!lock_err)
3323                 tg3_nvram_unlock(tp);
3324         if (err)
3325                 goto out;
3326
3327         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3328                 write_op(tp, cpu_scratch_base + i, 0);
3329         tw32(cpu_base + CPU_STATE, 0xffffffff);
3330         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3331         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3332                 write_op(tp, (cpu_scratch_base +
3333                               (info->fw_base & 0xffff) +
3334                               (i * sizeof(u32))),
3335                               be32_to_cpu(info->fw_data[i]));
3336
3337         err = 0;
3338
3339 out:
3340         return err;
3341 }
3342
3343 /* tp->lock is held. */
3344 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3345 {
3346         struct fw_info info;
3347         const __be32 *fw_data;
3348         int err, i;
3349
3350         fw_data = (void *)tp->fw->data;
3351
3352         /* Firmware blob starts with version numbers, followed by
3353            start address and length. We are setting complete length.
3354            length = end_address_of_bss - start_address_of_text.
3355            Remainder is the blob to be loaded contiguously
3356            from start address. */
3357
3358         info.fw_base = be32_to_cpu(fw_data[1]);
3359         info.fw_len = tp->fw->size - 12;
3360         info.fw_data = &fw_data[3];
3361
3362         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3363                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3364                                     &info);
3365         if (err)
3366                 return err;
3367
3368         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3369                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3370                                     &info);
3371         if (err)
3372                 return err;
3373
3374         /* Now startup only the RX cpu. */
3375         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3376         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3377
3378         for (i = 0; i < 5; i++) {
3379                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3380                         break;
3381                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3382                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3383                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3384                 udelay(1000);
3385         }
3386         if (i >= 5) {
3387                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3388                            "should be %08x\n", __func__,
3389                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3390                 return -ENODEV;
3391         }
3392         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3393         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3394
3395         return 0;
3396 }
3397
3398 /* tp->lock is held. */
3399 static int tg3_load_tso_firmware(struct tg3 *tp)
3400 {
3401         struct fw_info info;
3402         const __be32 *fw_data;
3403         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3404         int err, i;
3405
3406         if (tg3_flag(tp, HW_TSO_1) ||
3407             tg3_flag(tp, HW_TSO_2) ||
3408             tg3_flag(tp, HW_TSO_3))
3409                 return 0;
3410
3411         fw_data = (void *)tp->fw->data;
3412
3413         /* Firmware blob starts with version numbers, followed by
3414            start address and length. We are setting complete length.
3415            length = end_address_of_bss - start_address_of_text.
3416            Remainder is the blob to be loaded contiguously
3417            from start address. */
3418
3419         info.fw_base = be32_to_cpu(fw_data[1]);
3420         cpu_scratch_size = tp->fw_len;
3421         info.fw_len = tp->fw->size - 12;
3422         info.fw_data = &fw_data[3];
3423
3424         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3425                 cpu_base = RX_CPU_BASE;
3426                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3427         } else {
3428                 cpu_base = TX_CPU_BASE;
3429                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3430                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3431         }
3432
3433         err = tg3_load_firmware_cpu(tp, cpu_base,
3434                                     cpu_scratch_base, cpu_scratch_size,
3435                                     &info);
3436         if (err)
3437                 return err;
3438
3439         /* Now startup the cpu. */
3440         tw32(cpu_base + CPU_STATE, 0xffffffff);
3441         tw32_f(cpu_base + CPU_PC, info.fw_base);
3442
3443         for (i = 0; i < 5; i++) {
3444                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3445                         break;
3446                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3447                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3448                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3449                 udelay(1000);
3450         }
3451         if (i >= 5) {
3452                 netdev_err(tp->dev,
3453                            "%s fails to set CPU PC, is %08x should be %08x\n",
3454                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3455                 return -ENODEV;
3456         }
3457         tw32(cpu_base + CPU_STATE, 0xffffffff);
3458         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3459         return 0;
3460 }
3461
3462
3463 /* tp->lock is held. */
3464 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3465 {
3466         u32 addr_high, addr_low;
3467         int i;
3468
3469         addr_high = ((tp->dev->dev_addr[0] << 8) |
3470                      tp->dev->dev_addr[1]);
3471         addr_low = ((tp->dev->dev_addr[2] << 24) |
3472                     (tp->dev->dev_addr[3] << 16) |
3473                     (tp->dev->dev_addr[4] <<  8) |
3474                     (tp->dev->dev_addr[5] <<  0));
3475         for (i = 0; i < 4; i++) {
3476                 if (i == 1 && skip_mac_1)
3477                         continue;
3478                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3479                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3480         }
3481
3482         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3483             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3484                 for (i = 0; i < 12; i++) {
3485                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3486                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3487                 }
3488         }
3489
3490         addr_high = (tp->dev->dev_addr[0] +
3491                      tp->dev->dev_addr[1] +
3492                      tp->dev->dev_addr[2] +
3493                      tp->dev->dev_addr[3] +
3494                      tp->dev->dev_addr[4] +
3495                      tp->dev->dev_addr[5]) &
3496                 TX_BACKOFF_SEED_MASK;
3497         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3498 }
3499
3500 static void tg3_enable_register_access(struct tg3 *tp)
3501 {
3502         /*
3503          * Make sure register accesses (indirect or otherwise) will function
3504          * correctly.
3505          */
3506         pci_write_config_dword(tp->pdev,
3507                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3508 }
3509
3510 static int tg3_power_up(struct tg3 *tp)
3511 {
3512         int err;
3513
3514         tg3_enable_register_access(tp);
3515
3516         err = pci_set_power_state(tp->pdev, PCI_D0);
3517         if (!err) {
3518                 /* Switch out of Vaux if it is a NIC */
3519                 tg3_pwrsrc_switch_to_vmain(tp);
3520         } else {
3521                 netdev_err(tp->dev, "Transition to D0 failed\n");
3522         }
3523
3524         return err;
3525 }
3526
3527 static int tg3_setup_phy(struct tg3 *, int);
3528
3529 static int tg3_power_down_prepare(struct tg3 *tp)
3530 {
3531         u32 misc_host_ctrl;
3532         bool device_should_wake, do_low_power;
3533
3534         tg3_enable_register_access(tp);
3535
3536         /* Restore the CLKREQ setting. */
3537         if (tg3_flag(tp, CLKREQ_BUG)) {
3538                 u16 lnkctl;
3539
3540                 pci_read_config_word(tp->pdev,
3541                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3542                                      &lnkctl);
3543                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3544                 pci_write_config_word(tp->pdev,
3545                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3546                                       lnkctl);
3547         }
3548
3549         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3550         tw32(TG3PCI_MISC_HOST_CTRL,
3551              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3552
3553         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3554                              tg3_flag(tp, WOL_ENABLE);
3555
3556         if (tg3_flag(tp, USE_PHYLIB)) {
3557                 do_low_power = false;
3558                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3559                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3560                         struct phy_device *phydev;
3561                         u32 phyid, advertising;
3562
3563                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3564
3565                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3566
3567                         tp->link_config.speed = phydev->speed;
3568                         tp->link_config.duplex = phydev->duplex;
3569                         tp->link_config.autoneg = phydev->autoneg;
3570                         tp->link_config.advertising = phydev->advertising;
3571
3572                         advertising = ADVERTISED_TP |
3573                                       ADVERTISED_Pause |
3574                                       ADVERTISED_Autoneg |
3575                                       ADVERTISED_10baseT_Half;
3576
3577                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3578                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3579                                         advertising |=
3580                                                 ADVERTISED_100baseT_Half |
3581                                                 ADVERTISED_100baseT_Full |
3582                                                 ADVERTISED_10baseT_Full;
3583                                 else
3584                                         advertising |= ADVERTISED_10baseT_Full;
3585                         }
3586
3587                         phydev->advertising = advertising;
3588
3589                         phy_start_aneg(phydev);
3590
3591                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3592                         if (phyid != PHY_ID_BCMAC131) {
3593                                 phyid &= PHY_BCM_OUI_MASK;
3594                                 if (phyid == PHY_BCM_OUI_1 ||
3595                                     phyid == PHY_BCM_OUI_2 ||
3596                                     phyid == PHY_BCM_OUI_3)
3597                                         do_low_power = true;
3598                         }
3599                 }
3600         } else {
3601                 do_low_power = true;
3602
3603                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3604                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3605
3606                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3607                         tg3_setup_phy(tp, 0);
3608         }
3609
3610         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3611                 u32 val;
3612
3613                 val = tr32(GRC_VCPU_EXT_CTRL);
3614                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3615         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3616                 int i;
3617                 u32 val;
3618
3619                 for (i = 0; i < 200; i++) {
3620                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3621                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3622                                 break;
3623                         msleep(1);
3624                 }
3625         }
3626         if (tg3_flag(tp, WOL_CAP))
3627                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3628                                                      WOL_DRV_STATE_SHUTDOWN |
3629                                                      WOL_DRV_WOL |
3630                                                      WOL_SET_MAGIC_PKT);
3631
3632         if (device_should_wake) {
3633                 u32 mac_mode;
3634
3635                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3636                         if (do_low_power &&
3637                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3638                                 tg3_phy_auxctl_write(tp,
3639                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3640                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3641                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3642                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3643                                 udelay(40);
3644                         }
3645
3646                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3647                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3648                         else
3649                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3650
3651                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3652                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3653                             ASIC_REV_5700) {
3654                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3655                                              SPEED_100 : SPEED_10;
3656                                 if (tg3_5700_link_polarity(tp, speed))
3657                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3658                                 else
3659                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3660                         }
3661                 } else {
3662                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3663                 }
3664
3665                 if (!tg3_flag(tp, 5750_PLUS))
3666                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3667
3668                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3669                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3670                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3671                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3672
3673                 if (tg3_flag(tp, ENABLE_APE))
3674                         mac_mode |= MAC_MODE_APE_TX_EN |
3675                                     MAC_MODE_APE_RX_EN |
3676                                     MAC_MODE_TDE_ENABLE;
3677
3678                 tw32_f(MAC_MODE, mac_mode);
3679                 udelay(100);
3680
3681                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3682                 udelay(10);
3683         }
3684
3685         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3686             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3687              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3688                 u32 base_val;
3689
3690                 base_val = tp->pci_clock_ctrl;
3691                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3692                              CLOCK_CTRL_TXCLK_DISABLE);
3693
3694                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3695                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3696         } else if (tg3_flag(tp, 5780_CLASS) ||
3697                    tg3_flag(tp, CPMU_PRESENT) ||
3698                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3699                 /* do nothing */
3700         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3701                 u32 newbits1, newbits2;
3702
3703                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3704                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3705                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3706                                     CLOCK_CTRL_TXCLK_DISABLE |
3707                                     CLOCK_CTRL_ALTCLK);
3708                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3709                 } else if (tg3_flag(tp, 5705_PLUS)) {
3710                         newbits1 = CLOCK_CTRL_625_CORE;
3711                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3712                 } else {
3713                         newbits1 = CLOCK_CTRL_ALTCLK;
3714                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3715                 }
3716
3717                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3718                             40);
3719
3720                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3721                             40);
3722
3723                 if (!tg3_flag(tp, 5705_PLUS)) {
3724                         u32 newbits3;
3725
3726                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3727                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3728                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3729                                             CLOCK_CTRL_TXCLK_DISABLE |
3730                                             CLOCK_CTRL_44MHZ_CORE);
3731                         } else {
3732                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3733                         }
3734
3735                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3736                                     tp->pci_clock_ctrl | newbits3, 40);
3737                 }
3738         }
3739
3740         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3741                 tg3_power_down_phy(tp, do_low_power);
3742
3743         tg3_frob_aux_power(tp, true);
3744
3745         /* Workaround for unstable PLL clock */
3746         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3747             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3748                 u32 val = tr32(0x7d00);
3749
3750                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3751                 tw32(0x7d00, val);
3752                 if (!tg3_flag(tp, ENABLE_ASF)) {
3753                         int err;
3754
3755                         err = tg3_nvram_lock(tp);
3756                         tg3_halt_cpu(tp, RX_CPU_BASE);
3757                         if (!err)
3758                                 tg3_nvram_unlock(tp);
3759                 }
3760         }
3761
3762         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3763
3764         return 0;
3765 }
3766
3767 static void tg3_power_down(struct tg3 *tp)
3768 {
3769         tg3_power_down_prepare(tp);
3770
3771         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3772         pci_set_power_state(tp->pdev, PCI_D3hot);
3773 }
3774
3775 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3776 {
3777         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3778         case MII_TG3_AUX_STAT_10HALF:
3779                 *speed = SPEED_10;
3780                 *duplex = DUPLEX_HALF;
3781                 break;
3782
3783         case MII_TG3_AUX_STAT_10FULL:
3784                 *speed = SPEED_10;
3785                 *duplex = DUPLEX_FULL;
3786                 break;
3787
3788         case MII_TG3_AUX_STAT_100HALF:
3789                 *speed = SPEED_100;
3790                 *duplex = DUPLEX_HALF;
3791                 break;
3792
3793         case MII_TG3_AUX_STAT_100FULL:
3794                 *speed = SPEED_100;
3795                 *duplex = DUPLEX_FULL;
3796                 break;
3797
3798         case MII_TG3_AUX_STAT_1000HALF:
3799                 *speed = SPEED_1000;
3800                 *duplex = DUPLEX_HALF;
3801                 break;
3802
3803         case MII_TG3_AUX_STAT_1000FULL:
3804                 *speed = SPEED_1000;
3805                 *duplex = DUPLEX_FULL;
3806                 break;
3807
3808         default:
3809                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3810                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3811                                  SPEED_10;
3812                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3813                                   DUPLEX_HALF;
3814                         break;
3815                 }
3816                 *speed = SPEED_UNKNOWN;
3817                 *duplex = DUPLEX_UNKNOWN;
3818                 break;
3819         }
3820 }
3821
3822 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3823 {
3824         int err = 0;
3825         u32 val, new_adv;
3826
3827         new_adv = ADVERTISE_CSMA;
3828         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3829         new_adv |= mii_advertise_flowctrl(flowctrl);
3830
3831         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3832         if (err)
3833                 goto done;
3834
3835         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3836                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3837
3838                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3839                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3840                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3841
3842                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3843                 if (err)
3844                         goto done;
3845         }
3846
3847         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3848                 goto done;
3849
3850         tw32(TG3_CPMU_EEE_MODE,
3851              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3852
3853         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3854         if (!err) {
3855                 u32 err2;
3856
3857                 val = 0;
3858                 /* Advertise 100-BaseTX EEE ability */
3859                 if (advertise & ADVERTISED_100baseT_Full)
3860                         val |= MDIO_AN_EEE_ADV_100TX;
3861                 /* Advertise 1000-BaseT EEE ability */
3862                 if (advertise & ADVERTISED_1000baseT_Full)
3863                         val |= MDIO_AN_EEE_ADV_1000T;
3864                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3865                 if (err)
3866                         val = 0;
3867
3868                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3869                 case ASIC_REV_5717:
3870                 case ASIC_REV_57765:
3871                 case ASIC_REV_57766:
3872                 case ASIC_REV_5719:
3873                         /* If we advertised any eee advertisements above... */
3874                         if (val)
3875                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3876                                       MII_TG3_DSP_TAP26_RMRXSTO |
3877                                       MII_TG3_DSP_TAP26_OPCSINPT;
3878                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3879                         /* Fall through */
3880                 case ASIC_REV_5720:
3881                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3882                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3883                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3884                 }
3885
3886                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3887                 if (!err)
3888                         err = err2;
3889         }
3890
3891 done:
3892         return err;
3893 }
3894
3895 static void tg3_phy_copper_begin(struct tg3 *tp)
3896 {
3897         u32 new_adv;
3898         int i;
3899
3900         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3901                 new_adv = ADVERTISED_10baseT_Half |
3902                           ADVERTISED_10baseT_Full;
3903                 if (tg3_flag(tp, WOL_SPEED_100MB))
3904                         new_adv |= ADVERTISED_100baseT_Half |
3905                                    ADVERTISED_100baseT_Full;
3906
3907                 tg3_phy_autoneg_cfg(tp, new_adv,
3908                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3909         } else if (tp->link_config.speed == SPEED_UNKNOWN) {
3910                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3911                         tp->link_config.advertising &=
3912                                 ~(ADVERTISED_1000baseT_Half |
3913                                   ADVERTISED_1000baseT_Full);
3914
3915                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3916                                     tp->link_config.flowctrl);
3917         } else {
3918                 /* Asking for a specific link mode. */
3919                 if (tp->link_config.speed == SPEED_1000) {
3920                         if (tp->link_config.duplex == DUPLEX_FULL)
3921                                 new_adv = ADVERTISED_1000baseT_Full;
3922                         else
3923                                 new_adv = ADVERTISED_1000baseT_Half;
3924                 } else if (tp->link_config.speed == SPEED_100) {
3925                         if (tp->link_config.duplex == DUPLEX_FULL)
3926                                 new_adv = ADVERTISED_100baseT_Full;
3927                         else
3928                                 new_adv = ADVERTISED_100baseT_Half;
3929                 } else {
3930                         if (tp->link_config.duplex == DUPLEX_FULL)
3931                                 new_adv = ADVERTISED_10baseT_Full;
3932                         else
3933                                 new_adv = ADVERTISED_10baseT_Half;
3934                 }
3935
3936                 tg3_phy_autoneg_cfg(tp, new_adv,
3937                                     tp->link_config.flowctrl);
3938         }
3939
3940         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3941             tp->link_config.speed != SPEED_UNKNOWN) {
3942                 u32 bmcr, orig_bmcr;
3943
3944                 tp->link_config.active_speed = tp->link_config.speed;
3945                 tp->link_config.active_duplex = tp->link_config.duplex;
3946
3947                 bmcr = 0;
3948                 switch (tp->link_config.speed) {
3949                 default:
3950                 case SPEED_10:
3951                         break;
3952
3953                 case SPEED_100:
3954                         bmcr |= BMCR_SPEED100;
3955                         break;
3956
3957                 case SPEED_1000:
3958                         bmcr |= BMCR_SPEED1000;
3959                         break;
3960                 }
3961
3962                 if (tp->link_config.duplex == DUPLEX_FULL)
3963                         bmcr |= BMCR_FULLDPLX;
3964
3965                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3966                     (bmcr != orig_bmcr)) {
3967                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3968                         for (i = 0; i < 1500; i++) {
3969                                 u32 tmp;
3970
3971                                 udelay(10);
3972                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3973                                     tg3_readphy(tp, MII_BMSR, &tmp))
3974                                         continue;
3975                                 if (!(tmp & BMSR_LSTATUS)) {
3976                                         udelay(40);
3977                                         break;
3978                                 }
3979                         }
3980                         tg3_writephy(tp, MII_BMCR, bmcr);
3981                         udelay(40);
3982                 }
3983         } else {
3984                 tg3_writephy(tp, MII_BMCR,
3985                              BMCR_ANENABLE | BMCR_ANRESTART);
3986         }
3987 }
3988
3989 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3990 {
3991         int err;
3992
3993         /* Turn off tap power management. */
3994         /* Set Extended packet length bit */
3995         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3996
3997         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3998         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3999         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4000         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4001         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4002
4003         udelay(40);
4004
4005         return err;
4006 }
4007
4008 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4009 {
4010         u32 advmsk, tgtadv, advertising;
4011
4012         advertising = tp->link_config.advertising;
4013         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4014
4015         advmsk = ADVERTISE_ALL;
4016         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4017                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4018                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4019         }
4020
4021         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4022                 return false;
4023
4024         if ((*lcladv & advmsk) != tgtadv)
4025                 return false;
4026
4027         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4028                 u32 tg3_ctrl;
4029
4030                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4031
4032                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4033                         return false;
4034
4035                 if (tgtadv &&
4036                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4037                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4038                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4039                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4040                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4041                 } else {
4042                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4043                 }
4044
4045                 if (tg3_ctrl != tgtadv)
4046                         return false;
4047         }
4048
4049         return true;
4050 }
4051
4052 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4053 {
4054         u32 lpeth = 0;
4055
4056         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4057                 u32 val;
4058
4059                 if (tg3_readphy(tp, MII_STAT1000, &val))
4060                         return false;
4061
4062                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4063         }
4064
4065         if (tg3_readphy(tp, MII_LPA, rmtadv))
4066                 return false;
4067
4068         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4069         tp->link_config.rmt_adv = lpeth;
4070
4071         return true;
4072 }
4073
4074 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4075 {
4076         int current_link_up;
4077         u32 bmsr, val;
4078         u32 lcl_adv, rmt_adv;
4079         u16 current_speed;
4080         u8 current_duplex;
4081         int i, err;
4082
4083         tw32(MAC_EVENT, 0);
4084
4085         tw32_f(MAC_STATUS,
4086              (MAC_STATUS_SYNC_CHANGED |
4087               MAC_STATUS_CFG_CHANGED |
4088               MAC_STATUS_MI_COMPLETION |
4089               MAC_STATUS_LNKSTATE_CHANGED));
4090         udelay(40);
4091
4092         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4093                 tw32_f(MAC_MI_MODE,
4094                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4095                 udelay(80);
4096         }
4097
4098         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4099
4100         /* Some third-party PHYs need to be reset on link going
4101          * down.
4102          */
4103         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4104              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4105              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4106             netif_carrier_ok(tp->dev)) {
4107                 tg3_readphy(tp, MII_BMSR, &bmsr);
4108                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4109                     !(bmsr & BMSR_LSTATUS))
4110                         force_reset = 1;
4111         }
4112         if (force_reset)
4113                 tg3_phy_reset(tp);
4114
4115         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4116                 tg3_readphy(tp, MII_BMSR, &bmsr);
4117                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4118                     !tg3_flag(tp, INIT_COMPLETE))
4119                         bmsr = 0;
4120
4121                 if (!(bmsr & BMSR_LSTATUS)) {
4122                         err = tg3_init_5401phy_dsp(tp);
4123                         if (err)
4124                                 return err;
4125
4126                         tg3_readphy(tp, MII_BMSR, &bmsr);
4127                         for (i = 0; i < 1000; i++) {
4128                                 udelay(10);
4129                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4130                                     (bmsr & BMSR_LSTATUS)) {
4131                                         udelay(40);
4132                                         break;
4133                                 }
4134                         }
4135
4136                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4137                             TG3_PHY_REV_BCM5401_B0 &&
4138                             !(bmsr & BMSR_LSTATUS) &&
4139                             tp->link_config.active_speed == SPEED_1000) {
4140                                 err = tg3_phy_reset(tp);
4141                                 if (!err)
4142                                         err = tg3_init_5401phy_dsp(tp);
4143                                 if (err)
4144                                         return err;
4145                         }
4146                 }
4147         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4148                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4149                 /* 5701 {A0,B0} CRC bug workaround */
4150                 tg3_writephy(tp, 0x15, 0x0a75);
4151                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4152                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4153                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4154         }
4155
4156         /* Clear pending interrupts... */
4157         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4158         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4159
4160         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4161                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4162         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4163                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4164
4165         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4166             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4167                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4168                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4169                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4170                 else
4171                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4172         }
4173
4174         current_link_up = 0;
4175         current_speed = SPEED_UNKNOWN;
4176         current_duplex = DUPLEX_UNKNOWN;
4177         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4178         tp->link_config.rmt_adv = 0;
4179
4180         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4181                 err = tg3_phy_auxctl_read(tp,
4182                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4183                                           &val);
4184                 if (!err && !(val & (1 << 10))) {
4185                         tg3_phy_auxctl_write(tp,
4186                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4187                                              val | (1 << 10));
4188                         goto relink;
4189                 }
4190         }
4191
4192         bmsr = 0;
4193         for (i = 0; i < 100; i++) {
4194                 tg3_readphy(tp, MII_BMSR, &bmsr);
4195                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4196                     (bmsr & BMSR_LSTATUS))
4197                         break;
4198                 udelay(40);
4199         }
4200
4201         if (bmsr & BMSR_LSTATUS) {
4202                 u32 aux_stat, bmcr;
4203
4204                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4205                 for (i = 0; i < 2000; i++) {
4206                         udelay(10);
4207                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4208                             aux_stat)
4209                                 break;
4210                 }
4211
4212                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4213                                              &current_speed,
4214                                              &current_duplex);
4215
4216                 bmcr = 0;
4217                 for (i = 0; i < 200; i++) {
4218                         tg3_readphy(tp, MII_BMCR, &bmcr);
4219                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4220                                 continue;
4221                         if (bmcr && bmcr != 0x7fff)
4222                                 break;
4223                         udelay(10);
4224                 }
4225
4226                 lcl_adv = 0;
4227                 rmt_adv = 0;
4228
4229                 tp->link_config.active_speed = current_speed;
4230                 tp->link_config.active_duplex = current_duplex;
4231
4232                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4233                         if ((bmcr & BMCR_ANENABLE) &&
4234                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4235                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4236                                 current_link_up = 1;
4237                 } else {
4238                         if (!(bmcr & BMCR_ANENABLE) &&
4239                             tp->link_config.speed == current_speed &&
4240                             tp->link_config.duplex == current_duplex &&
4241                             tp->link_config.flowctrl ==
4242                             tp->link_config.active_flowctrl) {
4243                                 current_link_up = 1;
4244                         }
4245                 }
4246
4247                 if (current_link_up == 1 &&
4248                     tp->link_config.active_duplex == DUPLEX_FULL) {
4249                         u32 reg, bit;
4250
4251                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4252                                 reg = MII_TG3_FET_GEN_STAT;
4253                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4254                         } else {
4255                                 reg = MII_TG3_EXT_STAT;
4256                                 bit = MII_TG3_EXT_STAT_MDIX;
4257                         }
4258
4259                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4260                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4261
4262                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4263                 }
4264         }
4265
4266 relink:
4267         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4268                 tg3_phy_copper_begin(tp);
4269
4270                 tg3_readphy(tp, MII_BMSR, &bmsr);
4271                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4272                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4273                         current_link_up = 1;
4274         }
4275
4276         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4277         if (current_link_up == 1) {
4278                 if (tp->link_config.active_speed == SPEED_100 ||
4279                     tp->link_config.active_speed == SPEED_10)
4280                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4281                 else
4282                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4283         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4284                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4285         else
4286                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4287
4288         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4289         if (tp->link_config.active_duplex == DUPLEX_HALF)
4290                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4291
4292         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4293                 if (current_link_up == 1 &&
4294                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4295                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4296                 else
4297                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4298         }
4299
4300         /* ??? Without this setting Netgear GA302T PHY does not
4301          * ??? send/receive packets...
4302          */
4303         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4304             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4305                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4306                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4307                 udelay(80);
4308         }
4309
4310         tw32_f(MAC_MODE, tp->mac_mode);
4311         udelay(40);
4312
4313         tg3_phy_eee_adjust(tp, current_link_up);
4314
4315         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4316                 /* Polled via timer. */
4317                 tw32_f(MAC_EVENT, 0);
4318         } else {
4319                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4320         }
4321         udelay(40);
4322
4323         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4324             current_link_up == 1 &&
4325             tp->link_config.active_speed == SPEED_1000 &&
4326             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4327                 udelay(120);
4328                 tw32_f(MAC_STATUS,
4329                      (MAC_STATUS_SYNC_CHANGED |
4330                       MAC_STATUS_CFG_CHANGED));
4331                 udelay(40);
4332                 tg3_write_mem(tp,
4333                               NIC_SRAM_FIRMWARE_MBOX,
4334                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4335         }
4336
4337         /* Prevent send BD corruption. */
4338         if (tg3_flag(tp, CLKREQ_BUG)) {
4339                 u16 oldlnkctl, newlnkctl;
4340
4341                 pci_read_config_word(tp->pdev,
4342                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4343                                      &oldlnkctl);
4344                 if (tp->link_config.active_speed == SPEED_100 ||
4345                     tp->link_config.active_speed == SPEED_10)
4346                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4347                 else
4348                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4349                 if (newlnkctl != oldlnkctl)
4350                         pci_write_config_word(tp->pdev,
4351                                               pci_pcie_cap(tp->pdev) +
4352                                               PCI_EXP_LNKCTL, newlnkctl);
4353         }
4354
4355         if (current_link_up != netif_carrier_ok(tp->dev)) {
4356                 if (current_link_up)
4357                         netif_carrier_on(tp->dev);
4358                 else
4359                         netif_carrier_off(tp->dev);
4360                 tg3_link_report(tp);
4361         }
4362
4363         return 0;
4364 }
4365
4366 struct tg3_fiber_aneginfo {
4367         int state;
4368 #define ANEG_STATE_UNKNOWN              0
4369 #define ANEG_STATE_AN_ENABLE            1
4370 #define ANEG_STATE_RESTART_INIT         2
4371 #define ANEG_STATE_RESTART              3
4372 #define ANEG_STATE_DISABLE_LINK_OK      4
4373 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4374 #define ANEG_STATE_ABILITY_DETECT       6
4375 #define ANEG_STATE_ACK_DETECT_INIT      7
4376 #define ANEG_STATE_ACK_DETECT           8
4377 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4378 #define ANEG_STATE_COMPLETE_ACK         10
4379 #define ANEG_STATE_IDLE_DETECT_INIT     11
4380 #define ANEG_STATE_IDLE_DETECT          12
4381 #define ANEG_STATE_LINK_OK              13
4382 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4383 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4384
4385         u32 flags;
4386 #define MR_AN_ENABLE            0x00000001
4387 #define MR_RESTART_AN           0x00000002
4388 #define MR_AN_COMPLETE          0x00000004
4389 #define MR_PAGE_RX              0x00000008
4390 #define MR_NP_LOADED            0x00000010
4391 #define MR_TOGGLE_TX            0x00000020
4392 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4393 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4394 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4395 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4396 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4397 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4398 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4399 #define MR_TOGGLE_RX            0x00002000
4400 #define MR_NP_RX                0x00004000
4401
4402 #define MR_LINK_OK              0x80000000
4403
4404         unsigned long link_time, cur_time;
4405
4406         u32 ability_match_cfg;
4407         int ability_match_count;
4408
4409         char ability_match, idle_match, ack_match;
4410
4411         u32 txconfig, rxconfig;
4412 #define ANEG_CFG_NP             0x00000080
4413 #define ANEG_CFG_ACK            0x00000040
4414 #define ANEG_CFG_RF2            0x00000020
4415 #define ANEG_CFG_RF1            0x00000010
4416 #define ANEG_CFG_PS2            0x00000001
4417 #define ANEG_CFG_PS1            0x00008000
4418 #define ANEG_CFG_HD             0x00004000
4419 #define ANEG_CFG_FD             0x00002000
4420 #define ANEG_CFG_INVAL          0x00001f06
4421
4422 };
4423 #define ANEG_OK         0
4424 #define ANEG_DONE       1
4425 #define ANEG_TIMER_ENAB 2
4426 #define ANEG_FAILED     -1
4427
4428 #define ANEG_STATE_SETTLE_TIME  10000
4429
4430 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4431                                    struct tg3_fiber_aneginfo *ap)
4432 {
4433         u16 flowctrl;
4434         unsigned long delta;
4435         u32 rx_cfg_reg;
4436         int ret;
4437
4438         if (ap->state == ANEG_STATE_UNKNOWN) {
4439                 ap->rxconfig = 0;
4440                 ap->link_time = 0;
4441                 ap->cur_time = 0;
4442                 ap->ability_match_cfg = 0;
4443                 ap->ability_match_count = 0;
4444                 ap->ability_match = 0;
4445                 ap->idle_match = 0;
4446                 ap->ack_match = 0;
4447         }
4448         ap->cur_time++;
4449
4450         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4451                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4452
4453                 if (rx_cfg_reg != ap->ability_match_cfg) {
4454                         ap->ability_match_cfg = rx_cfg_reg;
4455                         ap->ability_match = 0;
4456                         ap->ability_match_count = 0;
4457                 } else {
4458                         if (++ap->ability_match_count > 1) {
4459                                 ap->ability_match = 1;
4460                                 ap->ability_match_cfg = rx_cfg_reg;
4461                         }
4462                 }
4463                 if (rx_cfg_reg & ANEG_CFG_ACK)
4464                         ap->ack_match = 1;
4465                 else
4466                         ap->ack_match = 0;
4467
4468                 ap->idle_match = 0;
4469         } else {
4470                 ap->idle_match = 1;
4471                 ap->ability_match_cfg = 0;
4472                 ap->ability_match_count = 0;
4473                 ap->ability_match = 0;
4474                 ap->ack_match = 0;
4475
4476                 rx_cfg_reg = 0;
4477         }
4478
4479         ap->rxconfig = rx_cfg_reg;
4480         ret = ANEG_OK;
4481
4482         switch (ap->state) {
4483         case ANEG_STATE_UNKNOWN:
4484                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4485                         ap->state = ANEG_STATE_AN_ENABLE;
4486
4487                 /* fallthru */
4488         case ANEG_STATE_AN_ENABLE:
4489                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4490                 if (ap->flags & MR_AN_ENABLE) {
4491                         ap->link_time = 0;
4492                         ap->cur_time = 0;
4493                         ap->ability_match_cfg = 0;
4494                         ap->ability_match_count = 0;
4495                         ap->ability_match = 0;
4496                         ap->idle_match = 0;
4497                         ap->ack_match = 0;
4498
4499                         ap->state = ANEG_STATE_RESTART_INIT;
4500                 } else {
4501                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4502                 }
4503                 break;
4504
4505         case ANEG_STATE_RESTART_INIT:
4506                 ap->link_time = ap->cur_time;
4507                 ap->flags &= ~(MR_NP_LOADED);
4508                 ap->txconfig = 0;
4509                 tw32(MAC_TX_AUTO_NEG, 0);
4510                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4511                 tw32_f(MAC_MODE, tp->mac_mode);
4512                 udelay(40);
4513
4514                 ret = ANEG_TIMER_ENAB;
4515                 ap->state = ANEG_STATE_RESTART;
4516
4517                 /* fallthru */
4518         case ANEG_STATE_RESTART:
4519                 delta = ap->cur_time - ap->link_time;
4520                 if (delta > ANEG_STATE_SETTLE_TIME)
4521                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4522                 else
4523                         ret = ANEG_TIMER_ENAB;
4524                 break;
4525
4526         case ANEG_STATE_DISABLE_LINK_OK:
4527                 ret = ANEG_DONE;
4528                 break;
4529
4530         case ANEG_STATE_ABILITY_DETECT_INIT:
4531                 ap->flags &= ~(MR_TOGGLE_TX);
4532                 ap->txconfig = ANEG_CFG_FD;
4533                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4534                 if (flowctrl & ADVERTISE_1000XPAUSE)
4535                         ap->txconfig |= ANEG_CFG_PS1;
4536                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4537                         ap->txconfig |= ANEG_CFG_PS2;
4538                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4539                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4540                 tw32_f(MAC_MODE, tp->mac_mode);
4541                 udelay(40);
4542
4543                 ap->state = ANEG_STATE_ABILITY_DETECT;
4544                 break;
4545
4546         case ANEG_STATE_ABILITY_DETECT:
4547                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4548                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4549                 break;
4550
4551         case ANEG_STATE_ACK_DETECT_INIT:
4552                 ap->txconfig |= ANEG_CFG_ACK;
4553                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4554                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4555                 tw32_f(MAC_MODE, tp->mac_mode);
4556                 udelay(40);
4557
4558                 ap->state = ANEG_STATE_ACK_DETECT;
4559
4560                 /* fallthru */
4561         case ANEG_STATE_ACK_DETECT:
4562                 if (ap->ack_match != 0) {
4563                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4564                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4565                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4566                         } else {
4567                                 ap->state = ANEG_STATE_AN_ENABLE;
4568                         }
4569                 } else if (ap->ability_match != 0 &&
4570                            ap->rxconfig == 0) {
4571                         ap->state = ANEG_STATE_AN_ENABLE;
4572                 }
4573                 break;
4574
4575         case ANEG_STATE_COMPLETE_ACK_INIT:
4576                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4577                         ret = ANEG_FAILED;
4578                         break;
4579                 }
4580                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4581                                MR_LP_ADV_HALF_DUPLEX |
4582                                MR_LP_ADV_SYM_PAUSE |
4583                                MR_LP_ADV_ASYM_PAUSE |
4584                                MR_LP_ADV_REMOTE_FAULT1 |
4585                                MR_LP_ADV_REMOTE_FAULT2 |
4586                                MR_LP_ADV_NEXT_PAGE |
4587                                MR_TOGGLE_RX |
4588                                MR_NP_RX);
4589                 if (ap->rxconfig & ANEG_CFG_FD)
4590                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4591                 if (ap->rxconfig & ANEG_CFG_HD)
4592                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4593                 if (ap->rxconfig & ANEG_CFG_PS1)
4594                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4595                 if (ap->rxconfig & ANEG_CFG_PS2)
4596                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4597                 if (ap->rxconfig & ANEG_CFG_RF1)
4598                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4599                 if (ap->rxconfig & ANEG_CFG_RF2)
4600                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4601                 if (ap->rxconfig & ANEG_CFG_NP)
4602                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4603
4604                 ap->link_time = ap->cur_time;
4605
4606                 ap->flags ^= (MR_TOGGLE_TX);
4607                 if (ap->rxconfig & 0x0008)
4608                         ap->flags |= MR_TOGGLE_RX;
4609                 if (ap->rxconfig & ANEG_CFG_NP)
4610                         ap->flags |= MR_NP_RX;
4611                 ap->flags |= MR_PAGE_RX;
4612
4613                 ap->state = ANEG_STATE_COMPLETE_ACK;
4614                 ret = ANEG_TIMER_ENAB;
4615                 break;
4616
4617         case ANEG_STATE_COMPLETE_ACK:
4618                 if (ap->ability_match != 0 &&
4619                     ap->rxconfig == 0) {
4620                         ap->state = ANEG_STATE_AN_ENABLE;
4621                         break;
4622                 }
4623                 delta = ap->cur_time - ap->link_time;
4624                 if (delta > ANEG_STATE_SETTLE_TIME) {
4625                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4626                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4627                         } else {
4628                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4629                                     !(ap->flags & MR_NP_RX)) {
4630                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4631                                 } else {
4632                                         ret = ANEG_FAILED;
4633                                 }
4634                         }
4635                 }
4636                 break;
4637
4638         case ANEG_STATE_IDLE_DETECT_INIT:
4639                 ap->link_time = ap->cur_time;
4640                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4641                 tw32_f(MAC_MODE, tp->mac_mode);
4642                 udelay(40);
4643
4644                 ap->state = ANEG_STATE_IDLE_DETECT;
4645                 ret = ANEG_TIMER_ENAB;
4646                 break;
4647
4648         case ANEG_STATE_IDLE_DETECT:
4649                 if (ap->ability_match != 0 &&
4650                     ap->rxconfig == 0) {
4651                         ap->state = ANEG_STATE_AN_ENABLE;
4652                         break;
4653                 }
4654                 delta = ap->cur_time - ap->link_time;
4655                 if (delta > ANEG_STATE_SETTLE_TIME) {
4656                         /* XXX another gem from the Broadcom driver :( */
4657                         ap->state = ANEG_STATE_LINK_OK;
4658                 }
4659                 break;
4660
4661         case ANEG_STATE_LINK_OK:
4662                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4663                 ret = ANEG_DONE;
4664                 break;
4665
4666         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4667                 /* ??? unimplemented */
4668                 break;
4669
4670         case ANEG_STATE_NEXT_PAGE_WAIT:
4671                 /* ??? unimplemented */
4672                 break;
4673
4674         default:
4675                 ret = ANEG_FAILED;
4676                 break;
4677         }
4678
4679         return ret;
4680 }
4681
4682 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4683 {
4684         int res = 0;
4685         struct tg3_fiber_aneginfo aninfo;
4686         int status = ANEG_FAILED;
4687         unsigned int tick;
4688         u32 tmp;
4689
4690         tw32_f(MAC_TX_AUTO_NEG, 0);
4691
4692         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4693         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4694         udelay(40);
4695
4696         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4697         udelay(40);
4698
4699         memset(&aninfo, 0, sizeof(aninfo));
4700         aninfo.flags |= MR_AN_ENABLE;
4701         aninfo.state = ANEG_STATE_UNKNOWN;
4702         aninfo.cur_time = 0;
4703         tick = 0;
4704         while (++tick < 195000) {
4705                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4706                 if (status == ANEG_DONE || status == ANEG_FAILED)
4707                         break;
4708
4709                 udelay(1);
4710         }
4711
4712         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4713         tw32_f(MAC_MODE, tp->mac_mode);
4714         udelay(40);
4715
4716         *txflags = aninfo.txconfig;
4717         *rxflags = aninfo.flags;
4718
4719         if (status == ANEG_DONE &&
4720             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4721                              MR_LP_ADV_FULL_DUPLEX)))
4722                 res = 1;
4723
4724         return res;
4725 }
4726
4727 static void tg3_init_bcm8002(struct tg3 *tp)
4728 {
4729         u32 mac_status = tr32(MAC_STATUS);
4730         int i;
4731
4732         /* Reset when initting first time or we have a link. */
4733         if (tg3_flag(tp, INIT_COMPLETE) &&
4734             !(mac_status & MAC_STATUS_PCS_SYNCED))
4735                 return;
4736
4737         /* Set PLL lock range. */
4738         tg3_writephy(tp, 0x16, 0x8007);
4739
4740         /* SW reset */
4741         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4742
4743         /* Wait for reset to complete. */
4744         /* XXX schedule_timeout() ... */
4745         for (i = 0; i < 500; i++)
4746                 udelay(10);
4747
4748         /* Config mode; select PMA/Ch 1 regs. */
4749         tg3_writephy(tp, 0x10, 0x8411);
4750
4751         /* Enable auto-lock and comdet, select txclk for tx. */
4752         tg3_writephy(tp, 0x11, 0x0a10);
4753
4754         tg3_writephy(tp, 0x18, 0x00a0);
4755         tg3_writephy(tp, 0x16, 0x41ff);
4756
4757         /* Assert and deassert POR. */
4758         tg3_writephy(tp, 0x13, 0x0400);
4759         udelay(40);
4760         tg3_writephy(tp, 0x13, 0x0000);
4761
4762         tg3_writephy(tp, 0x11, 0x0a50);
4763         udelay(40);
4764         tg3_writephy(tp, 0x11, 0x0a10);
4765
4766         /* Wait for signal to stabilize */
4767         /* XXX schedule_timeout() ... */
4768         for (i = 0; i < 15000; i++)
4769                 udelay(10);
4770
4771         /* Deselect the channel register so we can read the PHYID
4772          * later.
4773          */
4774         tg3_writephy(tp, 0x10, 0x8011);
4775 }
4776
4777 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4778 {
4779         u16 flowctrl;
4780         u32 sg_dig_ctrl, sg_dig_status;
4781         u32 serdes_cfg, expected_sg_dig_ctrl;
4782         int workaround, port_a;
4783         int current_link_up;
4784
4785         serdes_cfg = 0;
4786         expected_sg_dig_ctrl = 0;
4787         workaround = 0;
4788         port_a = 1;
4789         current_link_up = 0;
4790
4791         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4792             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4793                 workaround = 1;
4794                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4795                         port_a = 0;
4796
4797                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4798                 /* preserve bits 20-23 for voltage regulator */
4799                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4800         }
4801
4802         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4803
4804         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4805                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4806                         if (workaround) {
4807                                 u32 val = serdes_cfg;
4808
4809                                 if (port_a)
4810                                         val |= 0xc010000;
4811                                 else
4812                                         val |= 0x4010000;
4813                                 tw32_f(MAC_SERDES_CFG, val);
4814                         }
4815
4816                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4817                 }
4818                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4819                         tg3_setup_flow_control(tp, 0, 0);
4820                         current_link_up = 1;
4821                 }
4822                 goto out;
4823         }
4824
4825         /* Want auto-negotiation.  */
4826         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4827
4828         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4829         if (flowctrl & ADVERTISE_1000XPAUSE)
4830                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4831         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4832                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4833
4834         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4835                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4836                     tp->serdes_counter &&
4837                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4838                                     MAC_STATUS_RCVD_CFG)) ==
4839                      MAC_STATUS_PCS_SYNCED)) {
4840                         tp->serdes_counter--;
4841                         current_link_up = 1;
4842                         goto out;
4843                 }
4844 restart_autoneg:
4845                 if (workaround)
4846                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4847                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4848                 udelay(5);
4849                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4850
4851                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4852                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4853         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4854                                  MAC_STATUS_SIGNAL_DET)) {
4855                 sg_dig_status = tr32(SG_DIG_STATUS);
4856                 mac_status = tr32(MAC_STATUS);
4857
4858                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4859                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4860                         u32 local_adv = 0, remote_adv = 0;
4861
4862                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4863                                 local_adv |= ADVERTISE_1000XPAUSE;
4864                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4865                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4866
4867                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4868                                 remote_adv |= LPA_1000XPAUSE;
4869                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4870                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4871
4872                         tp->link_config.rmt_adv =
4873                                            mii_adv_to_ethtool_adv_x(remote_adv);
4874
4875                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4876                         current_link_up = 1;
4877                         tp->serdes_counter = 0;
4878                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4879                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4880                         if (tp->serdes_counter)
4881                                 tp->serdes_counter--;
4882                         else {
4883                                 if (workaround) {
4884                                         u32 val = serdes_cfg;
4885
4886                                         if (port_a)
4887                                                 val |= 0xc010000;
4888                                         else
4889                                                 val |= 0x4010000;
4890
4891                                         tw32_f(MAC_SERDES_CFG, val);
4892                                 }
4893
4894                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4895                                 udelay(40);
4896
4897                                 /* Link parallel detection - link is up */
4898                                 /* only if we have PCS_SYNC and not */
4899                                 /* receiving config code words */
4900                                 mac_status = tr32(MAC_STATUS);
4901                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4902                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4903                                         tg3_setup_flow_control(tp, 0, 0);
4904                                         current_link_up = 1;
4905                                         tp->phy_flags |=
4906                                                 TG3_PHYFLG_PARALLEL_DETECT;
4907                                         tp->serdes_counter =
4908                                                 SERDES_PARALLEL_DET_TIMEOUT;
4909                                 } else
4910                                         goto restart_autoneg;
4911                         }
4912                 }
4913         } else {
4914                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4915                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4916         }
4917
4918 out:
4919         return current_link_up;
4920 }
4921
4922 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4923 {
4924         int current_link_up = 0;
4925
4926         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4927                 goto out;
4928
4929         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4930                 u32 txflags, rxflags;
4931                 int i;
4932
4933                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4934                         u32 local_adv = 0, remote_adv = 0;
4935
4936                         if (txflags & ANEG_CFG_PS1)
4937                                 local_adv |= ADVERTISE_1000XPAUSE;
4938                         if (txflags & ANEG_CFG_PS2)
4939                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4940
4941                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4942                                 remote_adv |= LPA_1000XPAUSE;
4943                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4944                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4945
4946                         tp->link_config.rmt_adv =
4947                                            mii_adv_to_ethtool_adv_x(remote_adv);
4948
4949                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4950
4951                         current_link_up = 1;
4952                 }
4953                 for (i = 0; i < 30; i++) {
4954                         udelay(20);
4955                         tw32_f(MAC_STATUS,
4956                                (MAC_STATUS_SYNC_CHANGED |
4957                                 MAC_STATUS_CFG_CHANGED));
4958                         udelay(40);
4959                         if ((tr32(MAC_STATUS) &
4960                              (MAC_STATUS_SYNC_CHANGED |
4961                               MAC_STATUS_CFG_CHANGED)) == 0)
4962                                 break;
4963                 }
4964
4965                 mac_status = tr32(MAC_STATUS);
4966                 if (current_link_up == 0 &&
4967                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4968                     !(mac_status & MAC_STATUS_RCVD_CFG))
4969                         current_link_up = 1;
4970         } else {
4971                 tg3_setup_flow_control(tp, 0, 0);
4972
4973                 /* Forcing 1000FD link up. */
4974                 current_link_up = 1;
4975
4976                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4977                 udelay(40);
4978
4979                 tw32_f(MAC_MODE, tp->mac_mode);
4980                 udelay(40);
4981         }
4982
4983 out:
4984         return current_link_up;
4985 }
4986
4987 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4988 {
4989         u32 orig_pause_cfg;
4990         u16 orig_active_speed;
4991         u8 orig_active_duplex;
4992         u32 mac_status;
4993         int current_link_up;
4994         int i;
4995
4996         orig_pause_cfg = tp->link_config.active_flowctrl;
4997         orig_active_speed = tp->link_config.active_speed;
4998         orig_active_duplex = tp->link_config.active_duplex;
4999
5000         if (!tg3_flag(tp, HW_AUTONEG) &&
5001             netif_carrier_ok(tp->dev) &&
5002             tg3_flag(tp, INIT_COMPLETE)) {
5003                 mac_status = tr32(MAC_STATUS);
5004                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5005                                MAC_STATUS_SIGNAL_DET |
5006                                MAC_STATUS_CFG_CHANGED |
5007                                MAC_STATUS_RCVD_CFG);
5008                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5009                                    MAC_STATUS_SIGNAL_DET)) {
5010                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5011                                             MAC_STATUS_CFG_CHANGED));
5012                         return 0;
5013                 }
5014         }
5015
5016         tw32_f(MAC_TX_AUTO_NEG, 0);
5017
5018         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5019         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5020         tw32_f(MAC_MODE, tp->mac_mode);
5021         udelay(40);
5022
5023         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5024                 tg3_init_bcm8002(tp);
5025
5026         /* Enable link change event even when serdes polling.  */
5027         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5028         udelay(40);
5029
5030         current_link_up = 0;
5031         tp->link_config.rmt_adv = 0;
5032         mac_status = tr32(MAC_STATUS);
5033
5034         if (tg3_flag(tp, HW_AUTONEG))
5035                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5036         else
5037                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5038
5039         tp->napi[0].hw_status->status =
5040                 (SD_STATUS_UPDATED |
5041                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5042
5043         for (i = 0; i < 100; i++) {
5044                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5045                                     MAC_STATUS_CFG_CHANGED));
5046                 udelay(5);
5047                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5048                                          MAC_STATUS_CFG_CHANGED |
5049                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5050                         break;
5051         }
5052
5053         mac_status = tr32(MAC_STATUS);
5054         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5055                 current_link_up = 0;
5056                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5057                     tp->serdes_counter == 0) {
5058                         tw32_f(MAC_MODE, (tp->mac_mode |
5059                                           MAC_MODE_SEND_CONFIGS));
5060                         udelay(1);
5061                         tw32_f(MAC_MODE, tp->mac_mode);
5062                 }
5063         }
5064
5065         if (current_link_up == 1) {
5066                 tp->link_config.active_speed = SPEED_1000;
5067                 tp->link_config.active_duplex = DUPLEX_FULL;
5068                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5069                                     LED_CTRL_LNKLED_OVERRIDE |
5070                                     LED_CTRL_1000MBPS_ON));
5071         } else {
5072                 tp->link_config.active_speed = SPEED_UNKNOWN;
5073                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5074                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5075                                     LED_CTRL_LNKLED_OVERRIDE |
5076                                     LED_CTRL_TRAFFIC_OVERRIDE));
5077         }
5078
5079         if (current_link_up != netif_carrier_ok(tp->dev)) {
5080                 if (current_link_up)
5081                         netif_carrier_on(tp->dev);
5082                 else
5083                         netif_carrier_off(tp->dev);
5084                 tg3_link_report(tp);
5085         } else {
5086                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5087                 if (orig_pause_cfg != now_pause_cfg ||
5088                     orig_active_speed != tp->link_config.active_speed ||
5089                     orig_active_duplex != tp->link_config.active_duplex)
5090                         tg3_link_report(tp);
5091         }
5092
5093         return 0;
5094 }
5095
5096 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5097 {
5098         int current_link_up, err = 0;
5099         u32 bmsr, bmcr;
5100         u16 current_speed;
5101         u8 current_duplex;
5102         u32 local_adv, remote_adv;
5103
5104         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5105         tw32_f(MAC_MODE, tp->mac_mode);
5106         udelay(40);
5107
5108         tw32(MAC_EVENT, 0);
5109
5110         tw32_f(MAC_STATUS,
5111              (MAC_STATUS_SYNC_CHANGED |
5112               MAC_STATUS_CFG_CHANGED |
5113               MAC_STATUS_MI_COMPLETION |
5114               MAC_STATUS_LNKSTATE_CHANGED));
5115         udelay(40);
5116
5117         if (force_reset)
5118                 tg3_phy_reset(tp);
5119
5120         current_link_up = 0;
5121         current_speed = SPEED_UNKNOWN;
5122         current_duplex = DUPLEX_UNKNOWN;
5123         tp->link_config.rmt_adv = 0;
5124
5125         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5126         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5127         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5128                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5129                         bmsr |= BMSR_LSTATUS;
5130                 else
5131                         bmsr &= ~BMSR_LSTATUS;
5132         }
5133
5134         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5135
5136         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5137             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5138                 /* do nothing, just check for link up at the end */
5139         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5140                 u32 adv, newadv;
5141
5142                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5143                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5144                                  ADVERTISE_1000XPAUSE |
5145                                  ADVERTISE_1000XPSE_ASYM |
5146                                  ADVERTISE_SLCT);
5147
5148                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5149                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5150
5151                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5152                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5153                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5154                         tg3_writephy(tp, MII_BMCR, bmcr);
5155
5156                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5157                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5158                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5159
5160                         return err;
5161                 }
5162         } else {
5163                 u32 new_bmcr;
5164
5165                 bmcr &= ~BMCR_SPEED1000;
5166                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5167
5168                 if (tp->link_config.duplex == DUPLEX_FULL)
5169                         new_bmcr |= BMCR_FULLDPLX;
5170
5171                 if (new_bmcr != bmcr) {
5172                         /* BMCR_SPEED1000 is a reserved bit that needs
5173                          * to be set on write.
5174                          */
5175                         new_bmcr |= BMCR_SPEED1000;
5176
5177                         /* Force a linkdown */
5178                         if (netif_carrier_ok(tp->dev)) {
5179                                 u32 adv;
5180
5181                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5182                                 adv &= ~(ADVERTISE_1000XFULL |
5183                                          ADVERTISE_1000XHALF |
5184                                          ADVERTISE_SLCT);
5185                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5186                                 tg3_writephy(tp, MII_BMCR, bmcr |
5187                                                            BMCR_ANRESTART |
5188                                                            BMCR_ANENABLE);
5189                                 udelay(10);
5190                                 netif_carrier_off(tp->dev);
5191                         }
5192                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5193                         bmcr = new_bmcr;
5194                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5195                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5196                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5197                             ASIC_REV_5714) {
5198                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5199                                         bmsr |= BMSR_LSTATUS;
5200                                 else
5201                                         bmsr &= ~BMSR_LSTATUS;
5202                         }
5203                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5204                 }
5205         }
5206
5207         if (bmsr & BMSR_LSTATUS) {
5208                 current_speed = SPEED_1000;
5209                 current_link_up = 1;
5210                 if (bmcr & BMCR_FULLDPLX)
5211                         current_duplex = DUPLEX_FULL;
5212                 else
5213                         current_duplex = DUPLEX_HALF;
5214
5215                 local_adv = 0;
5216                 remote_adv = 0;
5217
5218                 if (bmcr & BMCR_ANENABLE) {
5219                         u32 common;
5220
5221                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5222                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5223                         common = local_adv & remote_adv;
5224                         if (common & (ADVERTISE_1000XHALF |
5225                                       ADVERTISE_1000XFULL)) {
5226                                 if (common & ADVERTISE_1000XFULL)
5227                                         current_duplex = DUPLEX_FULL;
5228                                 else
5229                                         current_duplex = DUPLEX_HALF;
5230
5231                                 tp->link_config.rmt_adv =
5232                                            mii_adv_to_ethtool_adv_x(remote_adv);
5233                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5234                                 /* Link is up via parallel detect */
5235                         } else {
5236                                 current_link_up = 0;
5237                         }
5238                 }
5239         }
5240
5241         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5242                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5243
5244         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5245         if (tp->link_config.active_duplex == DUPLEX_HALF)
5246                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5247
5248         tw32_f(MAC_MODE, tp->mac_mode);
5249         udelay(40);
5250
5251         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5252
5253         tp->link_config.active_speed = current_speed;
5254         tp->link_config.active_duplex = current_duplex;
5255
5256         if (current_link_up != netif_carrier_ok(tp->dev)) {
5257                 if (current_link_up)
5258                         netif_carrier_on(tp->dev);
5259                 else {
5260                         netif_carrier_off(tp->dev);
5261                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5262                 }
5263                 tg3_link_report(tp);
5264         }
5265         return err;
5266 }
5267
5268 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5269 {
5270         if (tp->serdes_counter) {
5271                 /* Give autoneg time to complete. */
5272                 tp->serdes_counter--;
5273                 return;
5274         }
5275
5276         if (!netif_carrier_ok(tp->dev) &&
5277             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5278                 u32 bmcr;
5279
5280                 tg3_readphy(tp, MII_BMCR, &bmcr);
5281                 if (bmcr & BMCR_ANENABLE) {
5282                         u32 phy1, phy2;
5283
5284                         /* Select shadow register 0x1f */
5285                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5286                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5287
5288                         /* Select expansion interrupt status register */
5289                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5290                                          MII_TG3_DSP_EXP1_INT_STAT);
5291                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5292                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5293
5294                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5295                                 /* We have signal detect and not receiving
5296                                  * config code words, link is up by parallel
5297                                  * detection.
5298                                  */
5299
5300                                 bmcr &= ~BMCR_ANENABLE;
5301                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5302                                 tg3_writephy(tp, MII_BMCR, bmcr);
5303                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5304                         }
5305                 }
5306         } else if (netif_carrier_ok(tp->dev) &&
5307                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5308                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5309                 u32 phy2;
5310
5311                 /* Select expansion interrupt status register */
5312                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5313                                  MII_TG3_DSP_EXP1_INT_STAT);
5314                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5315                 if (phy2 & 0x20) {
5316                         u32 bmcr;
5317
5318                         /* Config code words received, turn on autoneg. */
5319                         tg3_readphy(tp, MII_BMCR, &bmcr);
5320                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5321
5322                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5323
5324                 }
5325         }
5326 }
5327
5328 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5329 {
5330         u32 val;
5331         int err;
5332
5333         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5334                 err = tg3_setup_fiber_phy(tp, force_reset);
5335         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5336                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5337         else
5338                 err = tg3_setup_copper_phy(tp, force_reset);
5339
5340         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5341                 u32 scale;
5342
5343                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5344                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5345                         scale = 65;
5346                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5347                         scale = 6;
5348                 else
5349                         scale = 12;
5350
5351                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5352                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5353                 tw32(GRC_MISC_CFG, val);
5354         }
5355
5356         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5357               (6 << TX_LENGTHS_IPG_SHIFT);
5358         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5359                 val |= tr32(MAC_TX_LENGTHS) &
5360                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5361                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5362
5363         if (tp->link_config.active_speed == SPEED_1000 &&
5364             tp->link_config.active_duplex == DUPLEX_HALF)
5365                 tw32(MAC_TX_LENGTHS, val |
5366                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5367         else
5368                 tw32(MAC_TX_LENGTHS, val |
5369                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5370
5371         if (!tg3_flag(tp, 5705_PLUS)) {
5372                 if (netif_carrier_ok(tp->dev)) {
5373                         tw32(HOSTCC_STAT_COAL_TICKS,
5374                              tp->coal.stats_block_coalesce_usecs);
5375                 } else {
5376                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5377                 }
5378         }
5379
5380         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5381                 val = tr32(PCIE_PWR_MGMT_THRESH);
5382                 if (!netif_carrier_ok(tp->dev))
5383                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5384                               tp->pwrmgmt_thresh;
5385                 else
5386                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5387                 tw32(PCIE_PWR_MGMT_THRESH, val);
5388         }
5389
5390         return err;
5391 }
5392
5393 static inline int tg3_irq_sync(struct tg3 *tp)
5394 {
5395         return tp->irq_sync;
5396 }
5397
5398 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5399 {
5400         int i;
5401
5402         dst = (u32 *)((u8 *)dst + off);
5403         for (i = 0; i < len; i += sizeof(u32))
5404                 *dst++ = tr32(off + i);
5405 }
5406
5407 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5408 {
5409         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5410         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5411         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5412         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5413         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5414         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5415         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5416         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5417         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5418         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5419         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5420         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5421         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5422         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5423         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5424         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5425         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5426         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5427         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5428
5429         if (tg3_flag(tp, SUPPORT_MSIX))
5430                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5431
5432         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5433         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5434         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5435         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5436         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5437         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5438         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5439         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5440
5441         if (!tg3_flag(tp, 5705_PLUS)) {
5442                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5443                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5444                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5445         }
5446
5447         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5448         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5449         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5450         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5451         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5452
5453         if (tg3_flag(tp, NVRAM))
5454                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5455 }
5456
5457 static void tg3_dump_state(struct tg3 *tp)
5458 {
5459         int i;
5460         u32 *regs;
5461
5462         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5463         if (!regs) {
5464                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5465                 return;
5466         }
5467
5468         if (tg3_flag(tp, PCI_EXPRESS)) {
5469                 /* Read up to but not including private PCI registers */
5470                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5471                         regs[i / sizeof(u32)] = tr32(i);
5472         } else
5473                 tg3_dump_legacy_regs(tp, regs);
5474
5475         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5476                 if (!regs[i + 0] && !regs[i + 1] &&
5477                     !regs[i + 2] && !regs[i + 3])
5478                         continue;
5479
5480                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5481                            i * 4,
5482                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5483         }
5484
5485         kfree(regs);
5486
5487         for (i = 0; i < tp->irq_cnt; i++) {
5488                 struct tg3_napi *tnapi = &tp->napi[i];
5489
5490                 /* SW status block */
5491                 netdev_err(tp->dev,
5492                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5493                            i,
5494                            tnapi->hw_status->status,
5495                            tnapi->hw_status->status_tag,
5496                            tnapi->hw_status->rx_jumbo_consumer,
5497                            tnapi->hw_status->rx_consumer,
5498                            tnapi->hw_status->rx_mini_consumer,
5499                            tnapi->hw_status->idx[0].rx_producer,
5500                            tnapi->hw_status->idx[0].tx_consumer);
5501
5502                 netdev_err(tp->dev,
5503                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5504                            i,
5505                            tnapi->last_tag, tnapi->last_irq_tag,
5506                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5507                            tnapi->rx_rcb_ptr,
5508                            tnapi->prodring.rx_std_prod_idx,
5509                            tnapi->prodring.rx_std_cons_idx,
5510                            tnapi->prodring.rx_jmb_prod_idx,
5511                            tnapi->prodring.rx_jmb_cons_idx);
5512         }
5513 }
5514
5515 /* This is called whenever we suspect that the system chipset is re-
5516  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5517  * is bogus tx completions. We try to recover by setting the
5518  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5519  * in the workqueue.
5520  */
5521 static void tg3_tx_recover(struct tg3 *tp)
5522 {
5523         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5524                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5525
5526         netdev_warn(tp->dev,
5527                     "The system may be re-ordering memory-mapped I/O "
5528                     "cycles to the network device, attempting to recover. "
5529                     "Please report the problem to the driver maintainer "
5530                     "and include system chipset information.\n");
5531
5532         spin_lock(&tp->lock);
5533         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5534         spin_unlock(&tp->lock);
5535 }
5536
5537 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5538 {
5539         /* Tell compiler to fetch tx indices from memory. */
5540         barrier();
5541         return tnapi->tx_pending -
5542                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5543 }
5544
5545 /* Tigon3 never reports partial packet sends.  So we do not
5546  * need special logic to handle SKBs that have not had all
5547  * of their frags sent yet, like SunGEM does.
5548  */
5549 static void tg3_tx(struct tg3_napi *tnapi)
5550 {
5551         struct tg3 *tp = tnapi->tp;
5552         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5553         u32 sw_idx = tnapi->tx_cons;
5554         struct netdev_queue *txq;
5555         int index = tnapi - tp->napi;
5556         unsigned int pkts_compl = 0, bytes_compl = 0;
5557
5558         if (tg3_flag(tp, ENABLE_TSS))
5559                 index--;
5560
5561         txq = netdev_get_tx_queue(tp->dev, index);
5562
5563         while (sw_idx != hw_idx) {
5564                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5565                 struct sk_buff *skb = ri->skb;
5566                 int i, tx_bug = 0;
5567
5568                 if (unlikely(skb == NULL)) {
5569                         tg3_tx_recover(tp);
5570                         return;
5571                 }
5572
5573                 pci_unmap_single(tp->pdev,
5574                                  dma_unmap_addr(ri, mapping),
5575                                  skb_headlen(skb),
5576                                  PCI_DMA_TODEVICE);
5577
5578                 ri->skb = NULL;
5579
5580                 while (ri->fragmented) {
5581                         ri->fragmented = false;
5582                         sw_idx = NEXT_TX(sw_idx);
5583                         ri = &tnapi->tx_buffers[sw_idx];
5584                 }
5585
5586                 sw_idx = NEXT_TX(sw_idx);
5587
5588                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5589                         ri = &tnapi->tx_buffers[sw_idx];
5590                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5591                                 tx_bug = 1;
5592
5593                         pci_unmap_page(tp->pdev,
5594                                        dma_unmap_addr(ri, mapping),
5595                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5596                                        PCI_DMA_TODEVICE);
5597
5598                         while (ri->fragmented) {
5599                                 ri->fragmented = false;
5600                                 sw_idx = NEXT_TX(sw_idx);
5601                                 ri = &tnapi->tx_buffers[sw_idx];
5602                         }
5603
5604                         sw_idx = NEXT_TX(sw_idx);
5605                 }
5606
5607                 pkts_compl++;
5608                 bytes_compl += skb->len;
5609
5610                 dev_kfree_skb(skb);
5611
5612                 if (unlikely(tx_bug)) {
5613                         tg3_tx_recover(tp);
5614                         return;
5615                 }
5616         }
5617
5618         netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
5619
5620         tnapi->tx_cons = sw_idx;
5621
5622         /* Need to make the tx_cons update visible to tg3_start_xmit()
5623          * before checking for netif_queue_stopped().  Without the
5624          * memory barrier, there is a small possibility that tg3_start_xmit()
5625          * will miss it and cause the queue to be stopped forever.
5626          */
5627         smp_mb();
5628
5629         if (unlikely(netif_tx_queue_stopped(txq) &&
5630                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5631                 __netif_tx_lock(txq, smp_processor_id());
5632                 if (netif_tx_queue_stopped(txq) &&
5633                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5634                         netif_tx_wake_queue(txq);
5635                 __netif_tx_unlock(txq);
5636         }
5637 }
5638
5639 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5640 {
5641         if (!ri->data)
5642                 return;
5643
5644         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5645                          map_sz, PCI_DMA_FROMDEVICE);
5646         kfree(ri->data);
5647         ri->data = NULL;
5648 }
5649
5650 /* Returns size of skb allocated or < 0 on error.
5651  *
5652  * We only need to fill in the address because the other members
5653  * of the RX descriptor are invariant, see tg3_init_rings.
5654  *
5655  * Note the purposeful assymetry of cpu vs. chip accesses.  For
5656  * posting buffers we only dirty the first cache line of the RX
5657  * descriptor (containing the address).  Whereas for the RX status
5658  * buffers the cpu only reads the last cacheline of the RX descriptor
5659  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5660  */
5661 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5662                             u32 opaque_key, u32 dest_idx_unmasked)
5663 {
5664         struct tg3_rx_buffer_desc *desc;
5665         struct ring_info *map;
5666         u8 *data;
5667         dma_addr_t mapping;
5668         int skb_size, data_size, dest_idx;
5669
5670         switch (opaque_key) {
5671         case RXD_OPAQUE_RING_STD:
5672                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5673                 desc = &tpr->rx_std[dest_idx];
5674                 map = &tpr->rx_std_buffers[dest_idx];
5675                 data_size = tp->rx_pkt_map_sz;
5676                 break;
5677
5678         case RXD_OPAQUE_RING_JUMBO:
5679                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5680                 desc = &tpr->rx_jmb[dest_idx].std;
5681                 map = &tpr->rx_jmb_buffers[dest_idx];
5682                 data_size = TG3_RX_JMB_MAP_SZ;
5683                 break;
5684
5685         default:
5686                 return -EINVAL;
5687         }
5688
5689         /* Do not overwrite any of the map or rp information
5690          * until we are sure we can commit to a new buffer.
5691          *
5692          * Callers depend upon this behavior and assume that
5693          * we leave everything unchanged if we fail.
5694          */
5695         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5696                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5697         data = kmalloc(skb_size, GFP_ATOMIC);
5698         if (!data)
5699                 return -ENOMEM;
5700
5701         mapping = pci_map_single(tp->pdev,
5702                                  data + TG3_RX_OFFSET(tp),
5703                                  data_size,
5704                                  PCI_DMA_FROMDEVICE);
5705         if (pci_dma_mapping_error(tp->pdev, mapping)) {
5706                 kfree(data);
5707                 return -EIO;
5708         }
5709
5710         map->data = data;
5711         dma_unmap_addr_set(map, mapping, mapping);
5712
5713         desc->addr_hi = ((u64)mapping >> 32);
5714         desc->addr_lo = ((u64)mapping & 0xffffffff);
5715
5716         return data_size;
5717 }
5718
5719 /* We only need to move over in the address because the other
5720  * members of the RX descriptor are invariant.  See notes above
5721  * tg3_alloc_rx_data for full details.
5722  */
5723 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5724                            struct tg3_rx_prodring_set *dpr,
5725                            u32 opaque_key, int src_idx,
5726                            u32 dest_idx_unmasked)
5727 {
5728         struct tg3 *tp = tnapi->tp;
5729         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5730         struct ring_info *src_map, *dest_map;
5731         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5732         int dest_idx;
5733
5734         switch (opaque_key) {
5735         case RXD_OPAQUE_RING_STD:
5736                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5737                 dest_desc = &dpr->rx_std[dest_idx];
5738                 dest_map = &dpr->rx_std_buffers[dest_idx];
5739                 src_desc = &spr->rx_std[src_idx];
5740                 src_map = &spr->rx_std_buffers[src_idx];
5741                 break;
5742
5743         case RXD_OPAQUE_RING_JUMBO:
5744                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5745                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5746                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5747                 src_desc = &spr->rx_jmb[src_idx].std;
5748                 src_map = &spr->rx_jmb_buffers[src_idx];
5749                 break;
5750
5751         default:
5752                 return;
5753         }
5754
5755         dest_map->data = src_map->data;
5756         dma_unmap_addr_set(dest_map, mapping,
5757                            dma_unmap_addr(src_map, mapping));
5758         dest_desc->addr_hi = src_desc->addr_hi;
5759         dest_desc->addr_lo = src_desc->addr_lo;
5760
5761         /* Ensure that the update to the skb happens after the physical
5762          * addresses have been transferred to the new BD location.
5763          */
5764         smp_wmb();
5765
5766         src_map->data = NULL;
5767 }
5768
5769 /* The RX ring scheme is composed of multiple rings which post fresh
5770  * buffers to the chip, and one special ring the chip uses to report
5771  * status back to the host.
5772  *
5773  * The special ring reports the status of received packets to the
5774  * host.  The chip does not write into the original descriptor the
5775  * RX buffer was obtained from.  The chip simply takes the original
5776  * descriptor as provided by the host, updates the status and length
5777  * field, then writes this into the next status ring entry.
5778  *
5779  * Each ring the host uses to post buffers to the chip is described
5780  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
5781  * it is first placed into the on-chip ram.  When the packet's length
5782  * is known, it walks down the TG3_BDINFO entries to select the ring.
5783  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5784  * which is within the range of the new packet's length is chosen.
5785  *
5786  * The "separate ring for rx status" scheme may sound queer, but it makes
5787  * sense from a cache coherency perspective.  If only the host writes
5788  * to the buffer post rings, and only the chip writes to the rx status
5789  * rings, then cache lines never move beyond shared-modified state.
5790  * If both the host and chip were to write into the same ring, cache line
5791  * eviction could occur since both entities want it in an exclusive state.
5792  */
5793 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5794 {
5795         struct tg3 *tp = tnapi->tp;
5796         u32 work_mask, rx_std_posted = 0;
5797         u32 std_prod_idx, jmb_prod_idx;
5798         u32 sw_idx = tnapi->rx_rcb_ptr;
5799         u16 hw_idx;
5800         int received;
5801         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5802
5803         hw_idx = *(tnapi->rx_rcb_prod_idx);
5804         /*
5805          * We need to order the read of hw_idx and the read of
5806          * the opaque cookie.
5807          */
5808         rmb();
5809         work_mask = 0;
5810         received = 0;
5811         std_prod_idx = tpr->rx_std_prod_idx;
5812         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5813         while (sw_idx != hw_idx && budget > 0) {
5814                 struct ring_info *ri;
5815                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5816                 unsigned int len;
5817                 struct sk_buff *skb;
5818                 dma_addr_t dma_addr;
5819                 u32 opaque_key, desc_idx, *post_ptr;
5820                 u8 *data;
5821
5822                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5823                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5824                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5825                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5826                         dma_addr = dma_unmap_addr(ri, mapping);
5827                         data = ri->data;
5828                         post_ptr = &std_prod_idx;
5829                         rx_std_posted++;
5830                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5831                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5832                         dma_addr = dma_unmap_addr(ri, mapping);
5833                         data = ri->data;
5834                         post_ptr = &jmb_prod_idx;
5835                 } else
5836                         goto next_pkt_nopost;
5837
5838                 work_mask |= opaque_key;
5839
5840                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5841                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5842                 drop_it:
5843                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5844                                        desc_idx, *post_ptr);
5845                 drop_it_no_recycle:
5846                         /* Other statistics kept track of by card. */
5847                         tp->rx_dropped++;
5848                         goto next_pkt;
5849                 }
5850
5851                 prefetch(data + TG3_RX_OFFSET(tp));
5852                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5853                       ETH_FCS_LEN;
5854
5855                 if (len > TG3_RX_COPY_THRESH(tp)) {
5856                         int skb_size;
5857
5858                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5859                                                     *post_ptr);
5860                         if (skb_size < 0)
5861                                 goto drop_it;
5862
5863                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5864                                          PCI_DMA_FROMDEVICE);
5865
5866                         skb = build_skb(data);
5867                         if (!skb) {
5868                                 kfree(data);
5869                                 goto drop_it_no_recycle;
5870                         }
5871                         skb_reserve(skb, TG3_RX_OFFSET(tp));
5872                         /* Ensure that the update to the data happens
5873                          * after the usage of the old DMA mapping.
5874                          */
5875                         smp_wmb();
5876
5877                         ri->data = NULL;
5878
5879                 } else {
5880                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5881                                        desc_idx, *post_ptr);
5882
5883                         skb = netdev_alloc_skb(tp->dev,
5884                                                len + TG3_RAW_IP_ALIGN);
5885                         if (skb == NULL)
5886                                 goto drop_it_no_recycle;
5887
5888                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
5889                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5890                         memcpy(skb->data,
5891                                data + TG3_RX_OFFSET(tp),
5892                                len);
5893                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5894                 }
5895
5896                 skb_put(skb, len);
5897                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5898                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5899                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5900                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5901                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5902                 else
5903                         skb_checksum_none_assert(skb);
5904
5905                 skb->protocol = eth_type_trans(skb, tp->dev);
5906
5907                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5908                     skb->protocol != htons(ETH_P_8021Q)) {
5909                         dev_kfree_skb(skb);
5910                         goto drop_it_no_recycle;
5911                 }
5912
5913                 if (desc->type_flags & RXD_FLAG_VLAN &&
5914                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5915                         __vlan_hwaccel_put_tag(skb,
5916                                                desc->err_vlan & RXD_VLAN_MASK);
5917
5918                 napi_gro_receive(&tnapi->napi, skb);
5919
5920                 received++;
5921                 budget--;
5922
5923 next_pkt:
5924                 (*post_ptr)++;
5925
5926                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5927                         tpr->rx_std_prod_idx = std_prod_idx &
5928                                                tp->rx_std_ring_mask;
5929                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5930                                      tpr->rx_std_prod_idx);
5931                         work_mask &= ~RXD_OPAQUE_RING_STD;
5932                         rx_std_posted = 0;
5933                 }
5934 next_pkt_nopost:
5935                 sw_idx++;
5936                 sw_idx &= tp->rx_ret_ring_mask;
5937
5938                 /* Refresh hw_idx to see if there is new work */
5939                 if (sw_idx == hw_idx) {
5940                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5941                         rmb();
5942                 }
5943         }
5944
5945         /* ACK the status ring. */
5946         tnapi->rx_rcb_ptr = sw_idx;
5947         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5948
5949         /* Refill RX ring(s). */
5950         if (!tg3_flag(tp, ENABLE_RSS)) {
5951                 if (work_mask & RXD_OPAQUE_RING_STD) {
5952                         tpr->rx_std_prod_idx = std_prod_idx &
5953                                                tp->rx_std_ring_mask;
5954                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5955                                      tpr->rx_std_prod_idx);
5956                 }
5957                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5958                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5959                                                tp->rx_jmb_ring_mask;
5960                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5961                                      tpr->rx_jmb_prod_idx);
5962                 }
5963                 mmiowb();
5964         } else if (work_mask) {
5965                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5966                  * updated before the producer indices can be updated.
5967                  */
5968                 smp_wmb();
5969
5970                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5971                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5972
5973                 if (tnapi != &tp->napi[1])
5974                         napi_schedule(&tp->napi[1].napi);
5975         }
5976
5977         return received;
5978 }
5979
5980 static void tg3_poll_link(struct tg3 *tp)
5981 {
5982         /* handle link change and other phy events */
5983         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5984                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5985
5986                 if (sblk->status & SD_STATUS_LINK_CHG) {
5987                         sblk->status = SD_STATUS_UPDATED |
5988                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5989                         spin_lock(&tp->lock);
5990                         if (tg3_flag(tp, USE_PHYLIB)) {
5991                                 tw32_f(MAC_STATUS,
5992                                      (MAC_STATUS_SYNC_CHANGED |
5993                                       MAC_STATUS_CFG_CHANGED |
5994                                       MAC_STATUS_MI_COMPLETION |
5995                                       MAC_STATUS_LNKSTATE_CHANGED));
5996                                 udelay(40);
5997                         } else
5998                                 tg3_setup_phy(tp, 0);
5999                         spin_unlock(&tp->lock);
6000                 }
6001         }
6002 }
6003
6004 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6005                                 struct tg3_rx_prodring_set *dpr,
6006                                 struct tg3_rx_prodring_set *spr)
6007 {
6008         u32 si, di, cpycnt, src_prod_idx;
6009         int i, err = 0;
6010
6011         while (1) {
6012                 src_prod_idx = spr->rx_std_prod_idx;
6013
6014                 /* Make sure updates to the rx_std_buffers[] entries and the
6015                  * standard producer index are seen in the correct order.
6016                  */
6017                 smp_rmb();
6018
6019                 if (spr->rx_std_cons_idx == src_prod_idx)
6020                         break;
6021
6022                 if (spr->rx_std_cons_idx < src_prod_idx)
6023                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6024                 else
6025                         cpycnt = tp->rx_std_ring_mask + 1 -
6026                                  spr->rx_std_cons_idx;
6027
6028                 cpycnt = min(cpycnt,
6029                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6030
6031                 si = spr->rx_std_cons_idx;
6032                 di = dpr->rx_std_prod_idx;
6033
6034                 for (i = di; i < di + cpycnt; i++) {
6035                         if (dpr->rx_std_buffers[i].data) {
6036                                 cpycnt = i - di;
6037                                 err = -ENOSPC;
6038                                 break;
6039                         }
6040                 }
6041
6042                 if (!cpycnt)
6043                         break;
6044
6045                 /* Ensure that updates to the rx_std_buffers ring and the
6046                  * shadowed hardware producer ring from tg3_recycle_skb() are
6047                  * ordered correctly WRT the skb check above.
6048                  */
6049                 smp_rmb();
6050
6051                 memcpy(&dpr->rx_std_buffers[di],
6052                        &spr->rx_std_buffers[si],
6053                        cpycnt * sizeof(struct ring_info));
6054
6055                 for (i = 0; i < cpycnt; i++, di++, si++) {
6056                         struct tg3_rx_buffer_desc *sbd, *dbd;
6057                         sbd = &spr->rx_std[si];
6058                         dbd = &dpr->rx_std[di];
6059                         dbd->addr_hi = sbd->addr_hi;
6060                         dbd->addr_lo = sbd->addr_lo;
6061                 }
6062
6063                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6064                                        tp->rx_std_ring_mask;
6065                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6066                                        tp->rx_std_ring_mask;
6067         }
6068
6069         while (1) {
6070                 src_prod_idx = spr->rx_jmb_prod_idx;
6071
6072                 /* Make sure updates to the rx_jmb_buffers[] entries and
6073                  * the jumbo producer index are seen in the correct order.
6074                  */
6075                 smp_rmb();
6076
6077                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6078                         break;
6079
6080                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6081                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6082                 else
6083                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6084                                  spr->rx_jmb_cons_idx;
6085
6086                 cpycnt = min(cpycnt,
6087                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6088
6089                 si = spr->rx_jmb_cons_idx;
6090                 di = dpr->rx_jmb_prod_idx;
6091
6092                 for (i = di; i < di + cpycnt; i++) {
6093                         if (dpr->rx_jmb_buffers[i].data) {
6094                                 cpycnt = i - di;
6095                                 err = -ENOSPC;
6096                                 break;
6097                         }
6098                 }
6099
6100                 if (!cpycnt)
6101                         break;
6102
6103                 /* Ensure that updates to the rx_jmb_buffers ring and the
6104                  * shadowed hardware producer ring from tg3_recycle_skb() are
6105                  * ordered correctly WRT the skb check above.
6106                  */
6107                 smp_rmb();
6108
6109                 memcpy(&dpr->rx_jmb_buffers[di],
6110                        &spr->rx_jmb_buffers[si],
6111                        cpycnt * sizeof(struct ring_info));
6112
6113                 for (i = 0; i < cpycnt; i++, di++, si++) {
6114                         struct tg3_rx_buffer_desc *sbd, *dbd;
6115                         sbd = &spr->rx_jmb[si].std;
6116                         dbd = &dpr->rx_jmb[di].std;
6117                         dbd->addr_hi = sbd->addr_hi;
6118                         dbd->addr_lo = sbd->addr_lo;
6119                 }
6120
6121                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6122                                        tp->rx_jmb_ring_mask;
6123                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6124                                        tp->rx_jmb_ring_mask;
6125         }
6126
6127         return err;
6128 }
6129
6130 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6131 {
6132         struct tg3 *tp = tnapi->tp;
6133
6134         /* run TX completion thread */
6135         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6136                 tg3_tx(tnapi);
6137                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6138                         return work_done;
6139         }
6140
6141         /* run RX thread, within the bounds set by NAPI.
6142          * All RX "locking" is done by ensuring outside
6143          * code synchronizes with tg3->napi.poll()
6144          */
6145         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6146                 work_done += tg3_rx(tnapi, budget - work_done);
6147
6148         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6149                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6150                 int i, err = 0;
6151                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6152                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6153
6154                 for (i = 1; i < tp->irq_cnt; i++)
6155                         err |= tg3_rx_prodring_xfer(tp, dpr,
6156                                                     &tp->napi[i].prodring);
6157
6158                 wmb();
6159
6160                 if (std_prod_idx != dpr->rx_std_prod_idx)
6161                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6162                                      dpr->rx_std_prod_idx);
6163
6164                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6165                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6166                                      dpr->rx_jmb_prod_idx);
6167
6168                 mmiowb();
6169
6170                 if (err)
6171                         tw32_f(HOSTCC_MODE, tp->coal_now);
6172         }
6173
6174         return work_done;
6175 }
6176
6177 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6178 {
6179         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6180                 schedule_work(&tp->reset_task);
6181 }
6182
6183 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6184 {
6185         cancel_work_sync(&tp->reset_task);
6186         tg3_flag_clear(tp, RESET_TASK_PENDING);
6187 }
6188
6189 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6190 {
6191         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6192         struct tg3 *tp = tnapi->tp;
6193         int work_done = 0;
6194         struct tg3_hw_status *sblk = tnapi->hw_status;
6195
6196         while (1) {
6197                 work_done = tg3_poll_work(tnapi, work_done, budget);
6198
6199                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6200                         goto tx_recovery;
6201
6202                 if (unlikely(work_done >= budget))
6203                         break;
6204
6205                 /* tp->last_tag is used in tg3_int_reenable() below
6206                  * to tell the hw how much work has been processed,
6207                  * so we must read it before checking for more work.
6208                  */
6209                 tnapi->last_tag = sblk->status_tag;
6210                 tnapi->last_irq_tag = tnapi->last_tag;
6211                 rmb();
6212
6213                 /* check for RX/TX work to do */
6214                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6215                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6216                         napi_complete(napi);
6217                         /* Reenable interrupts. */
6218                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6219                         mmiowb();
6220                         break;
6221                 }
6222         }
6223
6224         return work_done;
6225
6226 tx_recovery:
6227         /* work_done is guaranteed to be less than budget. */
6228         napi_complete(napi);
6229         tg3_reset_task_schedule(tp);
6230         return work_done;
6231 }
6232
6233 static void tg3_process_error(struct tg3 *tp)
6234 {
6235         u32 val;
6236         bool real_error = false;
6237
6238         if (tg3_flag(tp, ERROR_PROCESSED))
6239                 return;
6240
6241         /* Check Flow Attention register */
6242         val = tr32(HOSTCC_FLOW_ATTN);
6243         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6244                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6245                 real_error = true;
6246         }
6247
6248         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6249                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6250                 real_error = true;
6251         }
6252
6253         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6254                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6255                 real_error = true;
6256         }
6257
6258         if (!real_error)
6259                 return;
6260
6261         tg3_dump_state(tp);
6262
6263         tg3_flag_set(tp, ERROR_PROCESSED);
6264         tg3_reset_task_schedule(tp);
6265 }
6266
6267 static int tg3_poll(struct napi_struct *napi, int budget)
6268 {
6269         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6270         struct tg3 *tp = tnapi->tp;
6271         int work_done = 0;
6272         struct tg3_hw_status *sblk = tnapi->hw_status;
6273
6274         while (1) {
6275                 if (sblk->status & SD_STATUS_ERROR)
6276                         tg3_process_error(tp);
6277
6278                 tg3_poll_link(tp);
6279
6280                 work_done = tg3_poll_work(tnapi, work_done, budget);
6281
6282                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6283                         goto tx_recovery;
6284
6285                 if (unlikely(work_done >= budget))
6286                         break;
6287
6288                 if (tg3_flag(tp, TAGGED_STATUS)) {
6289                         /* tp->last_tag is used in tg3_int_reenable() below
6290                          * to tell the hw how much work has been processed,
6291                          * so we must read it before checking for more work.
6292                          */
6293                         tnapi->last_tag = sblk->status_tag;
6294                         tnapi->last_irq_tag = tnapi->last_tag;
6295                         rmb();
6296                 } else
6297                         sblk->status &= ~SD_STATUS_UPDATED;
6298
6299                 if (likely(!tg3_has_work(tnapi))) {
6300                         napi_complete(napi);
6301                         tg3_int_reenable(tnapi);
6302                         break;
6303                 }
6304         }
6305
6306         return work_done;
6307
6308 tx_recovery:
6309         /* work_done is guaranteed to be less than budget. */
6310         napi_complete(napi);
6311         tg3_reset_task_schedule(tp);
6312         return work_done;
6313 }
6314
6315 static void tg3_napi_disable(struct tg3 *tp)
6316 {
6317         int i;
6318
6319         for (i = tp->irq_cnt - 1; i >= 0; i--)
6320                 napi_disable(&tp->napi[i].napi);
6321 }
6322
6323 static void tg3_napi_enable(struct tg3 *tp)
6324 {
6325         int i;
6326
6327         for (i = 0; i < tp->irq_cnt; i++)
6328                 napi_enable(&tp->napi[i].napi);
6329 }
6330
6331 static void tg3_napi_init(struct tg3 *tp)
6332 {
6333         int i;
6334
6335         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6336         for (i = 1; i < tp->irq_cnt; i++)
6337                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6338 }
6339
6340 static void tg3_napi_fini(struct tg3 *tp)
6341 {
6342         int i;
6343
6344         for (i = 0; i < tp->irq_cnt; i++)
6345                 netif_napi_del(&tp->napi[i].napi);
6346 }
6347
6348 static inline void tg3_netif_stop(struct tg3 *tp)
6349 {
6350         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6351         tg3_napi_disable(tp);
6352         netif_tx_disable(tp->dev);
6353 }
6354
6355 static inline void tg3_netif_start(struct tg3 *tp)
6356 {
6357         /* NOTE: unconditional netif_tx_wake_all_queues is only
6358          * appropriate so long as all callers are assured to
6359          * have free tx slots (such as after tg3_init_hw)
6360          */
6361         netif_tx_wake_all_queues(tp->dev);
6362
6363         tg3_napi_enable(tp);
6364         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6365         tg3_enable_ints(tp);
6366 }
6367
6368 static void tg3_irq_quiesce(struct tg3 *tp)
6369 {
6370         int i;
6371
6372         BUG_ON(tp->irq_sync);
6373
6374         tp->irq_sync = 1;
6375         smp_mb();
6376
6377         for (i = 0; i < tp->irq_cnt; i++)
6378                 synchronize_irq(tp->napi[i].irq_vec);
6379 }
6380
6381 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6382  * If irq_sync is non-zero, then the IRQ handler must be synchronized
6383  * with as well.  Most of the time, this is not necessary except when
6384  * shutting down the device.
6385  */
6386 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6387 {
6388         spin_lock_bh(&tp->lock);
6389         if (irq_sync)
6390                 tg3_irq_quiesce(tp);
6391 }
6392
6393 static inline void tg3_full_unlock(struct tg3 *tp)
6394 {
6395         spin_unlock_bh(&tp->lock);
6396 }
6397
6398 /* One-shot MSI handler - Chip automatically disables interrupt
6399  * after sending MSI so driver doesn't have to do it.
6400  */
6401 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6402 {
6403         struct tg3_napi *tnapi = dev_id;
6404         struct tg3 *tp = tnapi->tp;
6405
6406         prefetch(tnapi->hw_status);
6407         if (tnapi->rx_rcb)
6408                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6409
6410         if (likely(!tg3_irq_sync(tp)))
6411                 napi_schedule(&tnapi->napi);
6412
6413         return IRQ_HANDLED;
6414 }
6415
6416 /* MSI ISR - No need to check for interrupt sharing and no need to
6417  * flush status block and interrupt mailbox. PCI ordering rules
6418  * guarantee that MSI will arrive after the status block.
6419  */
6420 static irqreturn_t tg3_msi(int irq, void *dev_id)
6421 {
6422         struct tg3_napi *tnapi = dev_id;
6423         struct tg3 *tp = tnapi->tp;
6424
6425         prefetch(tnapi->hw_status);
6426         if (tnapi->rx_rcb)
6427                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6428         /*
6429          * Writing any value to intr-mbox-0 clears PCI INTA# and
6430          * chip-internal interrupt pending events.
6431          * Writing non-zero to intr-mbox-0 additional tells the
6432          * NIC to stop sending us irqs, engaging "in-intr-handler"
6433          * event coalescing.
6434          */
6435         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6436         if (likely(!tg3_irq_sync(tp)))
6437                 napi_schedule(&tnapi->napi);
6438
6439         return IRQ_RETVAL(1);
6440 }
6441
6442 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6443 {
6444         struct tg3_napi *tnapi = dev_id;
6445         struct tg3 *tp = tnapi->tp;
6446         struct tg3_hw_status *sblk = tnapi->hw_status;
6447         unsigned int handled = 1;
6448
6449         /* In INTx mode, it is possible for the interrupt to arrive at
6450          * the CPU before the status block posted prior to the interrupt.
6451          * Reading the PCI State register will confirm whether the
6452          * interrupt is ours and will flush the status block.
6453          */
6454         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6455                 if (tg3_flag(tp, CHIP_RESETTING) ||
6456                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6457                         handled = 0;
6458                         goto out;
6459                 }
6460         }
6461
6462         /*
6463          * Writing any value to intr-mbox-0 clears PCI INTA# and
6464          * chip-internal interrupt pending events.
6465          * Writing non-zero to intr-mbox-0 additional tells the
6466          * NIC to stop sending us irqs, engaging "in-intr-handler"
6467          * event coalescing.
6468          *
6469          * Flush the mailbox to de-assert the IRQ immediately to prevent
6470          * spurious interrupts.  The flush impacts performance but
6471          * excessive spurious interrupts can be worse in some cases.
6472          */
6473         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6474         if (tg3_irq_sync(tp))
6475                 goto out;
6476         sblk->status &= ~SD_STATUS_UPDATED;
6477         if (likely(tg3_has_work(tnapi))) {
6478                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6479                 napi_schedule(&tnapi->napi);
6480         } else {
6481                 /* No work, shared interrupt perhaps?  re-enable
6482                  * interrupts, and flush that PCI write
6483                  */
6484                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6485                                0x00000000);
6486         }
6487 out:
6488         return IRQ_RETVAL(handled);
6489 }
6490
6491 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6492 {
6493         struct tg3_napi *tnapi = dev_id;
6494         struct tg3 *tp = tnapi->tp;
6495         struct tg3_hw_status *sblk = tnapi->hw_status;
6496         unsigned int handled = 1;
6497
6498         /* In INTx mode, it is possible for the interrupt to arrive at
6499          * the CPU before the status block posted prior to the interrupt.
6500          * Reading the PCI State register will confirm whether the
6501          * interrupt is ours and will flush the status block.
6502          */
6503         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6504                 if (tg3_flag(tp, CHIP_RESETTING) ||
6505                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6506                         handled = 0;
6507                         goto out;
6508                 }
6509         }
6510
6511         /*
6512          * writing any value to intr-mbox-0 clears PCI INTA# and
6513          * chip-internal interrupt pending events.
6514          * writing non-zero to intr-mbox-0 additional tells the
6515          * NIC to stop sending us irqs, engaging "in-intr-handler"
6516          * event coalescing.
6517          *
6518          * Flush the mailbox to de-assert the IRQ immediately to prevent
6519          * spurious interrupts.  The flush impacts performance but
6520          * excessive spurious interrupts can be worse in some cases.
6521          */
6522         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6523
6524         /*
6525          * In a shared interrupt configuration, sometimes other devices'
6526          * interrupts will scream.  We record the current status tag here
6527          * so that the above check can report that the screaming interrupts
6528          * are unhandled.  Eventually they will be silenced.
6529          */
6530         tnapi->last_irq_tag = sblk->status_tag;
6531
6532         if (tg3_irq_sync(tp))
6533                 goto out;
6534
6535         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6536
6537         napi_schedule(&tnapi->napi);
6538
6539 out:
6540         return IRQ_RETVAL(handled);
6541 }
6542
6543 /* ISR for interrupt test */
6544 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6545 {
6546         struct tg3_napi *tnapi = dev_id;
6547         struct tg3 *tp = tnapi->tp;
6548         struct tg3_hw_status *sblk = tnapi->hw_status;
6549
6550         if ((sblk->status & SD_STATUS_UPDATED) ||
6551             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6552                 tg3_disable_ints(tp);
6553                 return IRQ_RETVAL(1);
6554         }
6555         return IRQ_RETVAL(0);
6556 }
6557
6558 #ifdef CONFIG_NET_POLL_CONTROLLER
6559 static void tg3_poll_controller(struct net_device *dev)
6560 {
6561         int i;
6562         struct tg3 *tp = netdev_priv(dev);
6563
6564         for (i = 0; i < tp->irq_cnt; i++)
6565                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6566 }
6567 #endif
6568
6569 static void tg3_tx_timeout(struct net_device *dev)
6570 {
6571         struct tg3 *tp = netdev_priv(dev);
6572
6573         if (netif_msg_tx_err(tp)) {
6574                 netdev_err(dev, "transmit timed out, resetting\n");
6575                 tg3_dump_state(tp);
6576         }
6577
6578         tg3_reset_task_schedule(tp);
6579 }
6580
6581 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6582 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6583 {
6584         u32 base = (u32) mapping & 0xffffffff;
6585
6586         return (base > 0xffffdcc0) && (base + len + 8 < base);
6587 }
6588
6589 /* Test for DMA addresses > 40-bit */
6590 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6591                                           int len)
6592 {
6593 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6594         if (tg3_flag(tp, 40BIT_DMA_BUG))
6595                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6596         return 0;
6597 #else
6598         return 0;
6599 #endif
6600 }
6601
6602 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6603                                  dma_addr_t mapping, u32 len, u32 flags,
6604                                  u32 mss, u32 vlan)
6605 {
6606         txbd->addr_hi = ((u64) mapping >> 32);
6607         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6608         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6609         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6610 }
6611
6612 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6613                             dma_addr_t map, u32 len, u32 flags,
6614                             u32 mss, u32 vlan)
6615 {
6616         struct tg3 *tp = tnapi->tp;
6617         bool hwbug = false;
6618
6619         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6620                 hwbug = true;
6621
6622         if (tg3_4g_overflow_test(map, len))
6623                 hwbug = true;
6624
6625         if (tg3_40bit_overflow_test(tp, map, len))
6626                 hwbug = true;
6627
6628         if (tp->dma_limit) {
6629                 u32 prvidx = *entry;
6630                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6631                 while (len > tp->dma_limit && *budget) {
6632                         u32 frag_len = tp->dma_limit;
6633                         len -= tp->dma_limit;
6634
6635                         /* Avoid the 8byte DMA problem */
6636                         if (len <= 8) {
6637                                 len += tp->dma_limit / 2;
6638                                 frag_len = tp->dma_limit / 2;
6639                         }
6640
6641                         tnapi->tx_buffers[*entry].fragmented = true;
6642
6643                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6644                                       frag_len, tmp_flag, mss, vlan);
6645                         *budget -= 1;
6646                         prvidx = *entry;
6647                         *entry = NEXT_TX(*entry);
6648
6649                         map += frag_len;
6650                 }
6651
6652                 if (len) {
6653                         if (*budget) {
6654                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6655                                               len, flags, mss, vlan);
6656                                 *budget -= 1;
6657                                 *entry = NEXT_TX(*entry);
6658                         } else {
6659                                 hwbug = true;
6660                                 tnapi->tx_buffers[prvidx].fragmented = false;
6661                         }
6662                 }
6663         } else {
6664                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6665                               len, flags, mss, vlan);
6666                 *entry = NEXT_TX(*entry);
6667         }
6668
6669         return hwbug;
6670 }
6671
6672 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6673 {
6674         int i;
6675         struct sk_buff *skb;
6676         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6677
6678         skb = txb->skb;
6679         txb->skb = NULL;
6680
6681         pci_unmap_single(tnapi->tp->pdev,
6682                          dma_unmap_addr(txb, mapping),
6683                          skb_headlen(skb),
6684                          PCI_DMA_TODEVICE);
6685
6686         while (txb->fragmented) {
6687                 txb->fragmented = false;
6688                 entry = NEXT_TX(entry);
6689                 txb = &tnapi->tx_buffers[entry];
6690         }
6691
6692         for (i = 0; i <= last; i++) {
6693                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6694
6695                 entry = NEXT_TX(entry);
6696                 txb = &tnapi->tx_buffers[entry];
6697
6698                 pci_unmap_page(tnapi->tp->pdev,
6699                                dma_unmap_addr(txb, mapping),
6700                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6701
6702                 while (txb->fragmented) {
6703                         txb->fragmented = false;
6704                         entry = NEXT_TX(entry);
6705                         txb = &tnapi->tx_buffers[entry];
6706                 }
6707         }
6708 }
6709
6710 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6711 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6712                                        struct sk_buff **pskb,
6713                                        u32 *entry, u32 *budget,
6714                                        u32 base_flags, u32 mss, u32 vlan)
6715 {
6716         struct tg3 *tp = tnapi->tp;
6717         struct sk_buff *new_skb, *skb = *pskb;
6718         dma_addr_t new_addr = 0;
6719         int ret = 0;
6720
6721         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6722                 new_skb = skb_copy(skb, GFP_ATOMIC);
6723         else {
6724                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6725
6726                 new_skb = skb_copy_expand(skb,
6727                                           skb_headroom(skb) + more_headroom,
6728                                           skb_tailroom(skb), GFP_ATOMIC);
6729         }
6730
6731         if (!new_skb) {
6732                 ret = -1;
6733         } else {
6734                 /* New SKB is guaranteed to be linear. */
6735                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6736                                           PCI_DMA_TODEVICE);
6737                 /* Make sure the mapping succeeded */
6738                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6739                         dev_kfree_skb(new_skb);
6740                         ret = -1;
6741                 } else {
6742                         u32 save_entry = *entry;
6743
6744                         base_flags |= TXD_FLAG_END;
6745
6746                         tnapi->tx_buffers[*entry].skb = new_skb;
6747                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6748                                            mapping, new_addr);
6749
6750                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6751                                             new_skb->len, base_flags,
6752                                             mss, vlan)) {
6753                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6754                                 dev_kfree_skb(new_skb);
6755                                 ret = -1;
6756                         }
6757                 }
6758         }
6759
6760         dev_kfree_skb(skb);
6761         *pskb = new_skb;
6762         return ret;
6763 }
6764
6765 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6766
6767 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6768  * TSO header is greater than 80 bytes.
6769  */
6770 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6771 {
6772         struct sk_buff *segs, *nskb;
6773         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6774
6775         /* Estimate the number of fragments in the worst case */
6776         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6777                 netif_stop_queue(tp->dev);
6778
6779                 /* netif_tx_stop_queue() must be done before checking
6780                  * checking tx index in tg3_tx_avail() below, because in
6781                  * tg3_tx(), we update tx index before checking for
6782                  * netif_tx_queue_stopped().
6783                  */
6784                 smp_mb();
6785                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6786                         return NETDEV_TX_BUSY;
6787
6788                 netif_wake_queue(tp->dev);
6789         }
6790
6791         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6792         if (IS_ERR(segs))
6793                 goto tg3_tso_bug_end;
6794
6795         do {
6796                 nskb = segs;
6797                 segs = segs->next;
6798                 nskb->next = NULL;
6799                 tg3_start_xmit(nskb, tp->dev);
6800         } while (segs);
6801
6802 tg3_tso_bug_end:
6803         dev_kfree_skb(skb);
6804
6805         return NETDEV_TX_OK;
6806 }
6807
6808 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6809  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6810  */
6811 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6812 {
6813         struct tg3 *tp = netdev_priv(dev);
6814         u32 len, entry, base_flags, mss, vlan = 0;
6815         u32 budget;
6816         int i = -1, would_hit_hwbug;
6817         dma_addr_t mapping;
6818         struct tg3_napi *tnapi;
6819         struct netdev_queue *txq;
6820         unsigned int last;
6821
6822         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6823         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6824         if (tg3_flag(tp, ENABLE_TSS))
6825                 tnapi++;
6826
6827         budget = tg3_tx_avail(tnapi);
6828
6829         /* We are running in BH disabled context with netif_tx_lock
6830          * and TX reclaim runs via tp->napi.poll inside of a software
6831          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6832          * no IRQ context deadlocks to worry about either.  Rejoice!
6833          */
6834         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6835                 if (!netif_tx_queue_stopped(txq)) {
6836                         netif_tx_stop_queue(txq);
6837
6838                         /* This is a hard error, log it. */
6839                         netdev_err(dev,
6840                                    "BUG! Tx Ring full when queue awake!\n");
6841                 }
6842                 return NETDEV_TX_BUSY;
6843         }
6844
6845         entry = tnapi->tx_prod;
6846         base_flags = 0;
6847         if (skb->ip_summed == CHECKSUM_PARTIAL)
6848                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6849
6850         mss = skb_shinfo(skb)->gso_size;
6851         if (mss) {
6852                 struct iphdr *iph;
6853                 u32 tcp_opt_len, hdr_len;
6854
6855                 if (skb_header_cloned(skb) &&
6856                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6857                         goto drop;
6858
6859                 iph = ip_hdr(skb);
6860                 tcp_opt_len = tcp_optlen(skb);
6861
6862                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6863
6864                 if (!skb_is_gso_v6(skb)) {
6865                         iph->check = 0;
6866                         iph->tot_len = htons(mss + hdr_len);
6867                 }
6868
6869                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6870                     tg3_flag(tp, TSO_BUG))
6871                         return tg3_tso_bug(tp, skb);
6872
6873                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6874                                TXD_FLAG_CPU_POST_DMA);
6875
6876                 if (tg3_flag(tp, HW_TSO_1) ||
6877                     tg3_flag(tp, HW_TSO_2) ||
6878                     tg3_flag(tp, HW_TSO_3)) {
6879                         tcp_hdr(skb)->check = 0;
6880                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6881                 } else
6882                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6883                                                                  iph->daddr, 0,
6884                                                                  IPPROTO_TCP,
6885                                                                  0);
6886
6887                 if (tg3_flag(tp, HW_TSO_3)) {
6888                         mss |= (hdr_len & 0xc) << 12;
6889                         if (hdr_len & 0x10)
6890                                 base_flags |= 0x00000010;
6891                         base_flags |= (hdr_len & 0x3e0) << 5;
6892                 } else if (tg3_flag(tp, HW_TSO_2))
6893                         mss |= hdr_len << 9;
6894                 else if (tg3_flag(tp, HW_TSO_1) ||
6895                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6896                         if (tcp_opt_len || iph->ihl > 5) {
6897                                 int tsflags;
6898
6899                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6900                                 mss |= (tsflags << 11);
6901                         }
6902                 } else {
6903                         if (tcp_opt_len || iph->ihl > 5) {
6904                                 int tsflags;
6905
6906                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6907                                 base_flags |= tsflags << 12;
6908                         }
6909                 }
6910         }
6911
6912         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6913             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6914                 base_flags |= TXD_FLAG_JMB_PKT;
6915
6916         if (vlan_tx_tag_present(skb)) {
6917                 base_flags |= TXD_FLAG_VLAN;
6918                 vlan = vlan_tx_tag_get(skb);
6919         }
6920
6921         len = skb_headlen(skb);
6922
6923         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6924         if (pci_dma_mapping_error(tp->pdev, mapping))
6925                 goto drop;
6926
6927
6928         tnapi->tx_buffers[entry].skb = skb;
6929         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6930
6931         would_hit_hwbug = 0;
6932
6933         if (tg3_flag(tp, 5701_DMA_BUG))
6934                 would_hit_hwbug = 1;
6935
6936         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6937                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6938                             mss, vlan)) {
6939                 would_hit_hwbug = 1;
6940         } else if (skb_shinfo(skb)->nr_frags > 0) {
6941                 u32 tmp_mss = mss;
6942
6943                 if (!tg3_flag(tp, HW_TSO_1) &&
6944                     !tg3_flag(tp, HW_TSO_2) &&
6945                     !tg3_flag(tp, HW_TSO_3))
6946                         tmp_mss = 0;
6947
6948                 /* Now loop through additional data
6949                  * fragments, and queue them.
6950                  */
6951                 last = skb_shinfo(skb)->nr_frags - 1;
6952                 for (i = 0; i <= last; i++) {
6953                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6954
6955                         len = skb_frag_size(frag);
6956                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6957                                                    len, DMA_TO_DEVICE);
6958
6959                         tnapi->tx_buffers[entry].skb = NULL;
6960                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6961                                            mapping);
6962                         if (dma_mapping_error(&tp->pdev->dev, mapping))
6963                                 goto dma_error;
6964
6965                         if (!budget ||
6966                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6967                                             len, base_flags |
6968                                             ((i == last) ? TXD_FLAG_END : 0),
6969                                             tmp_mss, vlan)) {
6970                                 would_hit_hwbug = 1;
6971                                 break;
6972                         }
6973                 }
6974         }
6975
6976         if (would_hit_hwbug) {
6977                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6978
6979                 /* If the workaround fails due to memory/mapping
6980                  * failure, silently drop this packet.
6981                  */
6982                 entry = tnapi->tx_prod;
6983                 budget = tg3_tx_avail(tnapi);
6984                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6985                                                 base_flags, mss, vlan))
6986                         goto drop_nofree;
6987         }
6988
6989         skb_tx_timestamp(skb);
6990         netdev_sent_queue(tp->dev, skb->len);
6991
6992         /* Packets are ready, update Tx producer idx local and on card. */
6993         tw32_tx_mbox(tnapi->prodmbox, entry);
6994
6995         tnapi->tx_prod = entry;
6996         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6997                 netif_tx_stop_queue(txq);
6998
6999                 /* netif_tx_stop_queue() must be done before checking
7000                  * checking tx index in tg3_tx_avail() below, because in
7001                  * tg3_tx(), we update tx index before checking for
7002                  * netif_tx_queue_stopped().
7003                  */
7004                 smp_mb();
7005                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7006                         netif_tx_wake_queue(txq);
7007         }
7008
7009         mmiowb();
7010         return NETDEV_TX_OK;
7011
7012 dma_error:
7013         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7014         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7015 drop:
7016         dev_kfree_skb(skb);
7017 drop_nofree:
7018         tp->tx_dropped++;
7019         return NETDEV_TX_OK;
7020 }
7021
7022 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7023 {
7024         if (enable) {
7025                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7026                                   MAC_MODE_PORT_MODE_MASK);
7027
7028                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7029
7030                 if (!tg3_flag(tp, 5705_PLUS))
7031                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7032
7033                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7034                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7035                 else
7036                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7037         } else {
7038                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7039
7040                 if (tg3_flag(tp, 5705_PLUS) ||
7041                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7042                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7043                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7044         }
7045
7046         tw32(MAC_MODE, tp->mac_mode);
7047         udelay(40);
7048 }
7049
7050 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7051 {
7052         u32 val, bmcr, mac_mode, ptest = 0;
7053
7054         tg3_phy_toggle_apd(tp, false);
7055         tg3_phy_toggle_automdix(tp, 0);
7056
7057         if (extlpbk && tg3_phy_set_extloopbk(tp))
7058                 return -EIO;
7059
7060         bmcr = BMCR_FULLDPLX;
7061         switch (speed) {
7062         case SPEED_10:
7063                 break;
7064         case SPEED_100:
7065                 bmcr |= BMCR_SPEED100;
7066                 break;
7067         case SPEED_1000:
7068         default:
7069                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7070                         speed = SPEED_100;
7071                         bmcr |= BMCR_SPEED100;
7072                 } else {
7073                         speed = SPEED_1000;
7074                         bmcr |= BMCR_SPEED1000;
7075                 }
7076         }
7077
7078         if (extlpbk) {
7079                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7080                         tg3_readphy(tp, MII_CTRL1000, &val);
7081                         val |= CTL1000_AS_MASTER |
7082                                CTL1000_ENABLE_MASTER;
7083                         tg3_writephy(tp, MII_CTRL1000, val);
7084                 } else {
7085                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7086                                 MII_TG3_FET_PTEST_TRIM_2;
7087                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7088                 }
7089         } else
7090                 bmcr |= BMCR_LOOPBACK;
7091
7092         tg3_writephy(tp, MII_BMCR, bmcr);
7093
7094         /* The write needs to be flushed for the FETs */
7095         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7096                 tg3_readphy(tp, MII_BMCR, &bmcr);
7097
7098         udelay(40);
7099
7100         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7101             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7102                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7103                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7104                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7105
7106                 /* The write needs to be flushed for the AC131 */
7107                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7108         }
7109
7110         /* Reset to prevent losing 1st rx packet intermittently */
7111         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7112             tg3_flag(tp, 5780_CLASS)) {
7113                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7114                 udelay(10);
7115                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7116         }
7117
7118         mac_mode = tp->mac_mode &
7119                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7120         if (speed == SPEED_1000)
7121                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7122         else
7123                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7124
7125         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7126                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7127
7128                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7129                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7130                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7131                         mac_mode |= MAC_MODE_LINK_POLARITY;
7132
7133                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7134                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7135         }
7136
7137         tw32(MAC_MODE, mac_mode);
7138         udelay(40);
7139
7140         return 0;
7141 }
7142
7143 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7144 {
7145         struct tg3 *tp = netdev_priv(dev);
7146
7147         if (features & NETIF_F_LOOPBACK) {
7148                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7149                         return;
7150
7151                 spin_lock_bh(&tp->lock);
7152                 tg3_mac_loopback(tp, true);
7153                 netif_carrier_on(tp->dev);
7154                 spin_unlock_bh(&tp->lock);
7155                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7156         } else {
7157                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7158                         return;
7159
7160                 spin_lock_bh(&tp->lock);
7161                 tg3_mac_loopback(tp, false);
7162                 /* Force link status check */
7163                 tg3_setup_phy(tp, 1);
7164                 spin_unlock_bh(&tp->lock);
7165                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7166         }
7167 }
7168
7169 static netdev_features_t tg3_fix_features(struct net_device *dev,
7170         netdev_features_t features)
7171 {
7172         struct tg3 *tp = netdev_priv(dev);
7173
7174         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7175                 features &= ~NETIF_F_ALL_TSO;
7176
7177         return features;
7178 }
7179
7180 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7181 {
7182         netdev_features_t changed = dev->features ^ features;
7183
7184         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7185                 tg3_set_loopback(dev, features);
7186
7187         return 0;
7188 }
7189
7190 static void tg3_rx_prodring_free(struct tg3 *tp,
7191                                  struct tg3_rx_prodring_set *tpr)
7192 {
7193         int i;
7194
7195         if (tpr != &tp->napi[0].prodring) {
7196                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7197                      i = (i + 1) & tp->rx_std_ring_mask)
7198                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7199                                         tp->rx_pkt_map_sz);
7200
7201                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7202                         for (i = tpr->rx_jmb_cons_idx;
7203                              i != tpr->rx_jmb_prod_idx;
7204                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7205                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7206                                                 TG3_RX_JMB_MAP_SZ);
7207                         }
7208                 }
7209
7210                 return;
7211         }
7212
7213         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7214                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7215                                 tp->rx_pkt_map_sz);
7216
7217         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7218                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7219                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7220                                         TG3_RX_JMB_MAP_SZ);
7221         }
7222 }
7223
7224 /* Initialize rx rings for packet processing.
7225  *
7226  * The chip has been shut down and the driver detached from
7227  * the networking, so no interrupts or new tx packets will
7228  * end up in the driver.  tp->{tx,}lock are held and thus
7229  * we may not sleep.
7230  */
7231 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7232                                  struct tg3_rx_prodring_set *tpr)
7233 {
7234         u32 i, rx_pkt_dma_sz;
7235
7236         tpr->rx_std_cons_idx = 0;
7237         tpr->rx_std_prod_idx = 0;
7238         tpr->rx_jmb_cons_idx = 0;
7239         tpr->rx_jmb_prod_idx = 0;
7240
7241         if (tpr != &tp->napi[0].prodring) {
7242                 memset(&tpr->rx_std_buffers[0], 0,
7243                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7244                 if (tpr->rx_jmb_buffers)
7245                         memset(&tpr->rx_jmb_buffers[0], 0,
7246                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7247                 goto done;
7248         }
7249
7250         /* Zero out all descriptors. */
7251         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7252
7253         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7254         if (tg3_flag(tp, 5780_CLASS) &&
7255             tp->dev->mtu > ETH_DATA_LEN)
7256                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7257         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7258
7259         /* Initialize invariants of the rings, we only set this
7260          * stuff once.  This works because the card does not
7261          * write into the rx buffer posting rings.
7262          */
7263         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7264                 struct tg3_rx_buffer_desc *rxd;
7265
7266                 rxd = &tpr->rx_std[i];
7267                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7268                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7269                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7270                                (i << RXD_OPAQUE_INDEX_SHIFT));
7271         }
7272
7273         /* Now allocate fresh SKBs for each rx ring. */
7274         for (i = 0; i < tp->rx_pending; i++) {
7275                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7276                         netdev_warn(tp->dev,
7277                                     "Using a smaller RX standard ring. Only "
7278                                     "%d out of %d buffers were allocated "
7279                                     "successfully\n", i, tp->rx_pending);
7280                         if (i == 0)
7281                                 goto initfail;
7282                         tp->rx_pending = i;
7283                         break;
7284                 }
7285         }
7286
7287         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7288                 goto done;
7289
7290         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7291
7292         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7293                 goto done;
7294
7295         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7296                 struct tg3_rx_buffer_desc *rxd;
7297
7298                 rxd = &tpr->rx_jmb[i].std;
7299                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7300                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7301                                   RXD_FLAG_JUMBO;
7302                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7303                        (i << RXD_OPAQUE_INDEX_SHIFT));
7304         }
7305
7306         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7307                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7308                         netdev_warn(tp->dev,
7309                                     "Using a smaller RX jumbo ring. Only %d "
7310                                     "out of %d buffers were allocated "
7311                                     "successfully\n", i, tp->rx_jumbo_pending);
7312                         if (i == 0)
7313                                 goto initfail;
7314                         tp->rx_jumbo_pending = i;
7315                         break;
7316                 }
7317         }
7318
7319 done:
7320         return 0;
7321
7322 initfail:
7323         tg3_rx_prodring_free(tp, tpr);
7324         return -ENOMEM;
7325 }
7326
7327 static void tg3_rx_prodring_fini(struct tg3 *tp,
7328                                  struct tg3_rx_prodring_set *tpr)
7329 {
7330         kfree(tpr->rx_std_buffers);
7331         tpr->rx_std_buffers = NULL;
7332         kfree(tpr->rx_jmb_buffers);
7333         tpr->rx_jmb_buffers = NULL;
7334         if (tpr->rx_std) {
7335                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7336                                   tpr->rx_std, tpr->rx_std_mapping);
7337                 tpr->rx_std = NULL;
7338         }
7339         if (tpr->rx_jmb) {
7340                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7341                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7342                 tpr->rx_jmb = NULL;
7343         }
7344 }
7345
7346 static int tg3_rx_prodring_init(struct tg3 *tp,
7347                                 struct tg3_rx_prodring_set *tpr)
7348 {
7349         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7350                                       GFP_KERNEL);
7351         if (!tpr->rx_std_buffers)
7352                 return -ENOMEM;
7353
7354         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7355                                          TG3_RX_STD_RING_BYTES(tp),
7356                                          &tpr->rx_std_mapping,
7357                                          GFP_KERNEL);
7358         if (!tpr->rx_std)
7359                 goto err_out;
7360
7361         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7362                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7363                                               GFP_KERNEL);
7364                 if (!tpr->rx_jmb_buffers)
7365                         goto err_out;
7366
7367                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7368                                                  TG3_RX_JMB_RING_BYTES(tp),
7369                                                  &tpr->rx_jmb_mapping,
7370                                                  GFP_KERNEL);
7371                 if (!tpr->rx_jmb)
7372                         goto err_out;
7373         }
7374
7375         return 0;
7376
7377 err_out:
7378         tg3_rx_prodring_fini(tp, tpr);
7379         return -ENOMEM;
7380 }
7381
7382 /* Free up pending packets in all rx/tx rings.
7383  *
7384  * The chip has been shut down and the driver detached from
7385  * the networking, so no interrupts or new tx packets will
7386  * end up in the driver.  tp->{tx,}lock is not held and we are not
7387  * in an interrupt context and thus may sleep.
7388  */
7389 static void tg3_free_rings(struct tg3 *tp)
7390 {
7391         int i, j;
7392
7393         for (j = 0; j < tp->irq_cnt; j++) {
7394                 struct tg3_napi *tnapi = &tp->napi[j];
7395
7396                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7397
7398                 if (!tnapi->tx_buffers)
7399                         continue;
7400
7401                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7402                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7403
7404                         if (!skb)
7405                                 continue;
7406
7407                         tg3_tx_skb_unmap(tnapi, i,
7408                                          skb_shinfo(skb)->nr_frags - 1);
7409
7410                         dev_kfree_skb_any(skb);
7411                 }
7412         }
7413         netdev_reset_queue(tp->dev);
7414 }
7415
7416 /* Initialize tx/rx rings for packet processing.
7417  *
7418  * The chip has been shut down and the driver detached from
7419  * the networking, so no interrupts or new tx packets will
7420  * end up in the driver.  tp->{tx,}lock are held and thus
7421  * we may not sleep.
7422  */
7423 static int tg3_init_rings(struct tg3 *tp)
7424 {
7425         int i;
7426
7427         /* Free up all the SKBs. */
7428         tg3_free_rings(tp);
7429
7430         for (i = 0; i < tp->irq_cnt; i++) {
7431                 struct tg3_napi *tnapi = &tp->napi[i];
7432
7433                 tnapi->last_tag = 0;
7434                 tnapi->last_irq_tag = 0;
7435                 tnapi->hw_status->status = 0;
7436                 tnapi->hw_status->status_tag = 0;
7437                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7438
7439                 tnapi->tx_prod = 0;
7440                 tnapi->tx_cons = 0;
7441                 if (tnapi->tx_ring)
7442                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7443
7444                 tnapi->rx_rcb_ptr = 0;
7445                 if (tnapi->rx_rcb)
7446                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7447
7448                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7449                         tg3_free_rings(tp);
7450                         return -ENOMEM;
7451                 }
7452         }
7453
7454         return 0;
7455 }
7456
7457 /*
7458  * Must not be invoked with interrupt sources disabled and
7459  * the hardware shutdown down.
7460  */
7461 static void tg3_free_consistent(struct tg3 *tp)
7462 {
7463         int i;
7464
7465         for (i = 0; i < tp->irq_cnt; i++) {
7466                 struct tg3_napi *tnapi = &tp->napi[i];
7467
7468                 if (tnapi->tx_ring) {
7469                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7470                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7471                         tnapi->tx_ring = NULL;
7472                 }
7473
7474                 kfree(tnapi->tx_buffers);
7475                 tnapi->tx_buffers = NULL;
7476
7477                 if (tnapi->rx_rcb) {
7478                         dma_free_coherent(&tp->pdev->dev,
7479                                           TG3_RX_RCB_RING_BYTES(tp),
7480                                           tnapi->rx_rcb,
7481                                           tnapi->rx_rcb_mapping);
7482                         tnapi->rx_rcb = NULL;
7483                 }
7484
7485                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7486
7487                 if (tnapi->hw_status) {
7488                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7489                                           tnapi->hw_status,
7490                                           tnapi->status_mapping);
7491                         tnapi->hw_status = NULL;
7492                 }
7493         }
7494
7495         if (tp->hw_stats) {
7496                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7497                                   tp->hw_stats, tp->stats_mapping);
7498                 tp->hw_stats = NULL;
7499         }
7500 }
7501
7502 /*
7503  * Must not be invoked with interrupt sources disabled and
7504  * the hardware shutdown down.  Can sleep.
7505  */
7506 static int tg3_alloc_consistent(struct tg3 *tp)
7507 {
7508         int i;
7509
7510         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7511                                           sizeof(struct tg3_hw_stats),
7512                                           &tp->stats_mapping,
7513                                           GFP_KERNEL);
7514         if (!tp->hw_stats)
7515                 goto err_out;
7516
7517         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7518
7519         for (i = 0; i < tp->irq_cnt; i++) {
7520                 struct tg3_napi *tnapi = &tp->napi[i];
7521                 struct tg3_hw_status *sblk;
7522
7523                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7524                                                       TG3_HW_STATUS_SIZE,
7525                                                       &tnapi->status_mapping,
7526                                                       GFP_KERNEL);
7527                 if (!tnapi->hw_status)
7528                         goto err_out;
7529
7530                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7531                 sblk = tnapi->hw_status;
7532
7533                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7534                         goto err_out;
7535
7536                 /* If multivector TSS is enabled, vector 0 does not handle
7537                  * tx interrupts.  Don't allocate any resources for it.
7538                  */
7539                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7540                     (i && tg3_flag(tp, ENABLE_TSS))) {
7541                         tnapi->tx_buffers = kzalloc(
7542                                                sizeof(struct tg3_tx_ring_info) *
7543                                                TG3_TX_RING_SIZE, GFP_KERNEL);
7544                         if (!tnapi->tx_buffers)
7545                                 goto err_out;
7546
7547                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7548                                                             TG3_TX_RING_BYTES,
7549                                                         &tnapi->tx_desc_mapping,
7550                                                             GFP_KERNEL);
7551                         if (!tnapi->tx_ring)
7552                                 goto err_out;
7553                 }
7554
7555                 /*
7556                  * When RSS is enabled, the status block format changes
7557                  * slightly.  The "rx_jumbo_consumer", "reserved",
7558                  * and "rx_mini_consumer" members get mapped to the
7559                  * other three rx return ring producer indexes.
7560                  */
7561                 switch (i) {
7562                 default:
7563                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7564                         break;
7565                 case 2:
7566                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7567                         break;
7568                 case 3:
7569                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
7570                         break;
7571                 case 4:
7572                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7573                         break;
7574                 }
7575
7576                 /*
7577                  * If multivector RSS is enabled, vector 0 does not handle
7578                  * rx or tx interrupts.  Don't allocate any resources for it.
7579                  */
7580                 if (!i && tg3_flag(tp, ENABLE_RSS))
7581                         continue;
7582
7583                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7584                                                    TG3_RX_RCB_RING_BYTES(tp),
7585                                                    &tnapi->rx_rcb_mapping,
7586                                                    GFP_KERNEL);
7587                 if (!tnapi->rx_rcb)
7588                         goto err_out;
7589
7590                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7591         }
7592
7593         return 0;
7594
7595 err_out:
7596         tg3_free_consistent(tp);
7597         return -ENOMEM;
7598 }
7599
7600 #define MAX_WAIT_CNT 1000
7601
7602 /* To stop a block, clear the enable bit and poll till it
7603  * clears.  tp->lock is held.
7604  */
7605 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7606 {
7607         unsigned int i;
7608         u32 val;
7609
7610         if (tg3_flag(tp, 5705_PLUS)) {
7611                 switch (ofs) {
7612                 case RCVLSC_MODE:
7613                 case DMAC_MODE:
7614                 case MBFREE_MODE:
7615                 case BUFMGR_MODE:
7616                 case MEMARB_MODE:
7617                         /* We can't enable/disable these bits of the
7618                          * 5705/5750, just say success.
7619                          */
7620                         return 0;
7621
7622                 default:
7623                         break;
7624                 }
7625         }
7626
7627         val = tr32(ofs);
7628         val &= ~enable_bit;
7629         tw32_f(ofs, val);
7630
7631         for (i = 0; i < MAX_WAIT_CNT; i++) {
7632                 udelay(100);
7633                 val = tr32(ofs);
7634                 if ((val & enable_bit) == 0)
7635                         break;
7636         }
7637
7638         if (i == MAX_WAIT_CNT && !silent) {
7639                 dev_err(&tp->pdev->dev,
7640                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7641                         ofs, enable_bit);
7642                 return -ENODEV;
7643         }
7644
7645         return 0;
7646 }
7647
7648 /* tp->lock is held. */
7649 static int tg3_abort_hw(struct tg3 *tp, int silent)
7650 {
7651         int i, err;
7652
7653         tg3_disable_ints(tp);
7654
7655         tp->rx_mode &= ~RX_MODE_ENABLE;
7656         tw32_f(MAC_RX_MODE, tp->rx_mode);
7657         udelay(10);
7658
7659         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7660         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7661         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7662         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7663         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7664         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7665
7666         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7667         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7668         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7669         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7670         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7671         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7672         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7673
7674         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7675         tw32_f(MAC_MODE, tp->mac_mode);
7676         udelay(40);
7677
7678         tp->tx_mode &= ~TX_MODE_ENABLE;
7679         tw32_f(MAC_TX_MODE, tp->tx_mode);
7680
7681         for (i = 0; i < MAX_WAIT_CNT; i++) {
7682                 udelay(100);
7683                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7684                         break;
7685         }
7686         if (i >= MAX_WAIT_CNT) {
7687                 dev_err(&tp->pdev->dev,
7688                         "%s timed out, TX_MODE_ENABLE will not clear "
7689                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7690                 err |= -ENODEV;
7691         }
7692
7693         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7694         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7695         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7696
7697         tw32(FTQ_RESET, 0xffffffff);
7698         tw32(FTQ_RESET, 0x00000000);
7699
7700         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7701         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7702
7703         for (i = 0; i < tp->irq_cnt; i++) {
7704                 struct tg3_napi *tnapi = &tp->napi[i];
7705                 if (tnapi->hw_status)
7706                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7707         }
7708
7709         return err;
7710 }
7711
7712 /* Save PCI command register before chip reset */
7713 static void tg3_save_pci_state(struct tg3 *tp)
7714 {
7715         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7716 }
7717
7718 /* Restore PCI state after chip reset */
7719 static void tg3_restore_pci_state(struct tg3 *tp)
7720 {
7721         u32 val;
7722
7723         /* Re-enable indirect register accesses. */
7724         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7725                                tp->misc_host_ctrl);
7726
7727         /* Set MAX PCI retry to zero. */
7728         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7729         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7730             tg3_flag(tp, PCIX_MODE))
7731                 val |= PCISTATE_RETRY_SAME_DMA;
7732         /* Allow reads and writes to the APE register and memory space. */
7733         if (tg3_flag(tp, ENABLE_APE))
7734                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7735                        PCISTATE_ALLOW_APE_SHMEM_WR |
7736                        PCISTATE_ALLOW_APE_PSPACE_WR;
7737         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7738
7739         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7740
7741         if (!tg3_flag(tp, PCI_EXPRESS)) {
7742                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7743                                       tp->pci_cacheline_sz);
7744                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7745                                       tp->pci_lat_timer);
7746         }
7747
7748         /* Make sure PCI-X relaxed ordering bit is clear. */
7749         if (tg3_flag(tp, PCIX_MODE)) {
7750                 u16 pcix_cmd;
7751
7752                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7753                                      &pcix_cmd);
7754                 pcix_cmd &= ~PCI_X_CMD_ERO;
7755                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7756                                       pcix_cmd);
7757         }
7758
7759         if (tg3_flag(tp, 5780_CLASS)) {
7760
7761                 /* Chip reset on 5780 will reset MSI enable bit,
7762                  * so need to restore it.
7763                  */
7764                 if (tg3_flag(tp, USING_MSI)) {
7765                         u16 ctrl;
7766
7767                         pci_read_config_word(tp->pdev,
7768                                              tp->msi_cap + PCI_MSI_FLAGS,
7769                                              &ctrl);
7770                         pci_write_config_word(tp->pdev,
7771                                               tp->msi_cap + PCI_MSI_FLAGS,
7772                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7773                         val = tr32(MSGINT_MODE);
7774                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7775                 }
7776         }
7777 }
7778
7779 /* tp->lock is held. */
7780 static int tg3_chip_reset(struct tg3 *tp)
7781 {
7782         u32 val;
7783         void (*write_op)(struct tg3 *, u32, u32);
7784         int i, err;
7785
7786         tg3_nvram_lock(tp);
7787
7788         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7789
7790         /* No matching tg3_nvram_unlock() after this because
7791          * chip reset below will undo the nvram lock.
7792          */
7793         tp->nvram_lock_cnt = 0;
7794
7795         /* GRC_MISC_CFG core clock reset will clear the memory
7796          * enable bit in PCI register 4 and the MSI enable bit
7797          * on some chips, so we save relevant registers here.
7798          */
7799         tg3_save_pci_state(tp);
7800
7801         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7802             tg3_flag(tp, 5755_PLUS))
7803                 tw32(GRC_FASTBOOT_PC, 0);
7804
7805         /*
7806          * We must avoid the readl() that normally takes place.
7807          * It locks machines, causes machine checks, and other
7808          * fun things.  So, temporarily disable the 5701
7809          * hardware workaround, while we do the reset.
7810          */
7811         write_op = tp->write32;
7812         if (write_op == tg3_write_flush_reg32)
7813                 tp->write32 = tg3_write32;
7814
7815         /* Prevent the irq handler from reading or writing PCI registers
7816          * during chip reset when the memory enable bit in the PCI command
7817          * register may be cleared.  The chip does not generate interrupt
7818          * at this time, but the irq handler may still be called due to irq
7819          * sharing or irqpoll.
7820          */
7821         tg3_flag_set(tp, CHIP_RESETTING);
7822         for (i = 0; i < tp->irq_cnt; i++) {
7823                 struct tg3_napi *tnapi = &tp->napi[i];
7824                 if (tnapi->hw_status) {
7825                         tnapi->hw_status->status = 0;
7826                         tnapi->hw_status->status_tag = 0;
7827                 }
7828                 tnapi->last_tag = 0;
7829                 tnapi->last_irq_tag = 0;
7830         }
7831         smp_mb();
7832
7833         for (i = 0; i < tp->irq_cnt; i++)
7834                 synchronize_irq(tp->napi[i].irq_vec);
7835
7836         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7837                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7838                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7839         }
7840
7841         /* do the reset */
7842         val = GRC_MISC_CFG_CORECLK_RESET;
7843
7844         if (tg3_flag(tp, PCI_EXPRESS)) {
7845                 /* Force PCIe 1.0a mode */
7846                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7847                     !tg3_flag(tp, 57765_PLUS) &&
7848                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7849                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7850                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7851
7852                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7853                         tw32(GRC_MISC_CFG, (1 << 29));
7854                         val |= (1 << 29);
7855                 }
7856         }
7857
7858         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7859                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7860                 tw32(GRC_VCPU_EXT_CTRL,
7861                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7862         }
7863
7864         /* Manage gphy power for all CPMU absent PCIe devices. */
7865         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7866                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7867
7868         tw32(GRC_MISC_CFG, val);
7869
7870         /* restore 5701 hardware bug workaround write method */
7871         tp->write32 = write_op;
7872
7873         /* Unfortunately, we have to delay before the PCI read back.
7874          * Some 575X chips even will not respond to a PCI cfg access
7875          * when the reset command is given to the chip.
7876          *
7877          * How do these hardware designers expect things to work
7878          * properly if the PCI write is posted for a long period
7879          * of time?  It is always necessary to have some method by
7880          * which a register read back can occur to push the write
7881          * out which does the reset.
7882          *
7883          * For most tg3 variants the trick below was working.
7884          * Ho hum...
7885          */
7886         udelay(120);
7887
7888         /* Flush PCI posted writes.  The normal MMIO registers
7889          * are inaccessible at this time so this is the only
7890          * way to make this reliably (actually, this is no longer
7891          * the case, see above).  I tried to use indirect
7892          * register read/write but this upset some 5701 variants.
7893          */
7894         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7895
7896         udelay(120);
7897
7898         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7899                 u16 val16;
7900
7901                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7902                         int i;
7903                         u32 cfg_val;
7904
7905                         /* Wait for link training to complete.  */
7906                         for (i = 0; i < 5000; i++)
7907                                 udelay(100);
7908
7909                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7910                         pci_write_config_dword(tp->pdev, 0xc4,
7911                                                cfg_val | (1 << 15));
7912                 }
7913
7914                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7915                 pci_read_config_word(tp->pdev,
7916                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7917                                      &val16);
7918                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7919                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7920                 /*
7921                  * Older PCIe devices only support the 128 byte
7922                  * MPS setting.  Enforce the restriction.
7923                  */
7924                 if (!tg3_flag(tp, CPMU_PRESENT))
7925                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7926                 pci_write_config_word(tp->pdev,
7927                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7928                                       val16);
7929
7930                 /* Clear error status */
7931                 pci_write_config_word(tp->pdev,
7932                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7933                                       PCI_EXP_DEVSTA_CED |
7934                                       PCI_EXP_DEVSTA_NFED |
7935                                       PCI_EXP_DEVSTA_FED |
7936                                       PCI_EXP_DEVSTA_URD);
7937         }
7938
7939         tg3_restore_pci_state(tp);
7940
7941         tg3_flag_clear(tp, CHIP_RESETTING);
7942         tg3_flag_clear(tp, ERROR_PROCESSED);
7943
7944         val = 0;
7945         if (tg3_flag(tp, 5780_CLASS))
7946                 val = tr32(MEMARB_MODE);
7947         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7948
7949         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7950                 tg3_stop_fw(tp);
7951                 tw32(0x5000, 0x400);
7952         }
7953
7954         tw32(GRC_MODE, tp->grc_mode);
7955
7956         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7957                 val = tr32(0xc4);
7958
7959                 tw32(0xc4, val | (1 << 15));
7960         }
7961
7962         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7963             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7964                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7965                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7966                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7967                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7968         }
7969
7970         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7971                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7972                 val = tp->mac_mode;
7973         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7974                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7975                 val = tp->mac_mode;
7976         } else
7977                 val = 0;
7978
7979         tw32_f(MAC_MODE, val);
7980         udelay(40);
7981
7982         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7983
7984         err = tg3_poll_fw(tp);
7985         if (err)
7986                 return err;
7987
7988         tg3_mdio_start(tp);
7989
7990         if (tg3_flag(tp, PCI_EXPRESS) &&
7991             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7992             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7993             !tg3_flag(tp, 57765_PLUS)) {
7994                 val = tr32(0x7c00);
7995
7996                 tw32(0x7c00, val | (1 << 25));
7997         }
7998
7999         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8000                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8001                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8002         }
8003
8004         /* Reprobe ASF enable state.  */
8005         tg3_flag_clear(tp, ENABLE_ASF);
8006         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8007         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8008         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8009                 u32 nic_cfg;
8010
8011                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8012                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8013                         tg3_flag_set(tp, ENABLE_ASF);
8014                         tp->last_event_jiffies = jiffies;
8015                         if (tg3_flag(tp, 5750_PLUS))
8016                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8017                 }
8018         }
8019
8020         return 0;
8021 }
8022
8023 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
8024                                                  struct rtnl_link_stats64 *);
8025 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
8026                                                 struct tg3_ethtool_stats *);
8027
8028 /* tp->lock is held. */
8029 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8030 {
8031         int err;
8032
8033         tg3_stop_fw(tp);
8034
8035         tg3_write_sig_pre_reset(tp, kind);
8036
8037         tg3_abort_hw(tp, silent);
8038         err = tg3_chip_reset(tp);
8039
8040         __tg3_set_mac_addr(tp, 0);
8041
8042         tg3_write_sig_legacy(tp, kind);
8043         tg3_write_sig_post_reset(tp, kind);
8044
8045         if (tp->hw_stats) {
8046                 /* Save the stats across chip resets... */
8047                 tg3_get_stats64(tp->dev, &tp->net_stats_prev),
8048                 tg3_get_estats(tp, &tp->estats_prev);
8049
8050                 /* And make sure the next sample is new data */
8051                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8052         }
8053
8054         if (err)
8055                 return err;
8056
8057         return 0;
8058 }
8059
8060 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8061 {
8062         struct tg3 *tp = netdev_priv(dev);
8063         struct sockaddr *addr = p;
8064         int err = 0, skip_mac_1 = 0;
8065
8066         if (!is_valid_ether_addr(addr->sa_data))
8067                 return -EINVAL;
8068
8069         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8070
8071         if (!netif_running(dev))
8072                 return 0;
8073
8074         if (tg3_flag(tp, ENABLE_ASF)) {
8075                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8076
8077                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8078                 addr0_low = tr32(MAC_ADDR_0_LOW);
8079                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8080                 addr1_low = tr32(MAC_ADDR_1_LOW);
8081
8082                 /* Skip MAC addr 1 if ASF is using it. */
8083                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8084                     !(addr1_high == 0 && addr1_low == 0))
8085                         skip_mac_1 = 1;
8086         }
8087         spin_lock_bh(&tp->lock);
8088         __tg3_set_mac_addr(tp, skip_mac_1);
8089         spin_unlock_bh(&tp->lock);
8090
8091         return err;
8092 }
8093
8094 /* tp->lock is held. */
8095 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8096                            dma_addr_t mapping, u32 maxlen_flags,
8097                            u32 nic_addr)
8098 {
8099         tg3_write_mem(tp,
8100                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8101                       ((u64) mapping >> 32));
8102         tg3_write_mem(tp,
8103                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8104                       ((u64) mapping & 0xffffffff));
8105         tg3_write_mem(tp,
8106                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8107                        maxlen_flags);
8108
8109         if (!tg3_flag(tp, 5705_PLUS))
8110                 tg3_write_mem(tp,
8111                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8112                               nic_addr);
8113 }
8114
8115 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8116 {
8117         int i;
8118
8119         if (!tg3_flag(tp, ENABLE_TSS)) {
8120                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8121                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8122                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8123         } else {
8124                 tw32(HOSTCC_TXCOL_TICKS, 0);
8125                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8126                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8127         }
8128
8129         if (!tg3_flag(tp, ENABLE_RSS)) {
8130                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8131                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8132                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8133         } else {
8134                 tw32(HOSTCC_RXCOL_TICKS, 0);
8135                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8136                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8137         }
8138
8139         if (!tg3_flag(tp, 5705_PLUS)) {
8140                 u32 val = ec->stats_block_coalesce_usecs;
8141
8142                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8143                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8144
8145                 if (!netif_carrier_ok(tp->dev))
8146                         val = 0;
8147
8148                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8149         }
8150
8151         for (i = 0; i < tp->irq_cnt - 1; i++) {
8152                 u32 reg;
8153
8154                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8155                 tw32(reg, ec->rx_coalesce_usecs);
8156                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8157                 tw32(reg, ec->rx_max_coalesced_frames);
8158                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8159                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8160
8161                 if (tg3_flag(tp, ENABLE_TSS)) {
8162                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8163                         tw32(reg, ec->tx_coalesce_usecs);
8164                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8165                         tw32(reg, ec->tx_max_coalesced_frames);
8166                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8167                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8168                 }
8169         }
8170
8171         for (; i < tp->irq_max - 1; i++) {
8172                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8173                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8174                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8175
8176                 if (tg3_flag(tp, ENABLE_TSS)) {
8177                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8178                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8179                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8180                 }
8181         }
8182 }
8183
8184 /* tp->lock is held. */
8185 static void tg3_rings_reset(struct tg3 *tp)
8186 {
8187         int i;
8188         u32 stblk, txrcb, rxrcb, limit;
8189         struct tg3_napi *tnapi = &tp->napi[0];
8190
8191         /* Disable all transmit rings but the first. */
8192         if (!tg3_flag(tp, 5705_PLUS))
8193                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8194         else if (tg3_flag(tp, 5717_PLUS))
8195                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8196         else if (tg3_flag(tp, 57765_CLASS))
8197                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8198         else
8199                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8200
8201         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8202              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8203                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8204                               BDINFO_FLAGS_DISABLED);
8205
8206
8207         /* Disable all receive return rings but the first. */
8208         if (tg3_flag(tp, 5717_PLUS))
8209                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8210         else if (!tg3_flag(tp, 5705_PLUS))
8211                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8212         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8213                  tg3_flag(tp, 57765_CLASS))
8214                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8215         else
8216                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8217
8218         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8219              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8220                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8221                               BDINFO_FLAGS_DISABLED);
8222
8223         /* Disable interrupts */
8224         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8225         tp->napi[0].chk_msi_cnt = 0;
8226         tp->napi[0].last_rx_cons = 0;
8227         tp->napi[0].last_tx_cons = 0;
8228
8229         /* Zero mailbox registers. */
8230         if (tg3_flag(tp, SUPPORT_MSIX)) {
8231                 for (i = 1; i < tp->irq_max; i++) {
8232                         tp->napi[i].tx_prod = 0;
8233                         tp->napi[i].tx_cons = 0;
8234                         if (tg3_flag(tp, ENABLE_TSS))
8235                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8236                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8237                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8238                         tp->napi[i].chk_msi_cnt = 0;
8239                         tp->napi[i].last_rx_cons = 0;
8240                         tp->napi[i].last_tx_cons = 0;
8241                 }
8242                 if (!tg3_flag(tp, ENABLE_TSS))
8243                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8244         } else {
8245                 tp->napi[0].tx_prod = 0;
8246                 tp->napi[0].tx_cons = 0;
8247                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8248                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8249         }
8250
8251         /* Make sure the NIC-based send BD rings are disabled. */
8252         if (!tg3_flag(tp, 5705_PLUS)) {
8253                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8254                 for (i = 0; i < 16; i++)
8255                         tw32_tx_mbox(mbox + i * 8, 0);
8256         }
8257
8258         txrcb = NIC_SRAM_SEND_RCB;
8259         rxrcb = NIC_SRAM_RCV_RET_RCB;
8260
8261         /* Clear status block in ram. */
8262         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8263
8264         /* Set status block DMA address */
8265         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8266              ((u64) tnapi->status_mapping >> 32));
8267         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8268              ((u64) tnapi->status_mapping & 0xffffffff));
8269
8270         if (tnapi->tx_ring) {
8271                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8272                                (TG3_TX_RING_SIZE <<
8273                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8274                                NIC_SRAM_TX_BUFFER_DESC);
8275                 txrcb += TG3_BDINFO_SIZE;
8276         }
8277
8278         if (tnapi->rx_rcb) {
8279                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8280                                (tp->rx_ret_ring_mask + 1) <<
8281                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8282                 rxrcb += TG3_BDINFO_SIZE;
8283         }
8284
8285         stblk = HOSTCC_STATBLCK_RING1;
8286
8287         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8288                 u64 mapping = (u64)tnapi->status_mapping;
8289                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8290                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8291
8292                 /* Clear status block in ram. */
8293                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8294
8295                 if (tnapi->tx_ring) {
8296                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8297                                        (TG3_TX_RING_SIZE <<
8298                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8299                                        NIC_SRAM_TX_BUFFER_DESC);
8300                         txrcb += TG3_BDINFO_SIZE;
8301                 }
8302
8303                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8304                                ((tp->rx_ret_ring_mask + 1) <<
8305                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8306
8307                 stblk += 8;
8308                 rxrcb += TG3_BDINFO_SIZE;
8309         }
8310 }
8311
8312 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8313 {
8314         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8315
8316         if (!tg3_flag(tp, 5750_PLUS) ||
8317             tg3_flag(tp, 5780_CLASS) ||
8318             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8319             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8320             tg3_flag(tp, 57765_PLUS))
8321                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8322         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8323                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8324                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8325         else
8326                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8327
8328         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8329         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8330
8331         val = min(nic_rep_thresh, host_rep_thresh);
8332         tw32(RCVBDI_STD_THRESH, val);
8333
8334         if (tg3_flag(tp, 57765_PLUS))
8335                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8336
8337         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8338                 return;
8339
8340         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8341
8342         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8343
8344         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8345         tw32(RCVBDI_JUMBO_THRESH, val);
8346
8347         if (tg3_flag(tp, 57765_PLUS))
8348                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8349 }
8350
8351 static inline u32 calc_crc(unsigned char *buf, int len)
8352 {
8353         u32 reg;
8354         u32 tmp;
8355         int j, k;
8356
8357         reg = 0xffffffff;
8358
8359         for (j = 0; j < len; j++) {
8360                 reg ^= buf[j];
8361
8362                 for (k = 0; k < 8; k++) {
8363                         tmp = reg & 0x01;
8364
8365                         reg >>= 1;
8366
8367                         if (tmp)
8368                                 reg ^= 0xedb88320;
8369                 }
8370         }
8371
8372         return ~reg;
8373 }
8374
8375 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8376 {
8377         /* accept or reject all multicast frames */
8378         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8379         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8380         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8381         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8382 }
8383
8384 static void __tg3_set_rx_mode(struct net_device *dev)
8385 {
8386         struct tg3 *tp = netdev_priv(dev);
8387         u32 rx_mode;
8388
8389         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8390                                   RX_MODE_KEEP_VLAN_TAG);
8391
8392 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8393         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8394          * flag clear.
8395          */
8396         if (!tg3_flag(tp, ENABLE_ASF))
8397                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8398 #endif
8399
8400         if (dev->flags & IFF_PROMISC) {
8401                 /* Promiscuous mode. */
8402                 rx_mode |= RX_MODE_PROMISC;
8403         } else if (dev->flags & IFF_ALLMULTI) {
8404                 /* Accept all multicast. */
8405                 tg3_set_multi(tp, 1);
8406         } else if (netdev_mc_empty(dev)) {
8407                 /* Reject all multicast. */
8408                 tg3_set_multi(tp, 0);
8409         } else {
8410                 /* Accept one or more multicast(s). */
8411                 struct netdev_hw_addr *ha;
8412                 u32 mc_filter[4] = { 0, };
8413                 u32 regidx;
8414                 u32 bit;
8415                 u32 crc;
8416
8417                 netdev_for_each_mc_addr(ha, dev) {
8418                         crc = calc_crc(ha->addr, ETH_ALEN);
8419                         bit = ~crc & 0x7f;
8420                         regidx = (bit & 0x60) >> 5;
8421                         bit &= 0x1f;
8422                         mc_filter[regidx] |= (1 << bit);
8423                 }
8424
8425                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8426                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8427                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8428                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8429         }
8430
8431         if (rx_mode != tp->rx_mode) {
8432                 tp->rx_mode = rx_mode;
8433                 tw32_f(MAC_RX_MODE, rx_mode);
8434                 udelay(10);
8435         }
8436 }
8437
8438 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8439 {
8440         int i;
8441
8442         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8443                 tp->rss_ind_tbl[i] =
8444                         ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8445 }
8446
8447 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8448 {
8449         int i;
8450
8451         if (!tg3_flag(tp, SUPPORT_MSIX))
8452                 return;
8453
8454         if (tp->irq_cnt <= 2) {
8455                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8456                 return;
8457         }
8458
8459         /* Validate table against current IRQ count */
8460         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8461                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8462                         break;
8463         }
8464
8465         if (i != TG3_RSS_INDIR_TBL_SIZE)
8466                 tg3_rss_init_dflt_indir_tbl(tp);
8467 }
8468
8469 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8470 {
8471         int i = 0;
8472         u32 reg = MAC_RSS_INDIR_TBL_0;
8473
8474         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8475                 u32 val = tp->rss_ind_tbl[i];
8476                 i++;
8477                 for (; i % 8; i++) {
8478                         val <<= 4;
8479                         val |= tp->rss_ind_tbl[i];
8480                 }
8481                 tw32(reg, val);
8482                 reg += 4;
8483         }
8484 }
8485
8486 /* tp->lock is held. */
8487 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8488 {
8489         u32 val, rdmac_mode;
8490         int i, err, limit;
8491         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8492
8493         tg3_disable_ints(tp);
8494
8495         tg3_stop_fw(tp);
8496
8497         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8498
8499         if (tg3_flag(tp, INIT_COMPLETE))
8500                 tg3_abort_hw(tp, 1);
8501
8502         /* Enable MAC control of LPI */
8503         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8504                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8505                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8506                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8507
8508                 tw32_f(TG3_CPMU_EEE_CTRL,
8509                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8510
8511                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8512                       TG3_CPMU_EEEMD_LPI_IN_TX |
8513                       TG3_CPMU_EEEMD_LPI_IN_RX |
8514                       TG3_CPMU_EEEMD_EEE_ENABLE;
8515
8516                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8517                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8518
8519                 if (tg3_flag(tp, ENABLE_APE))
8520                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8521
8522                 tw32_f(TG3_CPMU_EEE_MODE, val);
8523
8524                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8525                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8526                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8527
8528                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8529                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8530                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8531         }
8532
8533         if (reset_phy)
8534                 tg3_phy_reset(tp);
8535
8536         err = tg3_chip_reset(tp);
8537         if (err)
8538                 return err;
8539
8540         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8541
8542         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8543                 val = tr32(TG3_CPMU_CTRL);
8544                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8545                 tw32(TG3_CPMU_CTRL, val);
8546
8547                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8548                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8549                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8550                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8551
8552                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8553                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8554                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8555                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8556
8557                 val = tr32(TG3_CPMU_HST_ACC);
8558                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8559                 val |= CPMU_HST_ACC_MACCLK_6_25;
8560                 tw32(TG3_CPMU_HST_ACC, val);
8561         }
8562
8563         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8564                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8565                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8566                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8567                 tw32(PCIE_PWR_MGMT_THRESH, val);
8568
8569                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8570                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8571
8572                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8573
8574                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8575                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8576         }
8577
8578         if (tg3_flag(tp, L1PLLPD_EN)) {
8579                 u32 grc_mode = tr32(GRC_MODE);
8580
8581                 /* Access the lower 1K of PL PCIE block registers. */
8582                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8583                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8584
8585                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8586                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8587                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8588
8589                 tw32(GRC_MODE, grc_mode);
8590         }
8591
8592         if (tg3_flag(tp, 57765_CLASS)) {
8593                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8594                         u32 grc_mode = tr32(GRC_MODE);
8595
8596                         /* Access the lower 1K of PL PCIE block registers. */
8597                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8598                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8599
8600                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8601                                    TG3_PCIE_PL_LO_PHYCTL5);
8602                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8603                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8604
8605                         tw32(GRC_MODE, grc_mode);
8606                 }
8607
8608                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8609                         u32 grc_mode = tr32(GRC_MODE);
8610
8611                         /* Access the lower 1K of DL PCIE block registers. */
8612                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8613                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8614
8615                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8616                                    TG3_PCIE_DL_LO_FTSMAX);
8617                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8618                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8619                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8620
8621                         tw32(GRC_MODE, grc_mode);
8622                 }
8623
8624                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8625                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8626                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8627                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8628         }
8629
8630         /* This works around an issue with Athlon chipsets on
8631          * B3 tigon3 silicon.  This bit has no effect on any
8632          * other revision.  But do not set this on PCI Express
8633          * chips and don't even touch the clocks if the CPMU is present.
8634          */
8635         if (!tg3_flag(tp, CPMU_PRESENT)) {
8636                 if (!tg3_flag(tp, PCI_EXPRESS))
8637                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8638                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8639         }
8640
8641         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8642             tg3_flag(tp, PCIX_MODE)) {
8643                 val = tr32(TG3PCI_PCISTATE);
8644                 val |= PCISTATE_RETRY_SAME_DMA;
8645                 tw32(TG3PCI_PCISTATE, val);
8646         }
8647
8648         if (tg3_flag(tp, ENABLE_APE)) {
8649                 /* Allow reads and writes to the
8650                  * APE register and memory space.
8651                  */
8652                 val = tr32(TG3PCI_PCISTATE);
8653                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8654                        PCISTATE_ALLOW_APE_SHMEM_WR |
8655                        PCISTATE_ALLOW_APE_PSPACE_WR;
8656                 tw32(TG3PCI_PCISTATE, val);
8657         }
8658
8659         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8660                 /* Enable some hw fixes.  */
8661                 val = tr32(TG3PCI_MSI_DATA);
8662                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8663                 tw32(TG3PCI_MSI_DATA, val);
8664         }
8665
8666         /* Descriptor ring init may make accesses to the
8667          * NIC SRAM area to setup the TX descriptors, so we
8668          * can only do this after the hardware has been
8669          * successfully reset.
8670          */
8671         err = tg3_init_rings(tp);
8672         if (err)
8673                 return err;
8674
8675         if (tg3_flag(tp, 57765_PLUS)) {
8676                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8677                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8678                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8679                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8680                 if (!tg3_flag(tp, 57765_CLASS) &&
8681                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8682                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8683                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8684         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8685                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8686                 /* This value is determined during the probe time DMA
8687                  * engine test, tg3_test_dma.
8688                  */
8689                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8690         }
8691
8692         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8693                           GRC_MODE_4X_NIC_SEND_RINGS |
8694                           GRC_MODE_NO_TX_PHDR_CSUM |
8695                           GRC_MODE_NO_RX_PHDR_CSUM);
8696         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8697
8698         /* Pseudo-header checksum is done by hardware logic and not
8699          * the offload processers, so make the chip do the pseudo-
8700          * header checksums on receive.  For transmit it is more
8701          * convenient to do the pseudo-header checksum in software
8702          * as Linux does that on transmit for us in all cases.
8703          */
8704         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8705
8706         tw32(GRC_MODE,
8707              tp->grc_mode |
8708              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8709
8710         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
8711         val = tr32(GRC_MISC_CFG);
8712         val &= ~0xff;
8713         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8714         tw32(GRC_MISC_CFG, val);
8715
8716         /* Initialize MBUF/DESC pool. */
8717         if (tg3_flag(tp, 5750_PLUS)) {
8718                 /* Do nothing.  */
8719         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8720                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8721                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8722                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8723                 else
8724                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8725                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8726                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8727         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8728                 int fw_len;
8729
8730                 fw_len = tp->fw_len;
8731                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8732                 tw32(BUFMGR_MB_POOL_ADDR,
8733                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8734                 tw32(BUFMGR_MB_POOL_SIZE,
8735                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8736         }
8737
8738         if (tp->dev->mtu <= ETH_DATA_LEN) {
8739                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8740                      tp->bufmgr_config.mbuf_read_dma_low_water);
8741                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8742                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8743                 tw32(BUFMGR_MB_HIGH_WATER,
8744                      tp->bufmgr_config.mbuf_high_water);
8745         } else {
8746                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8747                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8748                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8749                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8750                 tw32(BUFMGR_MB_HIGH_WATER,
8751                      tp->bufmgr_config.mbuf_high_water_jumbo);
8752         }
8753         tw32(BUFMGR_DMA_LOW_WATER,
8754              tp->bufmgr_config.dma_low_water);
8755         tw32(BUFMGR_DMA_HIGH_WATER,
8756              tp->bufmgr_config.dma_high_water);
8757
8758         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8759         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8760                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8761         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8762             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8763             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8764                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8765         tw32(BUFMGR_MODE, val);
8766         for (i = 0; i < 2000; i++) {
8767                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8768                         break;
8769                 udelay(10);
8770         }
8771         if (i >= 2000) {
8772                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8773                 return -ENODEV;
8774         }
8775
8776         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8777                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8778
8779         tg3_setup_rxbd_thresholds(tp);
8780
8781         /* Initialize TG3_BDINFO's at:
8782          *  RCVDBDI_STD_BD:     standard eth size rx ring
8783          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8784          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8785          *
8786          * like so:
8787          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8788          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8789          *                              ring attribute flags
8790          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8791          *
8792          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8793          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8794          *
8795          * The size of each ring is fixed in the firmware, but the location is
8796          * configurable.
8797          */
8798         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8799              ((u64) tpr->rx_std_mapping >> 32));
8800         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8801              ((u64) tpr->rx_std_mapping & 0xffffffff));
8802         if (!tg3_flag(tp, 5717_PLUS))
8803                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8804                      NIC_SRAM_RX_BUFFER_DESC);
8805
8806         /* Disable the mini ring */
8807         if (!tg3_flag(tp, 5705_PLUS))
8808                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8809                      BDINFO_FLAGS_DISABLED);
8810
8811         /* Program the jumbo buffer descriptor ring control
8812          * blocks on those devices that have them.
8813          */
8814         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8815             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8816
8817                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8818                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8819                              ((u64) tpr->rx_jmb_mapping >> 32));
8820                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8821                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8822                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8823                               BDINFO_FLAGS_MAXLEN_SHIFT;
8824                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8825                              val | BDINFO_FLAGS_USE_EXT_RECV);
8826                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8827                             tg3_flag(tp, 57765_CLASS))
8828                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8829                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8830                 } else {
8831                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8832                              BDINFO_FLAGS_DISABLED);
8833                 }
8834
8835                 if (tg3_flag(tp, 57765_PLUS)) {
8836                         val = TG3_RX_STD_RING_SIZE(tp);
8837                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8838                         val |= (TG3_RX_STD_DMA_SZ << 2);
8839                 } else
8840                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8841         } else
8842                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8843
8844         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8845
8846         tpr->rx_std_prod_idx = tp->rx_pending;
8847         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8848
8849         tpr->rx_jmb_prod_idx =
8850                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8851         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8852
8853         tg3_rings_reset(tp);
8854
8855         /* Initialize MAC address and backoff seed. */
8856         __tg3_set_mac_addr(tp, 0);
8857
8858         /* MTU + ethernet header + FCS + optional VLAN tag */
8859         tw32(MAC_RX_MTU_SIZE,
8860              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8861
8862         /* The slot time is changed by tg3_setup_phy if we
8863          * run at gigabit with half duplex.
8864          */
8865         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8866               (6 << TX_LENGTHS_IPG_SHIFT) |
8867               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8868
8869         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8870                 val |= tr32(MAC_TX_LENGTHS) &
8871                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8872                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8873
8874         tw32(MAC_TX_LENGTHS, val);
8875
8876         /* Receive rules. */
8877         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8878         tw32(RCVLPC_CONFIG, 0x0181);
8879
8880         /* Calculate RDMAC_MODE setting early, we need it to determine
8881          * the RCVLPC_STATE_ENABLE mask.
8882          */
8883         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8884                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8885                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8886                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8887                       RDMAC_MODE_LNGREAD_ENAB);
8888
8889         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8890                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8891
8892         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8893             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8894             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8895                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8896                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8897                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8898
8899         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8900             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8901                 if (tg3_flag(tp, TSO_CAPABLE) &&
8902                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8903                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8904                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8905                            !tg3_flag(tp, IS_5788)) {
8906                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8907                 }
8908         }
8909
8910         if (tg3_flag(tp, PCI_EXPRESS))
8911                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8912
8913         if (tg3_flag(tp, HW_TSO_1) ||
8914             tg3_flag(tp, HW_TSO_2) ||
8915             tg3_flag(tp, HW_TSO_3))
8916                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8917
8918         if (tg3_flag(tp, 57765_PLUS) ||
8919             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8920             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8921                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8922
8923         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8924                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8925
8926         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8927             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8928             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8929             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8930             tg3_flag(tp, 57765_PLUS)) {
8931                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8932                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8933                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8934                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8935                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8936                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8937                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8938                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8939                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8940                 }
8941                 tw32(TG3_RDMA_RSRVCTRL_REG,
8942                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8943         }
8944
8945         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8946             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8947                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8948                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8949                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8950                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8951         }
8952
8953         /* Receive/send statistics. */
8954         if (tg3_flag(tp, 5750_PLUS)) {
8955                 val = tr32(RCVLPC_STATS_ENABLE);
8956                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8957                 tw32(RCVLPC_STATS_ENABLE, val);
8958         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8959                    tg3_flag(tp, TSO_CAPABLE)) {
8960                 val = tr32(RCVLPC_STATS_ENABLE);
8961                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8962                 tw32(RCVLPC_STATS_ENABLE, val);
8963         } else {
8964                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8965         }
8966         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8967         tw32(SNDDATAI_STATSENAB, 0xffffff);
8968         tw32(SNDDATAI_STATSCTRL,
8969              (SNDDATAI_SCTRL_ENABLE |
8970               SNDDATAI_SCTRL_FASTUPD));
8971
8972         /* Setup host coalescing engine. */
8973         tw32(HOSTCC_MODE, 0);
8974         for (i = 0; i < 2000; i++) {
8975                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8976                         break;
8977                 udelay(10);
8978         }
8979
8980         __tg3_set_coalesce(tp, &tp->coal);
8981
8982         if (!tg3_flag(tp, 5705_PLUS)) {
8983                 /* Status/statistics block address.  See tg3_timer,
8984                  * the tg3_periodic_fetch_stats call there, and
8985                  * tg3_get_stats to see how this works for 5705/5750 chips.
8986                  */
8987                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8988                      ((u64) tp->stats_mapping >> 32));
8989                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8990                      ((u64) tp->stats_mapping & 0xffffffff));
8991                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8992
8993                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8994
8995                 /* Clear statistics and status block memory areas */
8996                 for (i = NIC_SRAM_STATS_BLK;
8997                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8998                      i += sizeof(u32)) {
8999                         tg3_write_mem(tp, i, 0);
9000                         udelay(40);
9001                 }
9002         }
9003
9004         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9005
9006         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9007         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9008         if (!tg3_flag(tp, 5705_PLUS))
9009                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9010
9011         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9012                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9013                 /* reset to prevent losing 1st rx packet intermittently */
9014                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9015                 udelay(10);
9016         }
9017
9018         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9019                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9020                         MAC_MODE_FHDE_ENABLE;
9021         if (tg3_flag(tp, ENABLE_APE))
9022                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9023         if (!tg3_flag(tp, 5705_PLUS) &&
9024             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9025             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9026                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9027         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9028         udelay(40);
9029
9030         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9031          * If TG3_FLAG_IS_NIC is zero, we should read the
9032          * register to preserve the GPIO settings for LOMs. The GPIOs,
9033          * whether used as inputs or outputs, are set by boot code after
9034          * reset.
9035          */
9036         if (!tg3_flag(tp, IS_NIC)) {
9037                 u32 gpio_mask;
9038
9039                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9040                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9041                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9042
9043                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9044                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9045                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9046
9047                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9048                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9049
9050                 tp->grc_local_ctrl &= ~gpio_mask;
9051                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9052
9053                 /* GPIO1 must be driven high for eeprom write protect */
9054                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9055                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9056                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9057         }
9058         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9059         udelay(100);
9060
9061         if (tg3_flag(tp, USING_MSIX)) {
9062                 val = tr32(MSGINT_MODE);
9063                 val |= MSGINT_MODE_ENABLE;
9064                 if (tp->irq_cnt > 1)
9065                         val |= MSGINT_MODE_MULTIVEC_EN;
9066                 if (!tg3_flag(tp, 1SHOT_MSI))
9067                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9068                 tw32(MSGINT_MODE, val);
9069         }
9070
9071         if (!tg3_flag(tp, 5705_PLUS)) {
9072                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9073                 udelay(40);
9074         }
9075
9076         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9077                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9078                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9079                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9080                WDMAC_MODE_LNGREAD_ENAB);
9081
9082         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9083             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9084                 if (tg3_flag(tp, TSO_CAPABLE) &&
9085                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9086                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9087                         /* nothing */
9088                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9089                            !tg3_flag(tp, IS_5788)) {
9090                         val |= WDMAC_MODE_RX_ACCEL;
9091                 }
9092         }
9093
9094         /* Enable host coalescing bug fix */
9095         if (tg3_flag(tp, 5755_PLUS))
9096                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9097
9098         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9099                 val |= WDMAC_MODE_BURST_ALL_DATA;
9100
9101         tw32_f(WDMAC_MODE, val);
9102         udelay(40);
9103
9104         if (tg3_flag(tp, PCIX_MODE)) {
9105                 u16 pcix_cmd;
9106
9107                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9108                                      &pcix_cmd);
9109                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9110                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9111                         pcix_cmd |= PCI_X_CMD_READ_2K;
9112                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9113                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9114                         pcix_cmd |= PCI_X_CMD_READ_2K;
9115                 }
9116                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9117                                       pcix_cmd);
9118         }
9119
9120         tw32_f(RDMAC_MODE, rdmac_mode);
9121         udelay(40);
9122
9123         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9124         if (!tg3_flag(tp, 5705_PLUS))
9125                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9126
9127         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9128                 tw32(SNDDATAC_MODE,
9129                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9130         else
9131                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9132
9133         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9134         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9135         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9136         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9137                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9138         tw32(RCVDBDI_MODE, val);
9139         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9140         if (tg3_flag(tp, HW_TSO_1) ||
9141             tg3_flag(tp, HW_TSO_2) ||
9142             tg3_flag(tp, HW_TSO_3))
9143                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9144         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9145         if (tg3_flag(tp, ENABLE_TSS))
9146                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9147         tw32(SNDBDI_MODE, val);
9148         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9149
9150         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9151                 err = tg3_load_5701_a0_firmware_fix(tp);
9152                 if (err)
9153                         return err;
9154         }
9155
9156         if (tg3_flag(tp, TSO_CAPABLE)) {
9157                 err = tg3_load_tso_firmware(tp);
9158                 if (err)
9159                         return err;
9160         }
9161
9162         tp->tx_mode = TX_MODE_ENABLE;
9163
9164         if (tg3_flag(tp, 5755_PLUS) ||
9165             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9166                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9167
9168         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9169                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9170                 tp->tx_mode &= ~val;
9171                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9172         }
9173
9174         tw32_f(MAC_TX_MODE, tp->tx_mode);
9175         udelay(100);
9176
9177         if (tg3_flag(tp, ENABLE_RSS)) {
9178                 tg3_rss_write_indir_tbl(tp);
9179
9180                 /* Setup the "secret" hash key. */
9181                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9182                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9183                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9184                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9185                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9186                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9187                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9188                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9189                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9190                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9191         }
9192
9193         tp->rx_mode = RX_MODE_ENABLE;
9194         if (tg3_flag(tp, 5755_PLUS))
9195                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9196
9197         if (tg3_flag(tp, ENABLE_RSS))
9198                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9199                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9200                                RX_MODE_RSS_IPV6_HASH_EN |
9201                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9202                                RX_MODE_RSS_IPV4_HASH_EN |
9203                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9204
9205         tw32_f(MAC_RX_MODE, tp->rx_mode);
9206         udelay(10);
9207
9208         tw32(MAC_LED_CTRL, tp->led_ctrl);
9209
9210         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9211         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9212                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9213                 udelay(10);
9214         }
9215         tw32_f(MAC_RX_MODE, tp->rx_mode);
9216         udelay(10);
9217
9218         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9219                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9220                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9221                         /* Set drive transmission level to 1.2V  */
9222                         /* only if the signal pre-emphasis bit is not set  */
9223                         val = tr32(MAC_SERDES_CFG);
9224                         val &= 0xfffff000;
9225                         val |= 0x880;
9226                         tw32(MAC_SERDES_CFG, val);
9227                 }
9228                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9229                         tw32(MAC_SERDES_CFG, 0x616000);
9230         }
9231
9232         /* Prevent chip from dropping frames when flow control
9233          * is enabled.
9234          */
9235         if (tg3_flag(tp, 57765_CLASS))
9236                 val = 1;
9237         else
9238                 val = 2;
9239         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9240
9241         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9242             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9243                 /* Use hardware link auto-negotiation */
9244                 tg3_flag_set(tp, HW_AUTONEG);
9245         }
9246
9247         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9248             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9249                 u32 tmp;
9250
9251                 tmp = tr32(SERDES_RX_CTRL);
9252                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9253                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9254                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9255                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9256         }
9257
9258         if (!tg3_flag(tp, USE_PHYLIB)) {
9259                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9260                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9261
9262                 err = tg3_setup_phy(tp, 0);
9263                 if (err)
9264                         return err;
9265
9266                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9267                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9268                         u32 tmp;
9269
9270                         /* Clear CRC stats. */
9271                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9272                                 tg3_writephy(tp, MII_TG3_TEST1,
9273                                              tmp | MII_TG3_TEST1_CRC_EN);
9274                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9275                         }
9276                 }
9277         }
9278
9279         __tg3_set_rx_mode(tp->dev);
9280
9281         /* Initialize receive rules. */
9282         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9283         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9284         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9285         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9286
9287         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9288                 limit = 8;
9289         else
9290                 limit = 16;
9291         if (tg3_flag(tp, ENABLE_ASF))
9292                 limit -= 4;
9293         switch (limit) {
9294         case 16:
9295                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9296         case 15:
9297                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9298         case 14:
9299                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9300         case 13:
9301                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9302         case 12:
9303                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9304         case 11:
9305                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9306         case 10:
9307                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9308         case 9:
9309                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9310         case 8:
9311                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9312         case 7:
9313                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9314         case 6:
9315                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9316         case 5:
9317                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9318         case 4:
9319                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9320         case 3:
9321                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9322         case 2:
9323         case 1:
9324
9325         default:
9326                 break;
9327         }
9328
9329         if (tg3_flag(tp, ENABLE_APE))
9330                 /* Write our heartbeat update interval to APE. */
9331                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9332                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9333
9334         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9335
9336         return 0;
9337 }
9338
9339 /* Called at device open time to get the chip ready for
9340  * packet processing.  Invoked with tp->lock held.
9341  */
9342 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9343 {
9344         tg3_switch_clocks(tp);
9345
9346         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9347
9348         return tg3_reset_hw(tp, reset_phy);
9349 }
9350
9351 /* Restart hardware after configuration changes, self-test, etc.
9352  * Invoked with tp->lock held.
9353  */
9354 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9355         __releases(tp->lock)
9356         __acquires(tp->lock)
9357 {
9358         int err;
9359
9360         err = tg3_init_hw(tp, reset_phy);
9361         if (err) {
9362                 netdev_err(tp->dev,
9363                            "Failed to re-initialize device, aborting\n");
9364                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9365                 tg3_full_unlock(tp);
9366                 del_timer_sync(&tp->timer);
9367                 tp->irq_sync = 0;
9368                 tg3_napi_enable(tp);
9369                 dev_close(tp->dev);
9370                 tg3_full_lock(tp, 0);
9371         }
9372         return err;
9373 }
9374
9375 static void tg3_reset_task(struct work_struct *work)
9376 {
9377         struct tg3 *tp = container_of(work, struct tg3, reset_task);
9378         int err;
9379
9380         tg3_full_lock(tp, 0);
9381
9382         if (!netif_running(tp->dev)) {
9383                 tg3_flag_clear(tp, RESET_TASK_PENDING);
9384                 tg3_full_unlock(tp);
9385                 return;
9386         }
9387
9388         tg3_full_unlock(tp);
9389
9390         tg3_phy_stop(tp);
9391
9392         tg3_netif_stop(tp);
9393
9394         tg3_full_lock(tp, 1);
9395
9396         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9397                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9398                 tp->write32_rx_mbox = tg3_write_flush_reg32;
9399                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9400                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9401         }
9402
9403         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9404         err = tg3_init_hw(tp, 1);
9405         if (err)
9406                 goto out;
9407
9408         tg3_netif_start(tp);
9409
9410 out:
9411         tg3_full_unlock(tp);
9412
9413         if (!err)
9414                 tg3_phy_start(tp);
9415
9416         tg3_flag_clear(tp, RESET_TASK_PENDING);
9417 }
9418
9419 #define TG3_STAT_ADD32(PSTAT, REG) \
9420 do {    u32 __val = tr32(REG); \
9421         (PSTAT)->low += __val; \
9422         if ((PSTAT)->low < __val) \
9423                 (PSTAT)->high += 1; \
9424 } while (0)
9425
9426 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9427 {
9428         struct tg3_hw_stats *sp = tp->hw_stats;
9429
9430         if (!netif_carrier_ok(tp->dev))
9431                 return;
9432
9433         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9434         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9435         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9436         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9437         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9438         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9439         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9440         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9441         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9442         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9443         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9444         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9445         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9446
9447         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9448         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9449         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9450         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9451         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9452         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9453         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9454         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9455         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9456         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9457         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9458         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9459         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9460         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9461
9462         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9463         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9464             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9465             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9466                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9467         } else {
9468                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9469                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9470                 if (val) {
9471                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9472                         sp->rx_discards.low += val;
9473                         if (sp->rx_discards.low < val)
9474                                 sp->rx_discards.high += 1;
9475                 }
9476                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9477         }
9478         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9479 }
9480
9481 static void tg3_chk_missed_msi(struct tg3 *tp)
9482 {
9483         u32 i;
9484
9485         for (i = 0; i < tp->irq_cnt; i++) {
9486                 struct tg3_napi *tnapi = &tp->napi[i];
9487
9488                 if (tg3_has_work(tnapi)) {
9489                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9490                             tnapi->last_tx_cons == tnapi->tx_cons) {
9491                                 if (tnapi->chk_msi_cnt < 1) {
9492                                         tnapi->chk_msi_cnt++;
9493                                         return;
9494                                 }
9495                                 tg3_msi(0, tnapi);
9496                         }
9497                 }
9498                 tnapi->chk_msi_cnt = 0;
9499                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9500                 tnapi->last_tx_cons = tnapi->tx_cons;
9501         }
9502 }
9503
9504 static void tg3_timer(unsigned long __opaque)
9505 {
9506         struct tg3 *tp = (struct tg3 *) __opaque;
9507
9508         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9509                 goto restart_timer;
9510
9511         spin_lock(&tp->lock);
9512
9513         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9514             tg3_flag(tp, 57765_CLASS))
9515                 tg3_chk_missed_msi(tp);
9516
9517         if (!tg3_flag(tp, TAGGED_STATUS)) {
9518                 /* All of this garbage is because when using non-tagged
9519                  * IRQ status the mailbox/status_block protocol the chip
9520                  * uses with the cpu is race prone.
9521                  */
9522                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9523                         tw32(GRC_LOCAL_CTRL,
9524                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9525                 } else {
9526                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9527                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9528                 }
9529
9530                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9531                         spin_unlock(&tp->lock);
9532                         tg3_reset_task_schedule(tp);
9533                         goto restart_timer;
9534                 }
9535         }
9536
9537         /* This part only runs once per second. */
9538         if (!--tp->timer_counter) {
9539                 if (tg3_flag(tp, 5705_PLUS))
9540                         tg3_periodic_fetch_stats(tp);
9541
9542                 if (tp->setlpicnt && !--tp->setlpicnt)
9543                         tg3_phy_eee_enable(tp);
9544
9545                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9546                         u32 mac_stat;
9547                         int phy_event;
9548
9549                         mac_stat = tr32(MAC_STATUS);
9550
9551                         phy_event = 0;
9552                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9553                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9554                                         phy_event = 1;
9555                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9556                                 phy_event = 1;
9557
9558                         if (phy_event)
9559                                 tg3_setup_phy(tp, 0);
9560                 } else if (tg3_flag(tp, POLL_SERDES)) {
9561                         u32 mac_stat = tr32(MAC_STATUS);
9562                         int need_setup = 0;
9563
9564                         if (netif_carrier_ok(tp->dev) &&
9565                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9566                                 need_setup = 1;
9567                         }
9568                         if (!netif_carrier_ok(tp->dev) &&
9569                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9570                                          MAC_STATUS_SIGNAL_DET))) {
9571                                 need_setup = 1;
9572                         }
9573                         if (need_setup) {
9574                                 if (!tp->serdes_counter) {
9575                                         tw32_f(MAC_MODE,
9576                                              (tp->mac_mode &
9577                                               ~MAC_MODE_PORT_MODE_MASK));
9578                                         udelay(40);
9579                                         tw32_f(MAC_MODE, tp->mac_mode);
9580                                         udelay(40);
9581                                 }
9582                                 tg3_setup_phy(tp, 0);
9583                         }
9584                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9585                            tg3_flag(tp, 5780_CLASS)) {
9586                         tg3_serdes_parallel_detect(tp);
9587                 }
9588
9589                 tp->timer_counter = tp->timer_multiplier;
9590         }
9591
9592         /* Heartbeat is only sent once every 2 seconds.
9593          *
9594          * The heartbeat is to tell the ASF firmware that the host
9595          * driver is still alive.  In the event that the OS crashes,
9596          * ASF needs to reset the hardware to free up the FIFO space
9597          * that may be filled with rx packets destined for the host.
9598          * If the FIFO is full, ASF will no longer function properly.
9599          *
9600          * Unintended resets have been reported on real time kernels
9601          * where the timer doesn't run on time.  Netpoll will also have
9602          * same problem.
9603          *
9604          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9605          * to check the ring condition when the heartbeat is expiring
9606          * before doing the reset.  This will prevent most unintended
9607          * resets.
9608          */
9609         if (!--tp->asf_counter) {
9610                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9611                         tg3_wait_for_event_ack(tp);
9612
9613                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9614                                       FWCMD_NICDRV_ALIVE3);
9615                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9616                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9617                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9618
9619                         tg3_generate_fw_event(tp);
9620                 }
9621                 tp->asf_counter = tp->asf_multiplier;
9622         }
9623
9624         spin_unlock(&tp->lock);
9625
9626 restart_timer:
9627         tp->timer.expires = jiffies + tp->timer_offset;
9628         add_timer(&tp->timer);
9629 }
9630
9631 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9632 {
9633         irq_handler_t fn;
9634         unsigned long flags;
9635         char *name;
9636         struct tg3_napi *tnapi = &tp->napi[irq_num];
9637
9638         if (tp->irq_cnt == 1)
9639                 name = tp->dev->name;
9640         else {
9641                 name = &tnapi->irq_lbl[0];
9642                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9643                 name[IFNAMSIZ-1] = 0;
9644         }
9645
9646         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9647                 fn = tg3_msi;
9648                 if (tg3_flag(tp, 1SHOT_MSI))
9649                         fn = tg3_msi_1shot;
9650                 flags = 0;
9651         } else {
9652                 fn = tg3_interrupt;
9653                 if (tg3_flag(tp, TAGGED_STATUS))
9654                         fn = tg3_interrupt_tagged;
9655                 flags = IRQF_SHARED;
9656         }
9657
9658         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9659 }
9660
9661 static int tg3_test_interrupt(struct tg3 *tp)
9662 {
9663         struct tg3_napi *tnapi = &tp->napi[0];
9664         struct net_device *dev = tp->dev;
9665         int err, i, intr_ok = 0;
9666         u32 val;
9667
9668         if (!netif_running(dev))
9669                 return -ENODEV;
9670
9671         tg3_disable_ints(tp);
9672
9673         free_irq(tnapi->irq_vec, tnapi);
9674
9675         /*
9676          * Turn off MSI one shot mode.  Otherwise this test has no
9677          * observable way to know whether the interrupt was delivered.
9678          */
9679         if (tg3_flag(tp, 57765_PLUS)) {
9680                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9681                 tw32(MSGINT_MODE, val);
9682         }
9683
9684         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9685                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9686         if (err)
9687                 return err;
9688
9689         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9690         tg3_enable_ints(tp);
9691
9692         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9693                tnapi->coal_now);
9694
9695         for (i = 0; i < 5; i++) {
9696                 u32 int_mbox, misc_host_ctrl;
9697
9698                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9699                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9700
9701                 if ((int_mbox != 0) ||
9702                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9703                         intr_ok = 1;
9704                         break;
9705                 }
9706
9707                 if (tg3_flag(tp, 57765_PLUS) &&
9708                     tnapi->hw_status->status_tag != tnapi->last_tag)
9709                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9710
9711                 msleep(10);
9712         }
9713
9714         tg3_disable_ints(tp);
9715
9716         free_irq(tnapi->irq_vec, tnapi);
9717
9718         err = tg3_request_irq(tp, 0);
9719
9720         if (err)
9721                 return err;
9722
9723         if (intr_ok) {
9724                 /* Reenable MSI one shot mode. */
9725                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9726                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9727                         tw32(MSGINT_MODE, val);
9728                 }
9729                 return 0;
9730         }
9731
9732         return -EIO;
9733 }
9734
9735 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9736  * successfully restored
9737  */
9738 static int tg3_test_msi(struct tg3 *tp)
9739 {
9740         int err;
9741         u16 pci_cmd;
9742
9743         if (!tg3_flag(tp, USING_MSI))
9744                 return 0;
9745
9746         /* Turn off SERR reporting in case MSI terminates with Master
9747          * Abort.
9748          */
9749         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9750         pci_write_config_word(tp->pdev, PCI_COMMAND,
9751                               pci_cmd & ~PCI_COMMAND_SERR);
9752
9753         err = tg3_test_interrupt(tp);
9754
9755         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9756
9757         if (!err)
9758                 return 0;
9759
9760         /* other failures */
9761         if (err != -EIO)
9762                 return err;
9763
9764         /* MSI test failed, go back to INTx mode */
9765         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9766                     "to INTx mode. Please report this failure to the PCI "
9767                     "maintainer and include system chipset information\n");
9768
9769         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9770
9771         pci_disable_msi(tp->pdev);
9772
9773         tg3_flag_clear(tp, USING_MSI);
9774         tp->napi[0].irq_vec = tp->pdev->irq;
9775
9776         err = tg3_request_irq(tp, 0);
9777         if (err)
9778                 return err;
9779
9780         /* Need to reset the chip because the MSI cycle may have terminated
9781          * with Master Abort.
9782          */
9783         tg3_full_lock(tp, 1);
9784
9785         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9786         err = tg3_init_hw(tp, 1);
9787
9788         tg3_full_unlock(tp);
9789
9790         if (err)
9791                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9792
9793         return err;
9794 }
9795
9796 static int tg3_request_firmware(struct tg3 *tp)
9797 {
9798         const __be32 *fw_data;
9799
9800         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9801                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9802                            tp->fw_needed);
9803                 return -ENOENT;
9804         }
9805
9806         fw_data = (void *)tp->fw->data;
9807
9808         /* Firmware blob starts with version numbers, followed by
9809          * start address and _full_ length including BSS sections
9810          * (which must be longer than the actual data, of course
9811          */
9812
9813         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9814         if (tp->fw_len < (tp->fw->size - 12)) {
9815                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9816                            tp->fw_len, tp->fw_needed);
9817                 release_firmware(tp->fw);
9818                 tp->fw = NULL;
9819                 return -EINVAL;
9820         }
9821
9822         /* We no longer need firmware; we have it. */
9823         tp->fw_needed = NULL;
9824         return 0;
9825 }
9826
9827 static bool tg3_enable_msix(struct tg3 *tp)
9828 {
9829         int i, rc;
9830         struct msix_entry msix_ent[tp->irq_max];
9831
9832         tp->irq_cnt = num_online_cpus();
9833         if (tp->irq_cnt > 1) {
9834                 /* We want as many rx rings enabled as there are cpus.
9835                  * In multiqueue MSI-X mode, the first MSI-X vector
9836                  * only deals with link interrupts, etc, so we add
9837                  * one to the number of vectors we are requesting.
9838                  */
9839                 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9840         }
9841
9842         for (i = 0; i < tp->irq_max; i++) {
9843                 msix_ent[i].entry  = i;
9844                 msix_ent[i].vector = 0;
9845         }
9846
9847         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9848         if (rc < 0) {
9849                 return false;
9850         } else if (rc != 0) {
9851                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9852                         return false;
9853                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9854                               tp->irq_cnt, rc);
9855                 tp->irq_cnt = rc;
9856         }
9857
9858         for (i = 0; i < tp->irq_max; i++)
9859                 tp->napi[i].irq_vec = msix_ent[i].vector;
9860
9861         netif_set_real_num_tx_queues(tp->dev, 1);
9862         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9863         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9864                 pci_disable_msix(tp->pdev);
9865                 return false;
9866         }
9867
9868         if (tp->irq_cnt > 1) {
9869                 tg3_flag_set(tp, ENABLE_RSS);
9870
9871                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9872                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9873                         tg3_flag_set(tp, ENABLE_TSS);
9874                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9875                 }
9876         }
9877
9878         return true;
9879 }
9880
9881 static void tg3_ints_init(struct tg3 *tp)
9882 {
9883         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9884             !tg3_flag(tp, TAGGED_STATUS)) {
9885                 /* All MSI supporting chips should support tagged
9886                  * status.  Assert that this is the case.
9887                  */
9888                 netdev_warn(tp->dev,
9889                             "MSI without TAGGED_STATUS? Not using MSI\n");
9890                 goto defcfg;
9891         }
9892
9893         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9894                 tg3_flag_set(tp, USING_MSIX);
9895         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9896                 tg3_flag_set(tp, USING_MSI);
9897
9898         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9899                 u32 msi_mode = tr32(MSGINT_MODE);
9900                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9901                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9902                 if (!tg3_flag(tp, 1SHOT_MSI))
9903                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9904                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9905         }
9906 defcfg:
9907         if (!tg3_flag(tp, USING_MSIX)) {
9908                 tp->irq_cnt = 1;
9909                 tp->napi[0].irq_vec = tp->pdev->irq;
9910                 netif_set_real_num_tx_queues(tp->dev, 1);
9911                 netif_set_real_num_rx_queues(tp->dev, 1);
9912         }
9913 }
9914
9915 static void tg3_ints_fini(struct tg3 *tp)
9916 {
9917         if (tg3_flag(tp, USING_MSIX))
9918                 pci_disable_msix(tp->pdev);
9919         else if (tg3_flag(tp, USING_MSI))
9920                 pci_disable_msi(tp->pdev);
9921         tg3_flag_clear(tp, USING_MSI);
9922         tg3_flag_clear(tp, USING_MSIX);
9923         tg3_flag_clear(tp, ENABLE_RSS);
9924         tg3_flag_clear(tp, ENABLE_TSS);
9925 }
9926
9927 static int tg3_open(struct net_device *dev)
9928 {
9929         struct tg3 *tp = netdev_priv(dev);
9930         int i, err;
9931
9932         if (tp->fw_needed) {
9933                 err = tg3_request_firmware(tp);
9934                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9935                         if (err)
9936                                 return err;
9937                 } else if (err) {
9938                         netdev_warn(tp->dev, "TSO capability disabled\n");
9939                         tg3_flag_clear(tp, TSO_CAPABLE);
9940                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9941                         netdev_notice(tp->dev, "TSO capability restored\n");
9942                         tg3_flag_set(tp, TSO_CAPABLE);
9943                 }
9944         }
9945
9946         netif_carrier_off(tp->dev);
9947
9948         err = tg3_power_up(tp);
9949         if (err)
9950                 return err;
9951
9952         tg3_full_lock(tp, 0);
9953
9954         tg3_disable_ints(tp);
9955         tg3_flag_clear(tp, INIT_COMPLETE);
9956
9957         tg3_full_unlock(tp);
9958
9959         /*
9960          * Setup interrupts first so we know how
9961          * many NAPI resources to allocate
9962          */
9963         tg3_ints_init(tp);
9964
9965         tg3_rss_check_indir_tbl(tp);
9966
9967         /* The placement of this call is tied
9968          * to the setup and use of Host TX descriptors.
9969          */
9970         err = tg3_alloc_consistent(tp);
9971         if (err)
9972                 goto err_out1;
9973
9974         tg3_napi_init(tp);
9975
9976         tg3_napi_enable(tp);
9977
9978         for (i = 0; i < tp->irq_cnt; i++) {
9979                 struct tg3_napi *tnapi = &tp->napi[i];
9980                 err = tg3_request_irq(tp, i);
9981                 if (err) {
9982                         for (i--; i >= 0; i--) {
9983                                 tnapi = &tp->napi[i];
9984                                 free_irq(tnapi->irq_vec, tnapi);
9985                         }
9986                         goto err_out2;
9987                 }
9988         }
9989
9990         tg3_full_lock(tp, 0);
9991
9992         err = tg3_init_hw(tp, 1);
9993         if (err) {
9994                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9995                 tg3_free_rings(tp);
9996         } else {
9997                 if (tg3_flag(tp, TAGGED_STATUS) &&
9998                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9999                     !tg3_flag(tp, 57765_CLASS))
10000                         tp->timer_offset = HZ;
10001                 else
10002                         tp->timer_offset = HZ / 10;
10003
10004                 BUG_ON(tp->timer_offset > HZ);
10005                 tp->timer_counter = tp->timer_multiplier =
10006                         (HZ / tp->timer_offset);
10007                 tp->asf_counter = tp->asf_multiplier =
10008                         ((HZ / tp->timer_offset) * 2);
10009
10010                 init_timer(&tp->timer);
10011                 tp->timer.expires = jiffies + tp->timer_offset;
10012                 tp->timer.data = (unsigned long) tp;
10013                 tp->timer.function = tg3_timer;
10014         }
10015
10016         tg3_full_unlock(tp);
10017
10018         if (err)
10019                 goto err_out3;
10020
10021         if (tg3_flag(tp, USING_MSI)) {
10022                 err = tg3_test_msi(tp);
10023
10024                 if (err) {
10025                         tg3_full_lock(tp, 0);
10026                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10027                         tg3_free_rings(tp);
10028                         tg3_full_unlock(tp);
10029
10030                         goto err_out2;
10031                 }
10032
10033                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10034                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10035
10036                         tw32(PCIE_TRANSACTION_CFG,
10037                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10038                 }
10039         }
10040
10041         tg3_phy_start(tp);
10042
10043         tg3_full_lock(tp, 0);
10044
10045         add_timer(&tp->timer);
10046         tg3_flag_set(tp, INIT_COMPLETE);
10047         tg3_enable_ints(tp);
10048
10049         tg3_full_unlock(tp);
10050
10051         netif_tx_start_all_queues(dev);
10052
10053         /*
10054          * Reset loopback feature if it was turned on while the device was down
10055          * make sure that it's installed properly now.
10056          */
10057         if (dev->features & NETIF_F_LOOPBACK)
10058                 tg3_set_loopback(dev, dev->features);
10059
10060         return 0;
10061
10062 err_out3:
10063         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10064                 struct tg3_napi *tnapi = &tp->napi[i];
10065                 free_irq(tnapi->irq_vec, tnapi);
10066         }
10067
10068 err_out2:
10069         tg3_napi_disable(tp);
10070         tg3_napi_fini(tp);
10071         tg3_free_consistent(tp);
10072
10073 err_out1:
10074         tg3_ints_fini(tp);
10075         tg3_frob_aux_power(tp, false);
10076         pci_set_power_state(tp->pdev, PCI_D3hot);
10077         return err;
10078 }
10079
10080 static int tg3_close(struct net_device *dev)
10081 {
10082         int i;
10083         struct tg3 *tp = netdev_priv(dev);
10084
10085         tg3_napi_disable(tp);
10086         tg3_reset_task_cancel(tp);
10087
10088         netif_tx_stop_all_queues(dev);
10089
10090         del_timer_sync(&tp->timer);
10091
10092         tg3_phy_stop(tp);
10093
10094         tg3_full_lock(tp, 1);
10095
10096         tg3_disable_ints(tp);
10097
10098         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10099         tg3_free_rings(tp);
10100         tg3_flag_clear(tp, INIT_COMPLETE);
10101
10102         tg3_full_unlock(tp);
10103
10104         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10105                 struct tg3_napi *tnapi = &tp->napi[i];
10106                 free_irq(tnapi->irq_vec, tnapi);
10107         }
10108
10109         tg3_ints_fini(tp);
10110
10111         /* Clear stats across close / open calls */
10112         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10113         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10114
10115         tg3_napi_fini(tp);
10116
10117         tg3_free_consistent(tp);
10118
10119         tg3_power_down(tp);
10120
10121         netif_carrier_off(tp->dev);
10122
10123         return 0;
10124 }
10125
10126 static inline u64 get_stat64(tg3_stat64_t *val)
10127 {
10128        return ((u64)val->high << 32) | ((u64)val->low);
10129 }
10130
10131 static u64 calc_crc_errors(struct tg3 *tp)
10132 {
10133         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10134
10135         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10136             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10137              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10138                 u32 val;
10139
10140                 spin_lock_bh(&tp->lock);
10141                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10142                         tg3_writephy(tp, MII_TG3_TEST1,
10143                                      val | MII_TG3_TEST1_CRC_EN);
10144                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10145                 } else
10146                         val = 0;
10147                 spin_unlock_bh(&tp->lock);
10148
10149                 tp->phy_crc_errors += val;
10150
10151                 return tp->phy_crc_errors;
10152         }
10153
10154         return get_stat64(&hw_stats->rx_fcs_errors);
10155 }
10156
10157 #define ESTAT_ADD(member) \
10158         estats->member =        old_estats->member + \
10159                                 get_stat64(&hw_stats->member)
10160
10161 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
10162                                                struct tg3_ethtool_stats *estats)
10163 {
10164         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10165         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10166
10167         ESTAT_ADD(rx_octets);
10168         ESTAT_ADD(rx_fragments);
10169         ESTAT_ADD(rx_ucast_packets);
10170         ESTAT_ADD(rx_mcast_packets);
10171         ESTAT_ADD(rx_bcast_packets);
10172         ESTAT_ADD(rx_fcs_errors);
10173         ESTAT_ADD(rx_align_errors);
10174         ESTAT_ADD(rx_xon_pause_rcvd);
10175         ESTAT_ADD(rx_xoff_pause_rcvd);
10176         ESTAT_ADD(rx_mac_ctrl_rcvd);
10177         ESTAT_ADD(rx_xoff_entered);
10178         ESTAT_ADD(rx_frame_too_long_errors);
10179         ESTAT_ADD(rx_jabbers);
10180         ESTAT_ADD(rx_undersize_packets);
10181         ESTAT_ADD(rx_in_length_errors);
10182         ESTAT_ADD(rx_out_length_errors);
10183         ESTAT_ADD(rx_64_or_less_octet_packets);
10184         ESTAT_ADD(rx_65_to_127_octet_packets);
10185         ESTAT_ADD(rx_128_to_255_octet_packets);
10186         ESTAT_ADD(rx_256_to_511_octet_packets);
10187         ESTAT_ADD(rx_512_to_1023_octet_packets);
10188         ESTAT_ADD(rx_1024_to_1522_octet_packets);
10189         ESTAT_ADD(rx_1523_to_2047_octet_packets);
10190         ESTAT_ADD(rx_2048_to_4095_octet_packets);
10191         ESTAT_ADD(rx_4096_to_8191_octet_packets);
10192         ESTAT_ADD(rx_8192_to_9022_octet_packets);
10193
10194         ESTAT_ADD(tx_octets);
10195         ESTAT_ADD(tx_collisions);
10196         ESTAT_ADD(tx_xon_sent);
10197         ESTAT_ADD(tx_xoff_sent);
10198         ESTAT_ADD(tx_flow_control);
10199         ESTAT_ADD(tx_mac_errors);
10200         ESTAT_ADD(tx_single_collisions);
10201         ESTAT_ADD(tx_mult_collisions);
10202         ESTAT_ADD(tx_deferred);
10203         ESTAT_ADD(tx_excessive_collisions);
10204         ESTAT_ADD(tx_late_collisions);
10205         ESTAT_ADD(tx_collide_2times);
10206         ESTAT_ADD(tx_collide_3times);
10207         ESTAT_ADD(tx_collide_4times);
10208         ESTAT_ADD(tx_collide_5times);
10209         ESTAT_ADD(tx_collide_6times);
10210         ESTAT_ADD(tx_collide_7times);
10211         ESTAT_ADD(tx_collide_8times);
10212         ESTAT_ADD(tx_collide_9times);
10213         ESTAT_ADD(tx_collide_10times);
10214         ESTAT_ADD(tx_collide_11times);
10215         ESTAT_ADD(tx_collide_12times);
10216         ESTAT_ADD(tx_collide_13times);
10217         ESTAT_ADD(tx_collide_14times);
10218         ESTAT_ADD(tx_collide_15times);
10219         ESTAT_ADD(tx_ucast_packets);
10220         ESTAT_ADD(tx_mcast_packets);
10221         ESTAT_ADD(tx_bcast_packets);
10222         ESTAT_ADD(tx_carrier_sense_errors);
10223         ESTAT_ADD(tx_discards);
10224         ESTAT_ADD(tx_errors);
10225
10226         ESTAT_ADD(dma_writeq_full);
10227         ESTAT_ADD(dma_write_prioq_full);
10228         ESTAT_ADD(rxbds_empty);
10229         ESTAT_ADD(rx_discards);
10230         ESTAT_ADD(rx_errors);
10231         ESTAT_ADD(rx_threshold_hit);
10232
10233         ESTAT_ADD(dma_readq_full);
10234         ESTAT_ADD(dma_read_prioq_full);
10235         ESTAT_ADD(tx_comp_queue_full);
10236
10237         ESTAT_ADD(ring_set_send_prod_index);
10238         ESTAT_ADD(ring_status_update);
10239         ESTAT_ADD(nic_irqs);
10240         ESTAT_ADD(nic_avoided_irqs);
10241         ESTAT_ADD(nic_tx_threshold_hit);
10242
10243         ESTAT_ADD(mbuf_lwm_thresh_hit);
10244
10245         return estats;
10246 }
10247
10248 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
10249                                                  struct rtnl_link_stats64 *stats)
10250 {
10251         struct tg3 *tp = netdev_priv(dev);
10252         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10253         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10254
10255         if (!hw_stats)
10256                 return old_stats;
10257
10258         stats->rx_packets = old_stats->rx_packets +
10259                 get_stat64(&hw_stats->rx_ucast_packets) +
10260                 get_stat64(&hw_stats->rx_mcast_packets) +
10261                 get_stat64(&hw_stats->rx_bcast_packets);
10262
10263         stats->tx_packets = old_stats->tx_packets +
10264                 get_stat64(&hw_stats->tx_ucast_packets) +
10265                 get_stat64(&hw_stats->tx_mcast_packets) +
10266                 get_stat64(&hw_stats->tx_bcast_packets);
10267
10268         stats->rx_bytes = old_stats->rx_bytes +
10269                 get_stat64(&hw_stats->rx_octets);
10270         stats->tx_bytes = old_stats->tx_bytes +
10271                 get_stat64(&hw_stats->tx_octets);
10272
10273         stats->rx_errors = old_stats->rx_errors +
10274                 get_stat64(&hw_stats->rx_errors);
10275         stats->tx_errors = old_stats->tx_errors +
10276                 get_stat64(&hw_stats->tx_errors) +
10277                 get_stat64(&hw_stats->tx_mac_errors) +
10278                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10279                 get_stat64(&hw_stats->tx_discards);
10280
10281         stats->multicast = old_stats->multicast +
10282                 get_stat64(&hw_stats->rx_mcast_packets);
10283         stats->collisions = old_stats->collisions +
10284                 get_stat64(&hw_stats->tx_collisions);
10285
10286         stats->rx_length_errors = old_stats->rx_length_errors +
10287                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10288                 get_stat64(&hw_stats->rx_undersize_packets);
10289
10290         stats->rx_over_errors = old_stats->rx_over_errors +
10291                 get_stat64(&hw_stats->rxbds_empty);
10292         stats->rx_frame_errors = old_stats->rx_frame_errors +
10293                 get_stat64(&hw_stats->rx_align_errors);
10294         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10295                 get_stat64(&hw_stats->tx_discards);
10296         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10297                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10298
10299         stats->rx_crc_errors = old_stats->rx_crc_errors +
10300                 calc_crc_errors(tp);
10301
10302         stats->rx_missed_errors = old_stats->rx_missed_errors +
10303                 get_stat64(&hw_stats->rx_discards);
10304
10305         stats->rx_dropped = tp->rx_dropped;
10306         stats->tx_dropped = tp->tx_dropped;
10307
10308         return stats;
10309 }
10310
10311 static int tg3_get_regs_len(struct net_device *dev)
10312 {
10313         return TG3_REG_BLK_SIZE;
10314 }
10315
10316 static void tg3_get_regs(struct net_device *dev,
10317                 struct ethtool_regs *regs, void *_p)
10318 {
10319         struct tg3 *tp = netdev_priv(dev);
10320
10321         regs->version = 0;
10322
10323         memset(_p, 0, TG3_REG_BLK_SIZE);
10324
10325         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10326                 return;
10327
10328         tg3_full_lock(tp, 0);
10329
10330         tg3_dump_legacy_regs(tp, (u32 *)_p);
10331
10332         tg3_full_unlock(tp);
10333 }
10334
10335 static int tg3_get_eeprom_len(struct net_device *dev)
10336 {
10337         struct tg3 *tp = netdev_priv(dev);
10338
10339         return tp->nvram_size;
10340 }
10341
10342 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10343 {
10344         struct tg3 *tp = netdev_priv(dev);
10345         int ret;
10346         u8  *pd;
10347         u32 i, offset, len, b_offset, b_count;
10348         __be32 val;
10349
10350         if (tg3_flag(tp, NO_NVRAM))
10351                 return -EINVAL;
10352
10353         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10354                 return -EAGAIN;
10355
10356         offset = eeprom->offset;
10357         len = eeprom->len;
10358         eeprom->len = 0;
10359
10360         eeprom->magic = TG3_EEPROM_MAGIC;
10361
10362         if (offset & 3) {
10363                 /* adjustments to start on required 4 byte boundary */
10364                 b_offset = offset & 3;
10365                 b_count = 4 - b_offset;
10366                 if (b_count > len) {
10367                         /* i.e. offset=1 len=2 */
10368                         b_count = len;
10369                 }
10370                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10371                 if (ret)
10372                         return ret;
10373                 memcpy(data, ((char *)&val) + b_offset, b_count);
10374                 len -= b_count;
10375                 offset += b_count;
10376                 eeprom->len += b_count;
10377         }
10378
10379         /* read bytes up to the last 4 byte boundary */
10380         pd = &data[eeprom->len];
10381         for (i = 0; i < (len - (len & 3)); i += 4) {
10382                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10383                 if (ret) {
10384                         eeprom->len += i;
10385                         return ret;
10386                 }
10387                 memcpy(pd + i, &val, 4);
10388         }
10389         eeprom->len += i;
10390
10391         if (len & 3) {
10392                 /* read last bytes not ending on 4 byte boundary */
10393                 pd = &data[eeprom->len];
10394                 b_count = len & 3;
10395                 b_offset = offset + len - b_count;
10396                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10397                 if (ret)
10398                         return ret;
10399                 memcpy(pd, &val, b_count);
10400                 eeprom->len += b_count;
10401         }
10402         return 0;
10403 }
10404
10405 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10406 {
10407         struct tg3 *tp = netdev_priv(dev);
10408         int ret;
10409         u32 offset, len, b_offset, odd_len;
10410         u8 *buf;
10411         __be32 start, end;
10412
10413         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10414                 return -EAGAIN;
10415
10416         if (tg3_flag(tp, NO_NVRAM) ||
10417             eeprom->magic != TG3_EEPROM_MAGIC)
10418                 return -EINVAL;
10419
10420         offset = eeprom->offset;
10421         len = eeprom->len;
10422
10423         if ((b_offset = (offset & 3))) {
10424                 /* adjustments to start on required 4 byte boundary */
10425                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10426                 if (ret)
10427                         return ret;
10428                 len += b_offset;
10429                 offset &= ~3;
10430                 if (len < 4)
10431                         len = 4;
10432         }
10433
10434         odd_len = 0;
10435         if (len & 3) {
10436                 /* adjustments to end on required 4 byte boundary */
10437                 odd_len = 1;
10438                 len = (len + 3) & ~3;
10439                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10440                 if (ret)
10441                         return ret;
10442         }
10443
10444         buf = data;
10445         if (b_offset || odd_len) {
10446                 buf = kmalloc(len, GFP_KERNEL);
10447                 if (!buf)
10448                         return -ENOMEM;
10449                 if (b_offset)
10450                         memcpy(buf, &start, 4);
10451                 if (odd_len)
10452                         memcpy(buf+len-4, &end, 4);
10453                 memcpy(buf + b_offset, data, eeprom->len);
10454         }
10455
10456         ret = tg3_nvram_write_block(tp, offset, len, buf);
10457
10458         if (buf != data)
10459                 kfree(buf);
10460
10461         return ret;
10462 }
10463
10464 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10465 {
10466         struct tg3 *tp = netdev_priv(dev);
10467
10468         if (tg3_flag(tp, USE_PHYLIB)) {
10469                 struct phy_device *phydev;
10470                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10471                         return -EAGAIN;
10472                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10473                 return phy_ethtool_gset(phydev, cmd);
10474         }
10475
10476         cmd->supported = (SUPPORTED_Autoneg);
10477
10478         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10479                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10480                                    SUPPORTED_1000baseT_Full);
10481
10482         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10483                 cmd->supported |= (SUPPORTED_100baseT_Half |
10484                                   SUPPORTED_100baseT_Full |
10485                                   SUPPORTED_10baseT_Half |
10486                                   SUPPORTED_10baseT_Full |
10487                                   SUPPORTED_TP);
10488                 cmd->port = PORT_TP;
10489         } else {
10490                 cmd->supported |= SUPPORTED_FIBRE;
10491                 cmd->port = PORT_FIBRE;
10492         }
10493
10494         cmd->advertising = tp->link_config.advertising;
10495         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10496                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10497                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10498                                 cmd->advertising |= ADVERTISED_Pause;
10499                         } else {
10500                                 cmd->advertising |= ADVERTISED_Pause |
10501                                                     ADVERTISED_Asym_Pause;
10502                         }
10503                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10504                         cmd->advertising |= ADVERTISED_Asym_Pause;
10505                 }
10506         }
10507         if (netif_running(dev) && netif_carrier_ok(dev)) {
10508                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10509                 cmd->duplex = tp->link_config.active_duplex;
10510                 cmd->lp_advertising = tp->link_config.rmt_adv;
10511                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10512                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10513                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10514                         else
10515                                 cmd->eth_tp_mdix = ETH_TP_MDI;
10516                 }
10517         } else {
10518                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10519                 cmd->duplex = DUPLEX_UNKNOWN;
10520                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10521         }
10522         cmd->phy_address = tp->phy_addr;
10523         cmd->transceiver = XCVR_INTERNAL;
10524         cmd->autoneg = tp->link_config.autoneg;
10525         cmd->maxtxpkt = 0;
10526         cmd->maxrxpkt = 0;
10527         return 0;
10528 }
10529
10530 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10531 {
10532         struct tg3 *tp = netdev_priv(dev);
10533         u32 speed = ethtool_cmd_speed(cmd);
10534
10535         if (tg3_flag(tp, USE_PHYLIB)) {
10536                 struct phy_device *phydev;
10537                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10538                         return -EAGAIN;
10539                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10540                 return phy_ethtool_sset(phydev, cmd);
10541         }
10542
10543         if (cmd->autoneg != AUTONEG_ENABLE &&
10544             cmd->autoneg != AUTONEG_DISABLE)
10545                 return -EINVAL;
10546
10547         if (cmd->autoneg == AUTONEG_DISABLE &&
10548             cmd->duplex != DUPLEX_FULL &&
10549             cmd->duplex != DUPLEX_HALF)
10550                 return -EINVAL;
10551
10552         if (cmd->autoneg == AUTONEG_ENABLE) {
10553                 u32 mask = ADVERTISED_Autoneg |
10554                            ADVERTISED_Pause |
10555                            ADVERTISED_Asym_Pause;
10556
10557                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10558                         mask |= ADVERTISED_1000baseT_Half |
10559                                 ADVERTISED_1000baseT_Full;
10560
10561                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10562                         mask |= ADVERTISED_100baseT_Half |
10563                                 ADVERTISED_100baseT_Full |
10564                                 ADVERTISED_10baseT_Half |
10565                                 ADVERTISED_10baseT_Full |
10566                                 ADVERTISED_TP;
10567                 else
10568                         mask |= ADVERTISED_FIBRE;
10569
10570                 if (cmd->advertising & ~mask)
10571                         return -EINVAL;
10572
10573                 mask &= (ADVERTISED_1000baseT_Half |
10574                          ADVERTISED_1000baseT_Full |
10575                          ADVERTISED_100baseT_Half |
10576                          ADVERTISED_100baseT_Full |
10577                          ADVERTISED_10baseT_Half |
10578                          ADVERTISED_10baseT_Full);
10579
10580                 cmd->advertising &= mask;
10581         } else {
10582                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10583                         if (speed != SPEED_1000)
10584                                 return -EINVAL;
10585
10586                         if (cmd->duplex != DUPLEX_FULL)
10587                                 return -EINVAL;
10588                 } else {
10589                         if (speed != SPEED_100 &&
10590                             speed != SPEED_10)
10591                                 return -EINVAL;
10592                 }
10593         }
10594
10595         tg3_full_lock(tp, 0);
10596
10597         tp->link_config.autoneg = cmd->autoneg;
10598         if (cmd->autoneg == AUTONEG_ENABLE) {
10599                 tp->link_config.advertising = (cmd->advertising |
10600                                               ADVERTISED_Autoneg);
10601                 tp->link_config.speed = SPEED_UNKNOWN;
10602                 tp->link_config.duplex = DUPLEX_UNKNOWN;
10603         } else {
10604                 tp->link_config.advertising = 0;
10605                 tp->link_config.speed = speed;
10606                 tp->link_config.duplex = cmd->duplex;
10607         }
10608
10609         if (netif_running(dev))
10610                 tg3_setup_phy(tp, 1);
10611
10612         tg3_full_unlock(tp);
10613
10614         return 0;
10615 }
10616
10617 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10618 {
10619         struct tg3 *tp = netdev_priv(dev);
10620
10621         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10622         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10623         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10624         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10625 }
10626
10627 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10628 {
10629         struct tg3 *tp = netdev_priv(dev);
10630
10631         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10632                 wol->supported = WAKE_MAGIC;
10633         else
10634                 wol->supported = 0;
10635         wol->wolopts = 0;
10636         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10637                 wol->wolopts = WAKE_MAGIC;
10638         memset(&wol->sopass, 0, sizeof(wol->sopass));
10639 }
10640
10641 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10642 {
10643         struct tg3 *tp = netdev_priv(dev);
10644         struct device *dp = &tp->pdev->dev;
10645
10646         if (wol->wolopts & ~WAKE_MAGIC)
10647                 return -EINVAL;
10648         if ((wol->wolopts & WAKE_MAGIC) &&
10649             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10650                 return -EINVAL;
10651
10652         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10653
10654         spin_lock_bh(&tp->lock);
10655         if (device_may_wakeup(dp))
10656                 tg3_flag_set(tp, WOL_ENABLE);
10657         else
10658                 tg3_flag_clear(tp, WOL_ENABLE);
10659         spin_unlock_bh(&tp->lock);
10660
10661         return 0;
10662 }
10663
10664 static u32 tg3_get_msglevel(struct net_device *dev)
10665 {
10666         struct tg3 *tp = netdev_priv(dev);
10667         return tp->msg_enable;
10668 }
10669
10670 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10671 {
10672         struct tg3 *tp = netdev_priv(dev);
10673         tp->msg_enable = value;
10674 }
10675
10676 static int tg3_nway_reset(struct net_device *dev)
10677 {
10678         struct tg3 *tp = netdev_priv(dev);
10679         int r;
10680
10681         if (!netif_running(dev))
10682                 return -EAGAIN;
10683
10684         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10685                 return -EINVAL;
10686
10687         if (tg3_flag(tp, USE_PHYLIB)) {
10688                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10689                         return -EAGAIN;
10690                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10691         } else {
10692                 u32 bmcr;
10693
10694                 spin_lock_bh(&tp->lock);
10695                 r = -EINVAL;
10696                 tg3_readphy(tp, MII_BMCR, &bmcr);
10697                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10698                     ((bmcr & BMCR_ANENABLE) ||
10699                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10700                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10701                                                    BMCR_ANENABLE);
10702                         r = 0;
10703                 }
10704                 spin_unlock_bh(&tp->lock);
10705         }
10706
10707         return r;
10708 }
10709
10710 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10711 {
10712         struct tg3 *tp = netdev_priv(dev);
10713
10714         ering->rx_max_pending = tp->rx_std_ring_mask;
10715         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10716                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10717         else
10718                 ering->rx_jumbo_max_pending = 0;
10719
10720         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10721
10722         ering->rx_pending = tp->rx_pending;
10723         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10724                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10725         else
10726                 ering->rx_jumbo_pending = 0;
10727
10728         ering->tx_pending = tp->napi[0].tx_pending;
10729 }
10730
10731 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10732 {
10733         struct tg3 *tp = netdev_priv(dev);
10734         int i, irq_sync = 0, err = 0;
10735
10736         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10737             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10738             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10739             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10740             (tg3_flag(tp, TSO_BUG) &&
10741              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10742                 return -EINVAL;
10743
10744         if (netif_running(dev)) {
10745                 tg3_phy_stop(tp);
10746                 tg3_netif_stop(tp);
10747                 irq_sync = 1;
10748         }
10749
10750         tg3_full_lock(tp, irq_sync);
10751
10752         tp->rx_pending = ering->rx_pending;
10753
10754         if (tg3_flag(tp, MAX_RXPEND_64) &&
10755             tp->rx_pending > 63)
10756                 tp->rx_pending = 63;
10757         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10758
10759         for (i = 0; i < tp->irq_max; i++)
10760                 tp->napi[i].tx_pending = ering->tx_pending;
10761
10762         if (netif_running(dev)) {
10763                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10764                 err = tg3_restart_hw(tp, 1);
10765                 if (!err)
10766                         tg3_netif_start(tp);
10767         }
10768
10769         tg3_full_unlock(tp);
10770
10771         if (irq_sync && !err)
10772                 tg3_phy_start(tp);
10773
10774         return err;
10775 }
10776
10777 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10778 {
10779         struct tg3 *tp = netdev_priv(dev);
10780
10781         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10782
10783         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10784                 epause->rx_pause = 1;
10785         else
10786                 epause->rx_pause = 0;
10787
10788         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10789                 epause->tx_pause = 1;
10790         else
10791                 epause->tx_pause = 0;
10792 }
10793
10794 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10795 {
10796         struct tg3 *tp = netdev_priv(dev);
10797         int err = 0;
10798
10799         if (tg3_flag(tp, USE_PHYLIB)) {
10800                 u32 newadv;
10801                 struct phy_device *phydev;
10802
10803                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10804
10805                 if (!(phydev->supported & SUPPORTED_Pause) ||
10806                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10807                      (epause->rx_pause != epause->tx_pause)))
10808                         return -EINVAL;
10809
10810                 tp->link_config.flowctrl = 0;
10811                 if (epause->rx_pause) {
10812                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10813
10814                         if (epause->tx_pause) {
10815                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10816                                 newadv = ADVERTISED_Pause;
10817                         } else
10818                                 newadv = ADVERTISED_Pause |
10819                                          ADVERTISED_Asym_Pause;
10820                 } else if (epause->tx_pause) {
10821                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10822                         newadv = ADVERTISED_Asym_Pause;
10823                 } else
10824                         newadv = 0;
10825
10826                 if (epause->autoneg)
10827                         tg3_flag_set(tp, PAUSE_AUTONEG);
10828                 else
10829                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10830
10831                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10832                         u32 oldadv = phydev->advertising &
10833                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10834                         if (oldadv != newadv) {
10835                                 phydev->advertising &=
10836                                         ~(ADVERTISED_Pause |
10837                                           ADVERTISED_Asym_Pause);
10838                                 phydev->advertising |= newadv;
10839                                 if (phydev->autoneg) {
10840                                         /*
10841                                          * Always renegotiate the link to
10842                                          * inform our link partner of our
10843                                          * flow control settings, even if the
10844                                          * flow control is forced.  Let
10845                                          * tg3_adjust_link() do the final
10846                                          * flow control setup.
10847                                          */
10848                                         return phy_start_aneg(phydev);
10849                                 }
10850                         }
10851
10852                         if (!epause->autoneg)
10853                                 tg3_setup_flow_control(tp, 0, 0);
10854                 } else {
10855                         tp->link_config.advertising &=
10856                                         ~(ADVERTISED_Pause |
10857                                           ADVERTISED_Asym_Pause);
10858                         tp->link_config.advertising |= newadv;
10859                 }
10860         } else {
10861                 int irq_sync = 0;
10862
10863                 if (netif_running(dev)) {
10864                         tg3_netif_stop(tp);
10865                         irq_sync = 1;
10866                 }
10867
10868                 tg3_full_lock(tp, irq_sync);
10869
10870                 if (epause->autoneg)
10871                         tg3_flag_set(tp, PAUSE_AUTONEG);
10872                 else
10873                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10874                 if (epause->rx_pause)
10875                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10876                 else
10877                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10878                 if (epause->tx_pause)
10879                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10880                 else
10881                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10882
10883                 if (netif_running(dev)) {
10884                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10885                         err = tg3_restart_hw(tp, 1);
10886                         if (!err)
10887                                 tg3_netif_start(tp);
10888                 }
10889
10890                 tg3_full_unlock(tp);
10891         }
10892
10893         return err;
10894 }
10895
10896 static int tg3_get_sset_count(struct net_device *dev, int sset)
10897 {
10898         switch (sset) {
10899         case ETH_SS_TEST:
10900                 return TG3_NUM_TEST;
10901         case ETH_SS_STATS:
10902                 return TG3_NUM_STATS;
10903         default:
10904                 return -EOPNOTSUPP;
10905         }
10906 }
10907
10908 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10909                          u32 *rules __always_unused)
10910 {
10911         struct tg3 *tp = netdev_priv(dev);
10912
10913         if (!tg3_flag(tp, SUPPORT_MSIX))
10914                 return -EOPNOTSUPP;
10915
10916         switch (info->cmd) {
10917         case ETHTOOL_GRXRINGS:
10918                 if (netif_running(tp->dev))
10919                         info->data = tp->irq_cnt;
10920                 else {
10921                         info->data = num_online_cpus();
10922                         if (info->data > TG3_IRQ_MAX_VECS_RSS)
10923                                 info->data = TG3_IRQ_MAX_VECS_RSS;
10924                 }
10925
10926                 /* The first interrupt vector only
10927                  * handles link interrupts.
10928                  */
10929                 info->data -= 1;
10930                 return 0;
10931
10932         default:
10933                 return -EOPNOTSUPP;
10934         }
10935 }
10936
10937 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10938 {
10939         u32 size = 0;
10940         struct tg3 *tp = netdev_priv(dev);
10941
10942         if (tg3_flag(tp, SUPPORT_MSIX))
10943                 size = TG3_RSS_INDIR_TBL_SIZE;
10944
10945         return size;
10946 }
10947
10948 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10949 {
10950         struct tg3 *tp = netdev_priv(dev);
10951         int i;
10952
10953         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10954                 indir[i] = tp->rss_ind_tbl[i];
10955
10956         return 0;
10957 }
10958
10959 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10960 {
10961         struct tg3 *tp = netdev_priv(dev);
10962         size_t i;
10963
10964         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10965                 tp->rss_ind_tbl[i] = indir[i];
10966
10967         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10968                 return 0;
10969
10970         /* It is legal to write the indirection
10971          * table while the device is running.
10972          */
10973         tg3_full_lock(tp, 0);
10974         tg3_rss_write_indir_tbl(tp);
10975         tg3_full_unlock(tp);
10976
10977         return 0;
10978 }
10979
10980 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10981 {
10982         switch (stringset) {
10983         case ETH_SS_STATS:
10984                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10985                 break;
10986         case ETH_SS_TEST:
10987                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10988                 break;
10989         default:
10990                 WARN_ON(1);     /* we need a WARN() */
10991                 break;
10992         }
10993 }
10994
10995 static int tg3_set_phys_id(struct net_device *dev,
10996                             enum ethtool_phys_id_state state)
10997 {
10998         struct tg3 *tp = netdev_priv(dev);
10999
11000         if (!netif_running(tp->dev))
11001                 return -EAGAIN;
11002
11003         switch (state) {
11004         case ETHTOOL_ID_ACTIVE:
11005                 return 1;       /* cycle on/off once per second */
11006
11007         case ETHTOOL_ID_ON:
11008                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11009                      LED_CTRL_1000MBPS_ON |
11010                      LED_CTRL_100MBPS_ON |
11011                      LED_CTRL_10MBPS_ON |
11012                      LED_CTRL_TRAFFIC_OVERRIDE |
11013                      LED_CTRL_TRAFFIC_BLINK |
11014                      LED_CTRL_TRAFFIC_LED);
11015                 break;
11016
11017         case ETHTOOL_ID_OFF:
11018                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11019                      LED_CTRL_TRAFFIC_OVERRIDE);
11020                 break;
11021
11022         case ETHTOOL_ID_INACTIVE:
11023                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11024                 break;
11025         }
11026
11027         return 0;
11028 }
11029
11030 static void tg3_get_ethtool_stats(struct net_device *dev,
11031                                    struct ethtool_stats *estats, u64 *tmp_stats)
11032 {
11033         struct tg3 *tp = netdev_priv(dev);
11034
11035         if (tp->hw_stats)
11036                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11037         else
11038                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11039 }
11040
11041 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11042 {
11043         int i;
11044         __be32 *buf;
11045         u32 offset = 0, len = 0;
11046         u32 magic, val;
11047
11048         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11049                 return NULL;
11050
11051         if (magic == TG3_EEPROM_MAGIC) {
11052                 for (offset = TG3_NVM_DIR_START;
11053                      offset < TG3_NVM_DIR_END;
11054                      offset += TG3_NVM_DIRENT_SIZE) {
11055                         if (tg3_nvram_read(tp, offset, &val))
11056                                 return NULL;
11057
11058                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11059                             TG3_NVM_DIRTYPE_EXTVPD)
11060                                 break;
11061                 }
11062
11063                 if (offset != TG3_NVM_DIR_END) {
11064                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11065                         if (tg3_nvram_read(tp, offset + 4, &offset))
11066                                 return NULL;
11067
11068                         offset = tg3_nvram_logical_addr(tp, offset);
11069                 }
11070         }
11071
11072         if (!offset || !len) {
11073                 offset = TG3_NVM_VPD_OFF;
11074                 len = TG3_NVM_VPD_LEN;
11075         }
11076
11077         buf = kmalloc(len, GFP_KERNEL);
11078         if (buf == NULL)
11079                 return NULL;
11080
11081         if (magic == TG3_EEPROM_MAGIC) {
11082                 for (i = 0; i < len; i += 4) {
11083                         /* The data is in little-endian format in NVRAM.
11084                          * Use the big-endian read routines to preserve
11085                          * the byte order as it exists in NVRAM.
11086                          */
11087                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11088                                 goto error;
11089                 }
11090         } else {
11091                 u8 *ptr;
11092                 ssize_t cnt;
11093                 unsigned int pos = 0;
11094
11095                 ptr = (u8 *)&buf[0];
11096                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11097                         cnt = pci_read_vpd(tp->pdev, pos,
11098                                            len - pos, ptr);
11099                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
11100                                 cnt = 0;
11101                         else if (cnt < 0)
11102                                 goto error;
11103                 }
11104                 if (pos != len)
11105                         goto error;
11106         }
11107
11108         *vpdlen = len;
11109
11110         return buf;
11111
11112 error:
11113         kfree(buf);
11114         return NULL;
11115 }
11116
11117 #define NVRAM_TEST_SIZE 0x100
11118 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11119 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11120 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11121 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11122 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11123 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11124 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11125 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11126
11127 static int tg3_test_nvram(struct tg3 *tp)
11128 {
11129         u32 csum, magic, len;
11130         __be32 *buf;
11131         int i, j, k, err = 0, size;
11132
11133         if (tg3_flag(tp, NO_NVRAM))
11134                 return 0;
11135
11136         if (tg3_nvram_read(tp, 0, &magic) != 0)
11137                 return -EIO;
11138
11139         if (magic == TG3_EEPROM_MAGIC)
11140                 size = NVRAM_TEST_SIZE;
11141         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11142                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11143                     TG3_EEPROM_SB_FORMAT_1) {
11144                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11145                         case TG3_EEPROM_SB_REVISION_0:
11146                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11147                                 break;
11148                         case TG3_EEPROM_SB_REVISION_2:
11149                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11150                                 break;
11151                         case TG3_EEPROM_SB_REVISION_3:
11152                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11153                                 break;
11154                         case TG3_EEPROM_SB_REVISION_4:
11155                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11156                                 break;
11157                         case TG3_EEPROM_SB_REVISION_5:
11158                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11159                                 break;
11160                         case TG3_EEPROM_SB_REVISION_6:
11161                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11162                                 break;
11163                         default:
11164                                 return -EIO;
11165                         }
11166                 } else
11167                         return 0;
11168         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11169                 size = NVRAM_SELFBOOT_HW_SIZE;
11170         else
11171                 return -EIO;
11172
11173         buf = kmalloc(size, GFP_KERNEL);
11174         if (buf == NULL)
11175                 return -ENOMEM;
11176
11177         err = -EIO;
11178         for (i = 0, j = 0; i < size; i += 4, j++) {
11179                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11180                 if (err)
11181                         break;
11182         }
11183         if (i < size)
11184                 goto out;
11185
11186         /* Selfboot format */
11187         magic = be32_to_cpu(buf[0]);
11188         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11189             TG3_EEPROM_MAGIC_FW) {
11190                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11191
11192                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11193                     TG3_EEPROM_SB_REVISION_2) {
11194                         /* For rev 2, the csum doesn't include the MBA. */
11195                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11196                                 csum8 += buf8[i];
11197                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11198                                 csum8 += buf8[i];
11199                 } else {
11200                         for (i = 0; i < size; i++)
11201                                 csum8 += buf8[i];
11202                 }
11203
11204                 if (csum8 == 0) {
11205                         err = 0;
11206                         goto out;
11207                 }
11208
11209                 err = -EIO;
11210                 goto out;
11211         }
11212
11213         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11214             TG3_EEPROM_MAGIC_HW) {
11215                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11216                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11217                 u8 *buf8 = (u8 *) buf;
11218
11219                 /* Separate the parity bits and the data bytes.  */
11220                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11221                         if ((i == 0) || (i == 8)) {
11222                                 int l;
11223                                 u8 msk;
11224
11225                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11226                                         parity[k++] = buf8[i] & msk;
11227                                 i++;
11228                         } else if (i == 16) {
11229                                 int l;
11230                                 u8 msk;
11231
11232                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11233                                         parity[k++] = buf8[i] & msk;
11234                                 i++;
11235
11236                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11237                                         parity[k++] = buf8[i] & msk;
11238                                 i++;
11239                         }
11240                         data[j++] = buf8[i];
11241                 }
11242
11243                 err = -EIO;
11244                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11245                         u8 hw8 = hweight8(data[i]);
11246
11247                         if ((hw8 & 0x1) && parity[i])
11248                                 goto out;
11249                         else if (!(hw8 & 0x1) && !parity[i])
11250                                 goto out;
11251                 }
11252                 err = 0;
11253                 goto out;
11254         }
11255
11256         err = -EIO;
11257
11258         /* Bootstrap checksum at offset 0x10 */
11259         csum = calc_crc((unsigned char *) buf, 0x10);
11260         if (csum != le32_to_cpu(buf[0x10/4]))
11261                 goto out;
11262
11263         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11264         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11265         if (csum != le32_to_cpu(buf[0xfc/4]))
11266                 goto out;
11267
11268         kfree(buf);
11269
11270         buf = tg3_vpd_readblock(tp, &len);
11271         if (!buf)
11272                 return -ENOMEM;
11273
11274         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11275         if (i > 0) {
11276                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11277                 if (j < 0)
11278                         goto out;
11279
11280                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11281                         goto out;
11282
11283                 i += PCI_VPD_LRDT_TAG_SIZE;
11284                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11285                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11286                 if (j > 0) {
11287                         u8 csum8 = 0;
11288
11289                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11290
11291                         for (i = 0; i <= j; i++)
11292                                 csum8 += ((u8 *)buf)[i];
11293
11294                         if (csum8)
11295                                 goto out;
11296                 }
11297         }
11298
11299         err = 0;
11300
11301 out:
11302         kfree(buf);
11303         return err;
11304 }
11305
11306 #define TG3_SERDES_TIMEOUT_SEC  2
11307 #define TG3_COPPER_TIMEOUT_SEC  6
11308
11309 static int tg3_test_link(struct tg3 *tp)
11310 {
11311         int i, max;
11312
11313         if (!netif_running(tp->dev))
11314                 return -ENODEV;
11315
11316         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11317                 max = TG3_SERDES_TIMEOUT_SEC;
11318         else
11319                 max = TG3_COPPER_TIMEOUT_SEC;
11320
11321         for (i = 0; i < max; i++) {
11322                 if (netif_carrier_ok(tp->dev))
11323                         return 0;
11324
11325                 if (msleep_interruptible(1000))
11326                         break;
11327         }
11328
11329         return -EIO;
11330 }
11331
11332 /* Only test the commonly used registers */
11333 static int tg3_test_registers(struct tg3 *tp)
11334 {
11335         int i, is_5705, is_5750;
11336         u32 offset, read_mask, write_mask, val, save_val, read_val;
11337         static struct {
11338                 u16 offset;
11339                 u16 flags;
11340 #define TG3_FL_5705     0x1
11341 #define TG3_FL_NOT_5705 0x2
11342 #define TG3_FL_NOT_5788 0x4
11343 #define TG3_FL_NOT_5750 0x8
11344                 u32 read_mask;
11345                 u32 write_mask;
11346         } reg_tbl[] = {
11347                 /* MAC Control Registers */
11348                 { MAC_MODE, TG3_FL_NOT_5705,
11349                         0x00000000, 0x00ef6f8c },
11350                 { MAC_MODE, TG3_FL_5705,
11351                         0x00000000, 0x01ef6b8c },
11352                 { MAC_STATUS, TG3_FL_NOT_5705,
11353                         0x03800107, 0x00000000 },
11354                 { MAC_STATUS, TG3_FL_5705,
11355                         0x03800100, 0x00000000 },
11356                 { MAC_ADDR_0_HIGH, 0x0000,
11357                         0x00000000, 0x0000ffff },
11358                 { MAC_ADDR_0_LOW, 0x0000,
11359                         0x00000000, 0xffffffff },
11360                 { MAC_RX_MTU_SIZE, 0x0000,
11361                         0x00000000, 0x0000ffff },
11362                 { MAC_TX_MODE, 0x0000,
11363                         0x00000000, 0x00000070 },
11364                 { MAC_TX_LENGTHS, 0x0000,
11365                         0x00000000, 0x00003fff },
11366                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11367                         0x00000000, 0x000007fc },
11368                 { MAC_RX_MODE, TG3_FL_5705,
11369                         0x00000000, 0x000007dc },
11370                 { MAC_HASH_REG_0, 0x0000,
11371                         0x00000000, 0xffffffff },
11372                 { MAC_HASH_REG_1, 0x0000,
11373                         0x00000000, 0xffffffff },
11374                 { MAC_HASH_REG_2, 0x0000,
11375                         0x00000000, 0xffffffff },
11376                 { MAC_HASH_REG_3, 0x0000,
11377                         0x00000000, 0xffffffff },
11378
11379                 /* Receive Data and Receive BD Initiator Control Registers. */
11380                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11381                         0x00000000, 0xffffffff },
11382                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11383                         0x00000000, 0xffffffff },
11384                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11385                         0x00000000, 0x00000003 },
11386                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11387                         0x00000000, 0xffffffff },
11388                 { RCVDBDI_STD_BD+0, 0x0000,
11389                         0x00000000, 0xffffffff },
11390                 { RCVDBDI_STD_BD+4, 0x0000,
11391                         0x00000000, 0xffffffff },
11392                 { RCVDBDI_STD_BD+8, 0x0000,
11393                         0x00000000, 0xffff0002 },
11394                 { RCVDBDI_STD_BD+0xc, 0x0000,
11395                         0x00000000, 0xffffffff },
11396
11397                 /* Receive BD Initiator Control Registers. */
11398                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11399                         0x00000000, 0xffffffff },
11400                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11401                         0x00000000, 0x000003ff },
11402                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11403                         0x00000000, 0xffffffff },
11404
11405                 /* Host Coalescing Control Registers. */
11406                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11407                         0x00000000, 0x00000004 },
11408                 { HOSTCC_MODE, TG3_FL_5705,
11409                         0x00000000, 0x000000f6 },
11410                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11411                         0x00000000, 0xffffffff },
11412                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11413                         0x00000000, 0x000003ff },
11414                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11415                         0x00000000, 0xffffffff },
11416                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11417                         0x00000000, 0x000003ff },
11418                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11419                         0x00000000, 0xffffffff },
11420                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11421                         0x00000000, 0x000000ff },
11422                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11423                         0x00000000, 0xffffffff },
11424                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11425                         0x00000000, 0x000000ff },
11426                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11427                         0x00000000, 0xffffffff },
11428                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11429                         0x00000000, 0xffffffff },
11430                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11431                         0x00000000, 0xffffffff },
11432                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11433                         0x00000000, 0x000000ff },
11434                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11435                         0x00000000, 0xffffffff },
11436                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11437                         0x00000000, 0x000000ff },
11438                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11439                         0x00000000, 0xffffffff },
11440                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11441                         0x00000000, 0xffffffff },
11442                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11443                         0x00000000, 0xffffffff },
11444                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11445                         0x00000000, 0xffffffff },
11446                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11447                         0x00000000, 0xffffffff },
11448                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11449                         0xffffffff, 0x00000000 },
11450                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11451                         0xffffffff, 0x00000000 },
11452
11453                 /* Buffer Manager Control Registers. */
11454                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11455                         0x00000000, 0x007fff80 },
11456                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11457                         0x00000000, 0x007fffff },
11458                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11459                         0x00000000, 0x0000003f },
11460                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11461                         0x00000000, 0x000001ff },
11462                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11463                         0x00000000, 0x000001ff },
11464                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11465                         0xffffffff, 0x00000000 },
11466                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11467                         0xffffffff, 0x00000000 },
11468
11469                 /* Mailbox Registers */
11470                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11471                         0x00000000, 0x000001ff },
11472                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11473                         0x00000000, 0x000001ff },
11474                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11475                         0x00000000, 0x000007ff },
11476                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11477                         0x00000000, 0x000001ff },
11478
11479                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11480         };
11481
11482         is_5705 = is_5750 = 0;
11483         if (tg3_flag(tp, 5705_PLUS)) {
11484                 is_5705 = 1;
11485                 if (tg3_flag(tp, 5750_PLUS))
11486                         is_5750 = 1;
11487         }
11488
11489         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11490                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11491                         continue;
11492
11493                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11494                         continue;
11495
11496                 if (tg3_flag(tp, IS_5788) &&
11497                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11498                         continue;
11499
11500                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11501                         continue;
11502
11503                 offset = (u32) reg_tbl[i].offset;
11504                 read_mask = reg_tbl[i].read_mask;
11505                 write_mask = reg_tbl[i].write_mask;
11506
11507                 /* Save the original register content */
11508                 save_val = tr32(offset);
11509
11510                 /* Determine the read-only value. */
11511                 read_val = save_val & read_mask;
11512
11513                 /* Write zero to the register, then make sure the read-only bits
11514                  * are not changed and the read/write bits are all zeros.
11515                  */
11516                 tw32(offset, 0);
11517
11518                 val = tr32(offset);
11519
11520                 /* Test the read-only and read/write bits. */
11521                 if (((val & read_mask) != read_val) || (val & write_mask))
11522                         goto out;
11523
11524                 /* Write ones to all the bits defined by RdMask and WrMask, then
11525                  * make sure the read-only bits are not changed and the
11526                  * read/write bits are all ones.
11527                  */
11528                 tw32(offset, read_mask | write_mask);
11529
11530                 val = tr32(offset);
11531
11532                 /* Test the read-only bits. */
11533                 if ((val & read_mask) != read_val)
11534                         goto out;
11535
11536                 /* Test the read/write bits. */
11537                 if ((val & write_mask) != write_mask)
11538                         goto out;
11539
11540                 tw32(offset, save_val);
11541         }
11542
11543         return 0;
11544
11545 out:
11546         if (netif_msg_hw(tp))
11547                 netdev_err(tp->dev,
11548                            "Register test failed at offset %x\n", offset);
11549         tw32(offset, save_val);
11550         return -EIO;
11551 }
11552
11553 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11554 {
11555         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11556         int i;
11557         u32 j;
11558
11559         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11560                 for (j = 0; j < len; j += 4) {
11561                         u32 val;
11562
11563                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11564                         tg3_read_mem(tp, offset + j, &val);
11565                         if (val != test_pattern[i])
11566                                 return -EIO;
11567                 }
11568         }
11569         return 0;
11570 }
11571
11572 static int tg3_test_memory(struct tg3 *tp)
11573 {
11574         static struct mem_entry {
11575                 u32 offset;
11576                 u32 len;
11577         } mem_tbl_570x[] = {
11578                 { 0x00000000, 0x00b50},
11579                 { 0x00002000, 0x1c000},
11580                 { 0xffffffff, 0x00000}
11581         }, mem_tbl_5705[] = {
11582                 { 0x00000100, 0x0000c},
11583                 { 0x00000200, 0x00008},
11584                 { 0x00004000, 0x00800},
11585                 { 0x00006000, 0x01000},
11586                 { 0x00008000, 0x02000},
11587                 { 0x00010000, 0x0e000},
11588                 { 0xffffffff, 0x00000}
11589         }, mem_tbl_5755[] = {
11590                 { 0x00000200, 0x00008},
11591                 { 0x00004000, 0x00800},
11592                 { 0x00006000, 0x00800},
11593                 { 0x00008000, 0x02000},
11594                 { 0x00010000, 0x0c000},
11595                 { 0xffffffff, 0x00000}
11596         }, mem_tbl_5906[] = {
11597                 { 0x00000200, 0x00008},
11598                 { 0x00004000, 0x00400},
11599                 { 0x00006000, 0x00400},
11600                 { 0x00008000, 0x01000},
11601                 { 0x00010000, 0x01000},
11602                 { 0xffffffff, 0x00000}
11603         }, mem_tbl_5717[] = {
11604                 { 0x00000200, 0x00008},
11605                 { 0x00010000, 0x0a000},
11606                 { 0x00020000, 0x13c00},
11607                 { 0xffffffff, 0x00000}
11608         }, mem_tbl_57765[] = {
11609                 { 0x00000200, 0x00008},
11610                 { 0x00004000, 0x00800},
11611                 { 0x00006000, 0x09800},
11612                 { 0x00010000, 0x0a000},
11613                 { 0xffffffff, 0x00000}
11614         };
11615         struct mem_entry *mem_tbl;
11616         int err = 0;
11617         int i;
11618
11619         if (tg3_flag(tp, 5717_PLUS))
11620                 mem_tbl = mem_tbl_5717;
11621         else if (tg3_flag(tp, 57765_CLASS))
11622                 mem_tbl = mem_tbl_57765;
11623         else if (tg3_flag(tp, 5755_PLUS))
11624                 mem_tbl = mem_tbl_5755;
11625         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11626                 mem_tbl = mem_tbl_5906;
11627         else if (tg3_flag(tp, 5705_PLUS))
11628                 mem_tbl = mem_tbl_5705;
11629         else
11630                 mem_tbl = mem_tbl_570x;
11631
11632         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11633                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11634                 if (err)
11635                         break;
11636         }
11637
11638         return err;
11639 }
11640
11641 #define TG3_TSO_MSS             500
11642
11643 #define TG3_TSO_IP_HDR_LEN      20
11644 #define TG3_TSO_TCP_HDR_LEN     20
11645 #define TG3_TSO_TCP_OPT_LEN     12
11646
11647 static const u8 tg3_tso_header[] = {
11648 0x08, 0x00,
11649 0x45, 0x00, 0x00, 0x00,
11650 0x00, 0x00, 0x40, 0x00,
11651 0x40, 0x06, 0x00, 0x00,
11652 0x0a, 0x00, 0x00, 0x01,
11653 0x0a, 0x00, 0x00, 0x02,
11654 0x0d, 0x00, 0xe0, 0x00,
11655 0x00, 0x00, 0x01, 0x00,
11656 0x00, 0x00, 0x02, 0x00,
11657 0x80, 0x10, 0x10, 0x00,
11658 0x14, 0x09, 0x00, 0x00,
11659 0x01, 0x01, 0x08, 0x0a,
11660 0x11, 0x11, 0x11, 0x11,
11661 0x11, 0x11, 0x11, 0x11,
11662 };
11663
11664 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11665 {
11666         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11667         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11668         u32 budget;
11669         struct sk_buff *skb;
11670         u8 *tx_data, *rx_data;
11671         dma_addr_t map;
11672         int num_pkts, tx_len, rx_len, i, err;
11673         struct tg3_rx_buffer_desc *desc;
11674         struct tg3_napi *tnapi, *rnapi;
11675         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11676
11677         tnapi = &tp->napi[0];
11678         rnapi = &tp->napi[0];
11679         if (tp->irq_cnt > 1) {
11680                 if (tg3_flag(tp, ENABLE_RSS))
11681                         rnapi = &tp->napi[1];
11682                 if (tg3_flag(tp, ENABLE_TSS))
11683                         tnapi = &tp->napi[1];
11684         }
11685         coal_now = tnapi->coal_now | rnapi->coal_now;
11686
11687         err = -EIO;
11688
11689         tx_len = pktsz;
11690         skb = netdev_alloc_skb(tp->dev, tx_len);
11691         if (!skb)
11692                 return -ENOMEM;
11693
11694         tx_data = skb_put(skb, tx_len);
11695         memcpy(tx_data, tp->dev->dev_addr, 6);
11696         memset(tx_data + 6, 0x0, 8);
11697
11698         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11699
11700         if (tso_loopback) {
11701                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11702
11703                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11704                               TG3_TSO_TCP_OPT_LEN;
11705
11706                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11707                        sizeof(tg3_tso_header));
11708                 mss = TG3_TSO_MSS;
11709
11710                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11711                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11712
11713                 /* Set the total length field in the IP header */
11714                 iph->tot_len = htons((u16)(mss + hdr_len));
11715
11716                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11717                               TXD_FLAG_CPU_POST_DMA);
11718
11719                 if (tg3_flag(tp, HW_TSO_1) ||
11720                     tg3_flag(tp, HW_TSO_2) ||
11721                     tg3_flag(tp, HW_TSO_3)) {
11722                         struct tcphdr *th;
11723                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11724                         th = (struct tcphdr *)&tx_data[val];
11725                         th->check = 0;
11726                 } else
11727                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11728
11729                 if (tg3_flag(tp, HW_TSO_3)) {
11730                         mss |= (hdr_len & 0xc) << 12;
11731                         if (hdr_len & 0x10)
11732                                 base_flags |= 0x00000010;
11733                         base_flags |= (hdr_len & 0x3e0) << 5;
11734                 } else if (tg3_flag(tp, HW_TSO_2))
11735                         mss |= hdr_len << 9;
11736                 else if (tg3_flag(tp, HW_TSO_1) ||
11737                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11738                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11739                 } else {
11740                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11741                 }
11742
11743                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11744         } else {
11745                 num_pkts = 1;
11746                 data_off = ETH_HLEN;
11747         }
11748
11749         for (i = data_off; i < tx_len; i++)
11750                 tx_data[i] = (u8) (i & 0xff);
11751
11752         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11753         if (pci_dma_mapping_error(tp->pdev, map)) {
11754                 dev_kfree_skb(skb);
11755                 return -EIO;
11756         }
11757
11758         val = tnapi->tx_prod;
11759         tnapi->tx_buffers[val].skb = skb;
11760         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11761
11762         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11763                rnapi->coal_now);
11764
11765         udelay(10);
11766
11767         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11768
11769         budget = tg3_tx_avail(tnapi);
11770         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11771                             base_flags | TXD_FLAG_END, mss, 0)) {
11772                 tnapi->tx_buffers[val].skb = NULL;
11773                 dev_kfree_skb(skb);
11774                 return -EIO;
11775         }
11776
11777         tnapi->tx_prod++;
11778
11779         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11780         tr32_mailbox(tnapi->prodmbox);
11781
11782         udelay(10);
11783
11784         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
11785         for (i = 0; i < 35; i++) {
11786                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11787                        coal_now);
11788
11789                 udelay(10);
11790
11791                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11792                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11793                 if ((tx_idx == tnapi->tx_prod) &&
11794                     (rx_idx == (rx_start_idx + num_pkts)))
11795                         break;
11796         }
11797
11798         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11799         dev_kfree_skb(skb);
11800
11801         if (tx_idx != tnapi->tx_prod)
11802                 goto out;
11803
11804         if (rx_idx != rx_start_idx + num_pkts)
11805                 goto out;
11806
11807         val = data_off;
11808         while (rx_idx != rx_start_idx) {
11809                 desc = &rnapi->rx_rcb[rx_start_idx++];
11810                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11811                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11812
11813                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11814                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11815                         goto out;
11816
11817                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11818                          - ETH_FCS_LEN;
11819
11820                 if (!tso_loopback) {
11821                         if (rx_len != tx_len)
11822                                 goto out;
11823
11824                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11825                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11826                                         goto out;
11827                         } else {
11828                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11829                                         goto out;
11830                         }
11831                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11832                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11833                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11834                         goto out;
11835                 }
11836
11837                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11838                         rx_data = tpr->rx_std_buffers[desc_idx].data;
11839                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11840                                              mapping);
11841                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11842                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11843                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11844                                              mapping);
11845                 } else
11846                         goto out;
11847
11848                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11849                                             PCI_DMA_FROMDEVICE);
11850
11851                 rx_data += TG3_RX_OFFSET(tp);
11852                 for (i = data_off; i < rx_len; i++, val++) {
11853                         if (*(rx_data + i) != (u8) (val & 0xff))
11854                                 goto out;
11855                 }
11856         }
11857
11858         err = 0;
11859
11860         /* tg3_free_rings will unmap and free the rx_data */
11861 out:
11862         return err;
11863 }
11864
11865 #define TG3_STD_LOOPBACK_FAILED         1
11866 #define TG3_JMB_LOOPBACK_FAILED         2
11867 #define TG3_TSO_LOOPBACK_FAILED         4
11868 #define TG3_LOOPBACK_FAILED \
11869         (TG3_STD_LOOPBACK_FAILED | \
11870          TG3_JMB_LOOPBACK_FAILED | \
11871          TG3_TSO_LOOPBACK_FAILED)
11872
11873 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11874 {
11875         int err = -EIO;
11876         u32 eee_cap;
11877
11878         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11879         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11880
11881         if (!netif_running(tp->dev)) {
11882                 data[0] = TG3_LOOPBACK_FAILED;
11883                 data[1] = TG3_LOOPBACK_FAILED;
11884                 if (do_extlpbk)
11885                         data[2] = TG3_LOOPBACK_FAILED;
11886                 goto done;
11887         }
11888
11889         err = tg3_reset_hw(tp, 1);
11890         if (err) {
11891                 data[0] = TG3_LOOPBACK_FAILED;
11892                 data[1] = TG3_LOOPBACK_FAILED;
11893                 if (do_extlpbk)
11894                         data[2] = TG3_LOOPBACK_FAILED;
11895                 goto done;
11896         }
11897
11898         if (tg3_flag(tp, ENABLE_RSS)) {
11899                 int i;
11900
11901                 /* Reroute all rx packets to the 1st queue */
11902                 for (i = MAC_RSS_INDIR_TBL_0;
11903                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11904                         tw32(i, 0x0);
11905         }
11906
11907         /* HW errata - mac loopback fails in some cases on 5780.
11908          * Normal traffic and PHY loopback are not affected by
11909          * errata.  Also, the MAC loopback test is deprecated for
11910          * all newer ASIC revisions.
11911          */
11912         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11913             !tg3_flag(tp, CPMU_PRESENT)) {
11914                 tg3_mac_loopback(tp, true);
11915
11916                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11917                         data[0] |= TG3_STD_LOOPBACK_FAILED;
11918
11919                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11920                     tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11921                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
11922
11923                 tg3_mac_loopback(tp, false);
11924         }
11925
11926         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11927             !tg3_flag(tp, USE_PHYLIB)) {
11928                 int i;
11929
11930                 tg3_phy_lpbk_set(tp, 0, false);
11931
11932                 /* Wait for link */
11933                 for (i = 0; i < 100; i++) {
11934                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11935                                 break;
11936                         mdelay(1);
11937                 }
11938
11939                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11940                         data[1] |= TG3_STD_LOOPBACK_FAILED;
11941                 if (tg3_flag(tp, TSO_CAPABLE) &&
11942                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11943                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
11944                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11945                     tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11946                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
11947
11948                 if (do_extlpbk) {
11949                         tg3_phy_lpbk_set(tp, 0, true);
11950
11951                         /* All link indications report up, but the hardware
11952                          * isn't really ready for about 20 msec.  Double it
11953                          * to be sure.
11954                          */
11955                         mdelay(40);
11956
11957                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11958                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
11959                         if (tg3_flag(tp, TSO_CAPABLE) &&
11960                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11961                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11962                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11963                             tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11964                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11965                 }
11966
11967                 /* Re-enable gphy autopowerdown. */
11968                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11969                         tg3_phy_toggle_apd(tp, true);
11970         }
11971
11972         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11973
11974 done:
11975         tp->phy_flags |= eee_cap;
11976
11977         return err;
11978 }
11979
11980 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11981                           u64 *data)
11982 {
11983         struct tg3 *tp = netdev_priv(dev);
11984         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11985
11986         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11987             tg3_power_up(tp)) {
11988                 etest->flags |= ETH_TEST_FL_FAILED;
11989                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11990                 return;
11991         }
11992
11993         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11994
11995         if (tg3_test_nvram(tp) != 0) {
11996                 etest->flags |= ETH_TEST_FL_FAILED;
11997                 data[0] = 1;
11998         }
11999         if (!doextlpbk && tg3_test_link(tp)) {
12000                 etest->flags |= ETH_TEST_FL_FAILED;
12001                 data[1] = 1;
12002         }
12003         if (etest->flags & ETH_TEST_FL_OFFLINE) {
12004                 int err, err2 = 0, irq_sync = 0;
12005
12006                 if (netif_running(dev)) {
12007                         tg3_phy_stop(tp);
12008                         tg3_netif_stop(tp);
12009                         irq_sync = 1;
12010                 }
12011
12012                 tg3_full_lock(tp, irq_sync);
12013
12014                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12015                 err = tg3_nvram_lock(tp);
12016                 tg3_halt_cpu(tp, RX_CPU_BASE);
12017                 if (!tg3_flag(tp, 5705_PLUS))
12018                         tg3_halt_cpu(tp, TX_CPU_BASE);
12019                 if (!err)
12020                         tg3_nvram_unlock(tp);
12021
12022                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12023                         tg3_phy_reset(tp);
12024
12025                 if (tg3_test_registers(tp) != 0) {
12026                         etest->flags |= ETH_TEST_FL_FAILED;
12027                         data[2] = 1;
12028                 }
12029
12030                 if (tg3_test_memory(tp) != 0) {
12031                         etest->flags |= ETH_TEST_FL_FAILED;
12032                         data[3] = 1;
12033                 }
12034
12035                 if (doextlpbk)
12036                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12037
12038                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12039                         etest->flags |= ETH_TEST_FL_FAILED;
12040
12041                 tg3_full_unlock(tp);
12042
12043                 if (tg3_test_interrupt(tp) != 0) {
12044                         etest->flags |= ETH_TEST_FL_FAILED;
12045                         data[7] = 1;
12046                 }
12047
12048                 tg3_full_lock(tp, 0);
12049
12050                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12051                 if (netif_running(dev)) {
12052                         tg3_flag_set(tp, INIT_COMPLETE);
12053                         err2 = tg3_restart_hw(tp, 1);
12054                         if (!err2)
12055                                 tg3_netif_start(tp);
12056                 }
12057
12058                 tg3_full_unlock(tp);
12059
12060                 if (irq_sync && !err2)
12061                         tg3_phy_start(tp);
12062         }
12063         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12064                 tg3_power_down(tp);
12065
12066 }
12067
12068 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12069 {
12070         struct mii_ioctl_data *data = if_mii(ifr);
12071         struct tg3 *tp = netdev_priv(dev);
12072         int err;
12073
12074         if (tg3_flag(tp, USE_PHYLIB)) {
12075                 struct phy_device *phydev;
12076                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12077                         return -EAGAIN;
12078                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12079                 return phy_mii_ioctl(phydev, ifr, cmd);
12080         }
12081
12082         switch (cmd) {
12083         case SIOCGMIIPHY:
12084                 data->phy_id = tp->phy_addr;
12085
12086                 /* fallthru */
12087         case SIOCGMIIREG: {
12088                 u32 mii_regval;
12089
12090                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12091                         break;                  /* We have no PHY */
12092
12093                 if (!netif_running(dev))
12094                         return -EAGAIN;
12095
12096                 spin_lock_bh(&tp->lock);
12097                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12098                 spin_unlock_bh(&tp->lock);
12099
12100                 data->val_out = mii_regval;
12101
12102                 return err;
12103         }
12104
12105         case SIOCSMIIREG:
12106                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12107                         break;                  /* We have no PHY */
12108
12109                 if (!netif_running(dev))
12110                         return -EAGAIN;
12111
12112                 spin_lock_bh(&tp->lock);
12113                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12114                 spin_unlock_bh(&tp->lock);
12115
12116                 return err;
12117
12118         default:
12119                 /* do nothing */
12120                 break;
12121         }
12122         return -EOPNOTSUPP;
12123 }
12124
12125 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12126 {
12127         struct tg3 *tp = netdev_priv(dev);
12128
12129         memcpy(ec, &tp->coal, sizeof(*ec));
12130         return 0;
12131 }
12132
12133 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12134 {
12135         struct tg3 *tp = netdev_priv(dev);
12136         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12137         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12138
12139         if (!tg3_flag(tp, 5705_PLUS)) {
12140                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12141                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12142                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12143                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12144         }
12145
12146         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12147             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12148             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12149             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12150             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12151             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12152             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12153             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12154             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12155             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12156                 return -EINVAL;
12157
12158         /* No rx interrupts will be generated if both are zero */
12159         if ((ec->rx_coalesce_usecs == 0) &&
12160             (ec->rx_max_coalesced_frames == 0))
12161                 return -EINVAL;
12162
12163         /* No tx interrupts will be generated if both are zero */
12164         if ((ec->tx_coalesce_usecs == 0) &&
12165             (ec->tx_max_coalesced_frames == 0))
12166                 return -EINVAL;
12167
12168         /* Only copy relevant parameters, ignore all others. */
12169         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12170         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12171         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12172         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12173         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12174         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12175         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12176         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12177         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12178
12179         if (netif_running(dev)) {
12180                 tg3_full_lock(tp, 0);
12181                 __tg3_set_coalesce(tp, &tp->coal);
12182                 tg3_full_unlock(tp);
12183         }
12184         return 0;
12185 }
12186
12187 static const struct ethtool_ops tg3_ethtool_ops = {
12188         .get_settings           = tg3_get_settings,
12189         .set_settings           = tg3_set_settings,
12190         .get_drvinfo            = tg3_get_drvinfo,
12191         .get_regs_len           = tg3_get_regs_len,
12192         .get_regs               = tg3_get_regs,
12193         .get_wol                = tg3_get_wol,
12194         .set_wol                = tg3_set_wol,
12195         .get_msglevel           = tg3_get_msglevel,
12196         .set_msglevel           = tg3_set_msglevel,
12197         .nway_reset             = tg3_nway_reset,
12198         .get_link               = ethtool_op_get_link,
12199         .get_eeprom_len         = tg3_get_eeprom_len,
12200         .get_eeprom             = tg3_get_eeprom,
12201         .set_eeprom             = tg3_set_eeprom,
12202         .get_ringparam          = tg3_get_ringparam,
12203         .set_ringparam          = tg3_set_ringparam,
12204         .get_pauseparam         = tg3_get_pauseparam,
12205         .set_pauseparam         = tg3_set_pauseparam,
12206         .self_test              = tg3_self_test,
12207         .get_strings            = tg3_get_strings,
12208         .set_phys_id            = tg3_set_phys_id,
12209         .get_ethtool_stats      = tg3_get_ethtool_stats,
12210         .get_coalesce           = tg3_get_coalesce,
12211         .set_coalesce           = tg3_set_coalesce,
12212         .get_sset_count         = tg3_get_sset_count,
12213         .get_rxnfc              = tg3_get_rxnfc,
12214         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12215         .get_rxfh_indir         = tg3_get_rxfh_indir,
12216         .set_rxfh_indir         = tg3_set_rxfh_indir,
12217 };
12218
12219 static void tg3_set_rx_mode(struct net_device *dev)
12220 {
12221         struct tg3 *tp = netdev_priv(dev);
12222
12223         if (!netif_running(dev))
12224                 return;
12225
12226         tg3_full_lock(tp, 0);
12227         __tg3_set_rx_mode(dev);
12228         tg3_full_unlock(tp);
12229 }
12230
12231 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12232                                int new_mtu)
12233 {
12234         dev->mtu = new_mtu;
12235
12236         if (new_mtu > ETH_DATA_LEN) {
12237                 if (tg3_flag(tp, 5780_CLASS)) {
12238                         netdev_update_features(dev);
12239                         tg3_flag_clear(tp, TSO_CAPABLE);
12240                 } else {
12241                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
12242                 }
12243         } else {
12244                 if (tg3_flag(tp, 5780_CLASS)) {
12245                         tg3_flag_set(tp, TSO_CAPABLE);
12246                         netdev_update_features(dev);
12247                 }
12248                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12249         }
12250 }
12251
12252 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12253 {
12254         struct tg3 *tp = netdev_priv(dev);
12255         int err;
12256
12257         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12258                 return -EINVAL;
12259
12260         if (!netif_running(dev)) {
12261                 /* We'll just catch it later when the
12262                  * device is up'd.
12263                  */
12264                 tg3_set_mtu(dev, tp, new_mtu);
12265                 return 0;
12266         }
12267
12268         tg3_phy_stop(tp);
12269
12270         tg3_netif_stop(tp);
12271
12272         tg3_full_lock(tp, 1);
12273
12274         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12275
12276         tg3_set_mtu(dev, tp, new_mtu);
12277
12278         err = tg3_restart_hw(tp, 0);
12279
12280         if (!err)
12281                 tg3_netif_start(tp);
12282
12283         tg3_full_unlock(tp);
12284
12285         if (!err)
12286                 tg3_phy_start(tp);
12287
12288         return err;
12289 }
12290
12291 static const struct net_device_ops tg3_netdev_ops = {
12292         .ndo_open               = tg3_open,
12293         .ndo_stop               = tg3_close,
12294         .ndo_start_xmit         = tg3_start_xmit,
12295         .ndo_get_stats64        = tg3_get_stats64,
12296         .ndo_validate_addr      = eth_validate_addr,
12297         .ndo_set_rx_mode        = tg3_set_rx_mode,
12298         .ndo_set_mac_address    = tg3_set_mac_addr,
12299         .ndo_do_ioctl           = tg3_ioctl,
12300         .ndo_tx_timeout         = tg3_tx_timeout,
12301         .ndo_change_mtu         = tg3_change_mtu,
12302         .ndo_fix_features       = tg3_fix_features,
12303         .ndo_set_features       = tg3_set_features,
12304 #ifdef CONFIG_NET_POLL_CONTROLLER
12305         .ndo_poll_controller    = tg3_poll_controller,
12306 #endif
12307 };
12308
12309 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12310 {
12311         u32 cursize, val, magic;
12312
12313         tp->nvram_size = EEPROM_CHIP_SIZE;
12314
12315         if (tg3_nvram_read(tp, 0, &magic) != 0)
12316                 return;
12317
12318         if ((magic != TG3_EEPROM_MAGIC) &&
12319             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12320             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12321                 return;
12322
12323         /*
12324          * Size the chip by reading offsets at increasing powers of two.
12325          * When we encounter our validation signature, we know the addressing
12326          * has wrapped around, and thus have our chip size.
12327          */
12328         cursize = 0x10;
12329
12330         while (cursize < tp->nvram_size) {
12331                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12332                         return;
12333
12334                 if (val == magic)
12335                         break;
12336
12337                 cursize <<= 1;
12338         }
12339
12340         tp->nvram_size = cursize;
12341 }
12342
12343 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12344 {
12345         u32 val;
12346
12347         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12348                 return;
12349
12350         /* Selfboot format */
12351         if (val != TG3_EEPROM_MAGIC) {
12352                 tg3_get_eeprom_size(tp);
12353                 return;
12354         }
12355
12356         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12357                 if (val != 0) {
12358                         /* This is confusing.  We want to operate on the
12359                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12360                          * call will read from NVRAM and byteswap the data
12361                          * according to the byteswapping settings for all
12362                          * other register accesses.  This ensures the data we
12363                          * want will always reside in the lower 16-bits.
12364                          * However, the data in NVRAM is in LE format, which
12365                          * means the data from the NVRAM read will always be
12366                          * opposite the endianness of the CPU.  The 16-bit
12367                          * byteswap then brings the data to CPU endianness.
12368                          */
12369                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12370                         return;
12371                 }
12372         }
12373         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12374 }
12375
12376 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12377 {
12378         u32 nvcfg1;
12379
12380         nvcfg1 = tr32(NVRAM_CFG1);
12381         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12382                 tg3_flag_set(tp, FLASH);
12383         } else {
12384                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12385                 tw32(NVRAM_CFG1, nvcfg1);
12386         }
12387
12388         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12389             tg3_flag(tp, 5780_CLASS)) {
12390                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12391                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12392                         tp->nvram_jedecnum = JEDEC_ATMEL;
12393                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12394                         tg3_flag_set(tp, NVRAM_BUFFERED);
12395                         break;
12396                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12397                         tp->nvram_jedecnum = JEDEC_ATMEL;
12398                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12399                         break;
12400                 case FLASH_VENDOR_ATMEL_EEPROM:
12401                         tp->nvram_jedecnum = JEDEC_ATMEL;
12402                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12403                         tg3_flag_set(tp, NVRAM_BUFFERED);
12404                         break;
12405                 case FLASH_VENDOR_ST:
12406                         tp->nvram_jedecnum = JEDEC_ST;
12407                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12408                         tg3_flag_set(tp, NVRAM_BUFFERED);
12409                         break;
12410                 case FLASH_VENDOR_SAIFUN:
12411                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12412                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12413                         break;
12414                 case FLASH_VENDOR_SST_SMALL:
12415                 case FLASH_VENDOR_SST_LARGE:
12416                         tp->nvram_jedecnum = JEDEC_SST;
12417                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12418                         break;
12419                 }
12420         } else {
12421                 tp->nvram_jedecnum = JEDEC_ATMEL;
12422                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12423                 tg3_flag_set(tp, NVRAM_BUFFERED);
12424         }
12425 }
12426
12427 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12428 {
12429         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12430         case FLASH_5752PAGE_SIZE_256:
12431                 tp->nvram_pagesize = 256;
12432                 break;
12433         case FLASH_5752PAGE_SIZE_512:
12434                 tp->nvram_pagesize = 512;
12435                 break;
12436         case FLASH_5752PAGE_SIZE_1K:
12437                 tp->nvram_pagesize = 1024;
12438                 break;
12439         case FLASH_5752PAGE_SIZE_2K:
12440                 tp->nvram_pagesize = 2048;
12441                 break;
12442         case FLASH_5752PAGE_SIZE_4K:
12443                 tp->nvram_pagesize = 4096;
12444                 break;
12445         case FLASH_5752PAGE_SIZE_264:
12446                 tp->nvram_pagesize = 264;
12447                 break;
12448         case FLASH_5752PAGE_SIZE_528:
12449                 tp->nvram_pagesize = 528;
12450                 break;
12451         }
12452 }
12453
12454 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12455 {
12456         u32 nvcfg1;
12457
12458         nvcfg1 = tr32(NVRAM_CFG1);
12459
12460         /* NVRAM protection for TPM */
12461         if (nvcfg1 & (1 << 27))
12462                 tg3_flag_set(tp, PROTECTED_NVRAM);
12463
12464         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12465         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12466         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12467                 tp->nvram_jedecnum = JEDEC_ATMEL;
12468                 tg3_flag_set(tp, NVRAM_BUFFERED);
12469                 break;
12470         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12471                 tp->nvram_jedecnum = JEDEC_ATMEL;
12472                 tg3_flag_set(tp, NVRAM_BUFFERED);
12473                 tg3_flag_set(tp, FLASH);
12474                 break;
12475         case FLASH_5752VENDOR_ST_M45PE10:
12476         case FLASH_5752VENDOR_ST_M45PE20:
12477         case FLASH_5752VENDOR_ST_M45PE40:
12478                 tp->nvram_jedecnum = JEDEC_ST;
12479                 tg3_flag_set(tp, NVRAM_BUFFERED);
12480                 tg3_flag_set(tp, FLASH);
12481                 break;
12482         }
12483
12484         if (tg3_flag(tp, FLASH)) {
12485                 tg3_nvram_get_pagesize(tp, nvcfg1);
12486         } else {
12487                 /* For eeprom, set pagesize to maximum eeprom size */
12488                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12489
12490                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12491                 tw32(NVRAM_CFG1, nvcfg1);
12492         }
12493 }
12494
12495 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12496 {
12497         u32 nvcfg1, protect = 0;
12498
12499         nvcfg1 = tr32(NVRAM_CFG1);
12500
12501         /* NVRAM protection for TPM */
12502         if (nvcfg1 & (1 << 27)) {
12503                 tg3_flag_set(tp, PROTECTED_NVRAM);
12504                 protect = 1;
12505         }
12506
12507         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12508         switch (nvcfg1) {
12509         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12510         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12511         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12512         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12513                 tp->nvram_jedecnum = JEDEC_ATMEL;
12514                 tg3_flag_set(tp, NVRAM_BUFFERED);
12515                 tg3_flag_set(tp, FLASH);
12516                 tp->nvram_pagesize = 264;
12517                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12518                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12519                         tp->nvram_size = (protect ? 0x3e200 :
12520                                           TG3_NVRAM_SIZE_512KB);
12521                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12522                         tp->nvram_size = (protect ? 0x1f200 :
12523                                           TG3_NVRAM_SIZE_256KB);
12524                 else
12525                         tp->nvram_size = (protect ? 0x1f200 :
12526                                           TG3_NVRAM_SIZE_128KB);
12527                 break;
12528         case FLASH_5752VENDOR_ST_M45PE10:
12529         case FLASH_5752VENDOR_ST_M45PE20:
12530         case FLASH_5752VENDOR_ST_M45PE40:
12531                 tp->nvram_jedecnum = JEDEC_ST;
12532                 tg3_flag_set(tp, NVRAM_BUFFERED);
12533                 tg3_flag_set(tp, FLASH);
12534                 tp->nvram_pagesize = 256;
12535                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12536                         tp->nvram_size = (protect ?
12537                                           TG3_NVRAM_SIZE_64KB :
12538                                           TG3_NVRAM_SIZE_128KB);
12539                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12540                         tp->nvram_size = (protect ?
12541                                           TG3_NVRAM_SIZE_64KB :
12542                                           TG3_NVRAM_SIZE_256KB);
12543                 else
12544                         tp->nvram_size = (protect ?
12545                                           TG3_NVRAM_SIZE_128KB :
12546                                           TG3_NVRAM_SIZE_512KB);
12547                 break;
12548         }
12549 }
12550
12551 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12552 {
12553         u32 nvcfg1;
12554
12555         nvcfg1 = tr32(NVRAM_CFG1);
12556
12557         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12558         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12559         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12560         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12561         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12562                 tp->nvram_jedecnum = JEDEC_ATMEL;
12563                 tg3_flag_set(tp, NVRAM_BUFFERED);
12564                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12565
12566                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12567                 tw32(NVRAM_CFG1, nvcfg1);
12568                 break;
12569         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12570         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12571         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12572         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12573                 tp->nvram_jedecnum = JEDEC_ATMEL;
12574                 tg3_flag_set(tp, NVRAM_BUFFERED);
12575                 tg3_flag_set(tp, FLASH);
12576                 tp->nvram_pagesize = 264;
12577                 break;
12578         case FLASH_5752VENDOR_ST_M45PE10:
12579         case FLASH_5752VENDOR_ST_M45PE20:
12580         case FLASH_5752VENDOR_ST_M45PE40:
12581                 tp->nvram_jedecnum = JEDEC_ST;
12582                 tg3_flag_set(tp, NVRAM_BUFFERED);
12583                 tg3_flag_set(tp, FLASH);
12584                 tp->nvram_pagesize = 256;
12585                 break;
12586         }
12587 }
12588
12589 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12590 {
12591         u32 nvcfg1, protect = 0;
12592
12593         nvcfg1 = tr32(NVRAM_CFG1);
12594
12595         /* NVRAM protection for TPM */
12596         if (nvcfg1 & (1 << 27)) {
12597                 tg3_flag_set(tp, PROTECTED_NVRAM);
12598                 protect = 1;
12599         }
12600
12601         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12602         switch (nvcfg1) {
12603         case FLASH_5761VENDOR_ATMEL_ADB021D:
12604         case FLASH_5761VENDOR_ATMEL_ADB041D:
12605         case FLASH_5761VENDOR_ATMEL_ADB081D:
12606         case FLASH_5761VENDOR_ATMEL_ADB161D:
12607         case FLASH_5761VENDOR_ATMEL_MDB021D:
12608         case FLASH_5761VENDOR_ATMEL_MDB041D:
12609         case FLASH_5761VENDOR_ATMEL_MDB081D:
12610         case FLASH_5761VENDOR_ATMEL_MDB161D:
12611                 tp->nvram_jedecnum = JEDEC_ATMEL;
12612                 tg3_flag_set(tp, NVRAM_BUFFERED);
12613                 tg3_flag_set(tp, FLASH);
12614                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12615                 tp->nvram_pagesize = 256;
12616                 break;
12617         case FLASH_5761VENDOR_ST_A_M45PE20:
12618         case FLASH_5761VENDOR_ST_A_M45PE40:
12619         case FLASH_5761VENDOR_ST_A_M45PE80:
12620         case FLASH_5761VENDOR_ST_A_M45PE16:
12621         case FLASH_5761VENDOR_ST_M_M45PE20:
12622         case FLASH_5761VENDOR_ST_M_M45PE40:
12623         case FLASH_5761VENDOR_ST_M_M45PE80:
12624         case FLASH_5761VENDOR_ST_M_M45PE16:
12625                 tp->nvram_jedecnum = JEDEC_ST;
12626                 tg3_flag_set(tp, NVRAM_BUFFERED);
12627                 tg3_flag_set(tp, FLASH);
12628                 tp->nvram_pagesize = 256;
12629                 break;
12630         }
12631
12632         if (protect) {
12633                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12634         } else {
12635                 switch (nvcfg1) {
12636                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12637                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12638                 case FLASH_5761VENDOR_ST_A_M45PE16:
12639                 case FLASH_5761VENDOR_ST_M_M45PE16:
12640                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12641                         break;
12642                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12643                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12644                 case FLASH_5761VENDOR_ST_A_M45PE80:
12645                 case FLASH_5761VENDOR_ST_M_M45PE80:
12646                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12647                         break;
12648                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12649                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12650                 case FLASH_5761VENDOR_ST_A_M45PE40:
12651                 case FLASH_5761VENDOR_ST_M_M45PE40:
12652                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12653                         break;
12654                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12655                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12656                 case FLASH_5761VENDOR_ST_A_M45PE20:
12657                 case FLASH_5761VENDOR_ST_M_M45PE20:
12658                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12659                         break;
12660                 }
12661         }
12662 }
12663
12664 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12665 {
12666         tp->nvram_jedecnum = JEDEC_ATMEL;
12667         tg3_flag_set(tp, NVRAM_BUFFERED);
12668         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12669 }
12670
12671 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12672 {
12673         u32 nvcfg1;
12674
12675         nvcfg1 = tr32(NVRAM_CFG1);
12676
12677         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12678         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12679         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12680                 tp->nvram_jedecnum = JEDEC_ATMEL;
12681                 tg3_flag_set(tp, NVRAM_BUFFERED);
12682                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12683
12684                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12685                 tw32(NVRAM_CFG1, nvcfg1);
12686                 return;
12687         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12688         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12689         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12690         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12691         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12692         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12693         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12694                 tp->nvram_jedecnum = JEDEC_ATMEL;
12695                 tg3_flag_set(tp, NVRAM_BUFFERED);
12696                 tg3_flag_set(tp, FLASH);
12697
12698                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12699                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12700                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12701                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12702                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12703                         break;
12704                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12705                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12706                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12707                         break;
12708                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12709                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12710                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12711                         break;
12712                 }
12713                 break;
12714         case FLASH_5752VENDOR_ST_M45PE10:
12715         case FLASH_5752VENDOR_ST_M45PE20:
12716         case FLASH_5752VENDOR_ST_M45PE40:
12717                 tp->nvram_jedecnum = JEDEC_ST;
12718                 tg3_flag_set(tp, NVRAM_BUFFERED);
12719                 tg3_flag_set(tp, FLASH);
12720
12721                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12722                 case FLASH_5752VENDOR_ST_M45PE10:
12723                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12724                         break;
12725                 case FLASH_5752VENDOR_ST_M45PE20:
12726                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12727                         break;
12728                 case FLASH_5752VENDOR_ST_M45PE40:
12729                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12730                         break;
12731                 }
12732                 break;
12733         default:
12734                 tg3_flag_set(tp, NO_NVRAM);
12735                 return;
12736         }
12737
12738         tg3_nvram_get_pagesize(tp, nvcfg1);
12739         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12740                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12741 }
12742
12743
12744 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12745 {
12746         u32 nvcfg1;
12747
12748         nvcfg1 = tr32(NVRAM_CFG1);
12749
12750         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12751         case FLASH_5717VENDOR_ATMEL_EEPROM:
12752         case FLASH_5717VENDOR_MICRO_EEPROM:
12753                 tp->nvram_jedecnum = JEDEC_ATMEL;
12754                 tg3_flag_set(tp, NVRAM_BUFFERED);
12755                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12756
12757                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12758                 tw32(NVRAM_CFG1, nvcfg1);
12759                 return;
12760         case FLASH_5717VENDOR_ATMEL_MDB011D:
12761         case FLASH_5717VENDOR_ATMEL_ADB011B:
12762         case FLASH_5717VENDOR_ATMEL_ADB011D:
12763         case FLASH_5717VENDOR_ATMEL_MDB021D:
12764         case FLASH_5717VENDOR_ATMEL_ADB021B:
12765         case FLASH_5717VENDOR_ATMEL_ADB021D:
12766         case FLASH_5717VENDOR_ATMEL_45USPT:
12767                 tp->nvram_jedecnum = JEDEC_ATMEL;
12768                 tg3_flag_set(tp, NVRAM_BUFFERED);
12769                 tg3_flag_set(tp, FLASH);
12770
12771                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12772                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12773                         /* Detect size with tg3_nvram_get_size() */
12774                         break;
12775                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12776                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12777                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12778                         break;
12779                 default:
12780                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12781                         break;
12782                 }
12783                 break;
12784         case FLASH_5717VENDOR_ST_M_M25PE10:
12785         case FLASH_5717VENDOR_ST_A_M25PE10:
12786         case FLASH_5717VENDOR_ST_M_M45PE10:
12787         case FLASH_5717VENDOR_ST_A_M45PE10:
12788         case FLASH_5717VENDOR_ST_M_M25PE20:
12789         case FLASH_5717VENDOR_ST_A_M25PE20:
12790         case FLASH_5717VENDOR_ST_M_M45PE20:
12791         case FLASH_5717VENDOR_ST_A_M45PE20:
12792         case FLASH_5717VENDOR_ST_25USPT:
12793         case FLASH_5717VENDOR_ST_45USPT:
12794                 tp->nvram_jedecnum = JEDEC_ST;
12795                 tg3_flag_set(tp, NVRAM_BUFFERED);
12796                 tg3_flag_set(tp, FLASH);
12797
12798                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12799                 case FLASH_5717VENDOR_ST_M_M25PE20:
12800                 case FLASH_5717VENDOR_ST_M_M45PE20:
12801                         /* Detect size with tg3_nvram_get_size() */
12802                         break;
12803                 case FLASH_5717VENDOR_ST_A_M25PE20:
12804                 case FLASH_5717VENDOR_ST_A_M45PE20:
12805                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12806                         break;
12807                 default:
12808                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12809                         break;
12810                 }
12811                 break;
12812         default:
12813                 tg3_flag_set(tp, NO_NVRAM);
12814                 return;
12815         }
12816
12817         tg3_nvram_get_pagesize(tp, nvcfg1);
12818         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12819                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12820 }
12821
12822 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12823 {
12824         u32 nvcfg1, nvmpinstrp;
12825
12826         nvcfg1 = tr32(NVRAM_CFG1);
12827         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12828
12829         switch (nvmpinstrp) {
12830         case FLASH_5720_EEPROM_HD:
12831         case FLASH_5720_EEPROM_LD:
12832                 tp->nvram_jedecnum = JEDEC_ATMEL;
12833                 tg3_flag_set(tp, NVRAM_BUFFERED);
12834
12835                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12836                 tw32(NVRAM_CFG1, nvcfg1);
12837                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12838                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12839                 else
12840                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12841                 return;
12842         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12843         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12844         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12845         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12846         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12847         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12848         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12849         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12850         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12851         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12852         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12853         case FLASH_5720VENDOR_ATMEL_45USPT:
12854                 tp->nvram_jedecnum = JEDEC_ATMEL;
12855                 tg3_flag_set(tp, NVRAM_BUFFERED);
12856                 tg3_flag_set(tp, FLASH);
12857
12858                 switch (nvmpinstrp) {
12859                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12860                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12861                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12862                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12863                         break;
12864                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12865                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12866                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12867                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12868                         break;
12869                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12870                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12871                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12872                         break;
12873                 default:
12874                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12875                         break;
12876                 }
12877                 break;
12878         case FLASH_5720VENDOR_M_ST_M25PE10:
12879         case FLASH_5720VENDOR_M_ST_M45PE10:
12880         case FLASH_5720VENDOR_A_ST_M25PE10:
12881         case FLASH_5720VENDOR_A_ST_M45PE10:
12882         case FLASH_5720VENDOR_M_ST_M25PE20:
12883         case FLASH_5720VENDOR_M_ST_M45PE20:
12884         case FLASH_5720VENDOR_A_ST_M25PE20:
12885         case FLASH_5720VENDOR_A_ST_M45PE20:
12886         case FLASH_5720VENDOR_M_ST_M25PE40:
12887         case FLASH_5720VENDOR_M_ST_M45PE40:
12888         case FLASH_5720VENDOR_A_ST_M25PE40:
12889         case FLASH_5720VENDOR_A_ST_M45PE40:
12890         case FLASH_5720VENDOR_M_ST_M25PE80:
12891         case FLASH_5720VENDOR_M_ST_M45PE80:
12892         case FLASH_5720VENDOR_A_ST_M25PE80:
12893         case FLASH_5720VENDOR_A_ST_M45PE80:
12894         case FLASH_5720VENDOR_ST_25USPT:
12895         case FLASH_5720VENDOR_ST_45USPT:
12896                 tp->nvram_jedecnum = JEDEC_ST;
12897                 tg3_flag_set(tp, NVRAM_BUFFERED);
12898                 tg3_flag_set(tp, FLASH);
12899
12900                 switch (nvmpinstrp) {
12901                 case FLASH_5720VENDOR_M_ST_M25PE20:
12902                 case FLASH_5720VENDOR_M_ST_M45PE20:
12903                 case FLASH_5720VENDOR_A_ST_M25PE20:
12904                 case FLASH_5720VENDOR_A_ST_M45PE20:
12905                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12906                         break;
12907                 case FLASH_5720VENDOR_M_ST_M25PE40:
12908                 case FLASH_5720VENDOR_M_ST_M45PE40:
12909                 case FLASH_5720VENDOR_A_ST_M25PE40:
12910                 case FLASH_5720VENDOR_A_ST_M45PE40:
12911                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12912                         break;
12913                 case FLASH_5720VENDOR_M_ST_M25PE80:
12914                 case FLASH_5720VENDOR_M_ST_M45PE80:
12915                 case FLASH_5720VENDOR_A_ST_M25PE80:
12916                 case FLASH_5720VENDOR_A_ST_M45PE80:
12917                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12918                         break;
12919                 default:
12920                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12921                         break;
12922                 }
12923                 break;
12924         default:
12925                 tg3_flag_set(tp, NO_NVRAM);
12926                 return;
12927         }
12928
12929         tg3_nvram_get_pagesize(tp, nvcfg1);
12930         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12931                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12932 }
12933
12934 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12935 static void __devinit tg3_nvram_init(struct tg3 *tp)
12936 {
12937         tw32_f(GRC_EEPROM_ADDR,
12938              (EEPROM_ADDR_FSM_RESET |
12939               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12940                EEPROM_ADDR_CLKPERD_SHIFT)));
12941
12942         msleep(1);
12943
12944         /* Enable seeprom accesses. */
12945         tw32_f(GRC_LOCAL_CTRL,
12946              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12947         udelay(100);
12948
12949         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12950             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12951                 tg3_flag_set(tp, NVRAM);
12952
12953                 if (tg3_nvram_lock(tp)) {
12954                         netdev_warn(tp->dev,
12955                                     "Cannot get nvram lock, %s failed\n",
12956                                     __func__);
12957                         return;
12958                 }
12959                 tg3_enable_nvram_access(tp);
12960
12961                 tp->nvram_size = 0;
12962
12963                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12964                         tg3_get_5752_nvram_info(tp);
12965                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12966                         tg3_get_5755_nvram_info(tp);
12967                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12968                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12969                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12970                         tg3_get_5787_nvram_info(tp);
12971                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12972                         tg3_get_5761_nvram_info(tp);
12973                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12974                         tg3_get_5906_nvram_info(tp);
12975                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12976                          tg3_flag(tp, 57765_CLASS))
12977                         tg3_get_57780_nvram_info(tp);
12978                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12979                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12980                         tg3_get_5717_nvram_info(tp);
12981                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12982                         tg3_get_5720_nvram_info(tp);
12983                 else
12984                         tg3_get_nvram_info(tp);
12985
12986                 if (tp->nvram_size == 0)
12987                         tg3_get_nvram_size(tp);
12988
12989                 tg3_disable_nvram_access(tp);
12990                 tg3_nvram_unlock(tp);
12991
12992         } else {
12993                 tg3_flag_clear(tp, NVRAM);
12994                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12995
12996                 tg3_get_eeprom_size(tp);
12997         }
12998 }
12999
13000 struct subsys_tbl_ent {
13001         u16 subsys_vendor, subsys_devid;
13002         u32 phy_id;
13003 };
13004
13005 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13006         /* Broadcom boards. */
13007         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13008           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13009         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13010           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13011         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13012           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13013         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13014           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13015         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13016           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13017         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13018           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13019         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13020           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13021         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13022           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13023         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13024           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13025         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13026           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13027         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13028           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13029
13030         /* 3com boards. */
13031         { TG3PCI_SUBVENDOR_ID_3COM,
13032           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13033         { TG3PCI_SUBVENDOR_ID_3COM,
13034           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13035         { TG3PCI_SUBVENDOR_ID_3COM,
13036           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13037         { TG3PCI_SUBVENDOR_ID_3COM,
13038           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13039         { TG3PCI_SUBVENDOR_ID_3COM,
13040           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13041
13042         /* DELL boards. */
13043         { TG3PCI_SUBVENDOR_ID_DELL,
13044           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13045         { TG3PCI_SUBVENDOR_ID_DELL,
13046           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13047         { TG3PCI_SUBVENDOR_ID_DELL,
13048           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13049         { TG3PCI_SUBVENDOR_ID_DELL,
13050           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13051
13052         /* Compaq boards. */
13053         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13054           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13055         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13056           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13057         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13058           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13059         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13060           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13061         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13062           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13063
13064         /* IBM boards. */
13065         { TG3PCI_SUBVENDOR_ID_IBM,
13066           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13067 };
13068
13069 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13070 {
13071         int i;
13072
13073         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13074                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13075                      tp->pdev->subsystem_vendor) &&
13076                     (subsys_id_to_phy_id[i].subsys_devid ==
13077                      tp->pdev->subsystem_device))
13078                         return &subsys_id_to_phy_id[i];
13079         }
13080         return NULL;
13081 }
13082
13083 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13084 {
13085         u32 val;
13086
13087         tp->phy_id = TG3_PHY_ID_INVALID;
13088         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13089
13090         /* Assume an onboard device and WOL capable by default.  */
13091         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13092         tg3_flag_set(tp, WOL_CAP);
13093
13094         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13095                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13096                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13097                         tg3_flag_set(tp, IS_NIC);
13098                 }
13099                 val = tr32(VCPU_CFGSHDW);
13100                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13101                         tg3_flag_set(tp, ASPM_WORKAROUND);
13102                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13103                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13104                         tg3_flag_set(tp, WOL_ENABLE);
13105                         device_set_wakeup_enable(&tp->pdev->dev, true);
13106                 }
13107                 goto done;
13108         }
13109
13110         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13111         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13112                 u32 nic_cfg, led_cfg;
13113                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13114                 int eeprom_phy_serdes = 0;
13115
13116                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13117                 tp->nic_sram_data_cfg = nic_cfg;
13118
13119                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13120                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13121                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13122                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13123                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13124                     (ver > 0) && (ver < 0x100))
13125                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13126
13127                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13128                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13129
13130                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13131                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13132                         eeprom_phy_serdes = 1;
13133
13134                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13135                 if (nic_phy_id != 0) {
13136                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13137                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13138
13139                         eeprom_phy_id  = (id1 >> 16) << 10;
13140                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13141                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13142                 } else
13143                         eeprom_phy_id = 0;
13144
13145                 tp->phy_id = eeprom_phy_id;
13146                 if (eeprom_phy_serdes) {
13147                         if (!tg3_flag(tp, 5705_PLUS))
13148                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13149                         else
13150                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13151                 }
13152
13153                 if (tg3_flag(tp, 5750_PLUS))
13154                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13155                                     SHASTA_EXT_LED_MODE_MASK);
13156                 else
13157                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13158
13159                 switch (led_cfg) {
13160                 default:
13161                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13162                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13163                         break;
13164
13165                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13166                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13167                         break;
13168
13169                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13170                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13171
13172                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13173                          * read on some older 5700/5701 bootcode.
13174                          */
13175                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13176                             ASIC_REV_5700 ||
13177                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13178                             ASIC_REV_5701)
13179                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13180
13181                         break;
13182
13183                 case SHASTA_EXT_LED_SHARED:
13184                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13185                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13186                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13187                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13188                                                  LED_CTRL_MODE_PHY_2);
13189                         break;
13190
13191                 case SHASTA_EXT_LED_MAC:
13192                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13193                         break;
13194
13195                 case SHASTA_EXT_LED_COMBO:
13196                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13197                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13198                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13199                                                  LED_CTRL_MODE_PHY_2);
13200                         break;
13201
13202                 }
13203
13204                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13205                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13206                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13207                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13208
13209                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13210                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13211
13212                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13213                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13214                         if ((tp->pdev->subsystem_vendor ==
13215                              PCI_VENDOR_ID_ARIMA) &&
13216                             (tp->pdev->subsystem_device == 0x205a ||
13217                              tp->pdev->subsystem_device == 0x2063))
13218                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13219                 } else {
13220                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13221                         tg3_flag_set(tp, IS_NIC);
13222                 }
13223
13224                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13225                         tg3_flag_set(tp, ENABLE_ASF);
13226                         if (tg3_flag(tp, 5750_PLUS))
13227                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13228                 }
13229
13230                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13231                     tg3_flag(tp, 5750_PLUS))
13232                         tg3_flag_set(tp, ENABLE_APE);
13233
13234                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13235                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13236                         tg3_flag_clear(tp, WOL_CAP);
13237
13238                 if (tg3_flag(tp, WOL_CAP) &&
13239                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13240                         tg3_flag_set(tp, WOL_ENABLE);
13241                         device_set_wakeup_enable(&tp->pdev->dev, true);
13242                 }
13243
13244                 if (cfg2 & (1 << 17))
13245                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13246
13247                 /* serdes signal pre-emphasis in register 0x590 set by */
13248                 /* bootcode if bit 18 is set */
13249                 if (cfg2 & (1 << 18))
13250                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13251
13252                 if ((tg3_flag(tp, 57765_PLUS) ||
13253                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13254                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13255                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13256                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13257
13258                 if (tg3_flag(tp, PCI_EXPRESS) &&
13259                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13260                     !tg3_flag(tp, 57765_PLUS)) {
13261                         u32 cfg3;
13262
13263                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13264                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13265                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13266                 }
13267
13268                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13269                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13270                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13271                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13272                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13273                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13274         }
13275 done:
13276         if (tg3_flag(tp, WOL_CAP))
13277                 device_set_wakeup_enable(&tp->pdev->dev,
13278                                          tg3_flag(tp, WOL_ENABLE));
13279         else
13280                 device_set_wakeup_capable(&tp->pdev->dev, false);
13281 }
13282
13283 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13284 {
13285         int i;
13286         u32 val;
13287
13288         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13289         tw32(OTP_CTRL, cmd);
13290
13291         /* Wait for up to 1 ms for command to execute. */
13292         for (i = 0; i < 100; i++) {
13293                 val = tr32(OTP_STATUS);
13294                 if (val & OTP_STATUS_CMD_DONE)
13295                         break;
13296                 udelay(10);
13297         }
13298
13299         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13300 }
13301
13302 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13303  * configuration is a 32-bit value that straddles the alignment boundary.
13304  * We do two 32-bit reads and then shift and merge the results.
13305  */
13306 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13307 {
13308         u32 bhalf_otp, thalf_otp;
13309
13310         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13311
13312         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13313                 return 0;
13314
13315         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13316
13317         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13318                 return 0;
13319
13320         thalf_otp = tr32(OTP_READ_DATA);
13321
13322         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13323
13324         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13325                 return 0;
13326
13327         bhalf_otp = tr32(OTP_READ_DATA);
13328
13329         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13330 }
13331
13332 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13333 {
13334         u32 adv = ADVERTISED_Autoneg;
13335
13336         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13337                 adv |= ADVERTISED_1000baseT_Half |
13338                        ADVERTISED_1000baseT_Full;
13339
13340         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13341                 adv |= ADVERTISED_100baseT_Half |
13342                        ADVERTISED_100baseT_Full |
13343                        ADVERTISED_10baseT_Half |
13344                        ADVERTISED_10baseT_Full |
13345                        ADVERTISED_TP;
13346         else
13347                 adv |= ADVERTISED_FIBRE;
13348
13349         tp->link_config.advertising = adv;
13350         tp->link_config.speed = SPEED_UNKNOWN;
13351         tp->link_config.duplex = DUPLEX_UNKNOWN;
13352         tp->link_config.autoneg = AUTONEG_ENABLE;
13353         tp->link_config.active_speed = SPEED_UNKNOWN;
13354         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13355 }
13356
13357 static int __devinit tg3_phy_probe(struct tg3 *tp)
13358 {
13359         u32 hw_phy_id_1, hw_phy_id_2;
13360         u32 hw_phy_id, hw_phy_id_masked;
13361         int err;
13362
13363         /* flow control autonegotiation is default behavior */
13364         tg3_flag_set(tp, PAUSE_AUTONEG);
13365         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13366
13367         if (tg3_flag(tp, USE_PHYLIB))
13368                 return tg3_phy_init(tp);
13369
13370         /* Reading the PHY ID register can conflict with ASF
13371          * firmware access to the PHY hardware.
13372          */
13373         err = 0;
13374         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13375                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13376         } else {
13377                 /* Now read the physical PHY_ID from the chip and verify
13378                  * that it is sane.  If it doesn't look good, we fall back
13379                  * to either the hard-coded table based PHY_ID and failing
13380                  * that the value found in the eeprom area.
13381                  */
13382                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13383                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13384
13385                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13386                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13387                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13388
13389                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13390         }
13391
13392         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13393                 tp->phy_id = hw_phy_id;
13394                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13395                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13396                 else
13397                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13398         } else {
13399                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13400                         /* Do nothing, phy ID already set up in
13401                          * tg3_get_eeprom_hw_cfg().
13402                          */
13403                 } else {
13404                         struct subsys_tbl_ent *p;
13405
13406                         /* No eeprom signature?  Try the hardcoded
13407                          * subsys device table.
13408                          */
13409                         p = tg3_lookup_by_subsys(tp);
13410                         if (!p)
13411                                 return -ENODEV;
13412
13413                         tp->phy_id = p->phy_id;
13414                         if (!tp->phy_id ||
13415                             tp->phy_id == TG3_PHY_ID_BCM8002)
13416                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13417                 }
13418         }
13419
13420         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13421             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13422              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13423              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13424               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13425              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13426               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13427                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13428
13429         tg3_phy_init_link_config(tp);
13430
13431         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13432             !tg3_flag(tp, ENABLE_APE) &&
13433             !tg3_flag(tp, ENABLE_ASF)) {
13434                 u32 bmsr, dummy;
13435
13436                 tg3_readphy(tp, MII_BMSR, &bmsr);
13437                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13438                     (bmsr & BMSR_LSTATUS))
13439                         goto skip_phy_reset;
13440
13441                 err = tg3_phy_reset(tp);
13442                 if (err)
13443                         return err;
13444
13445                 tg3_phy_set_wirespeed(tp);
13446
13447                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13448                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13449                                             tp->link_config.flowctrl);
13450
13451                         tg3_writephy(tp, MII_BMCR,
13452                                      BMCR_ANENABLE | BMCR_ANRESTART);
13453                 }
13454         }
13455
13456 skip_phy_reset:
13457         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13458                 err = tg3_init_5401phy_dsp(tp);
13459                 if (err)
13460                         return err;
13461
13462                 err = tg3_init_5401phy_dsp(tp);
13463         }
13464
13465         return err;
13466 }
13467
13468 static void __devinit tg3_read_vpd(struct tg3 *tp)
13469 {
13470         u8 *vpd_data;
13471         unsigned int block_end, rosize, len;
13472         u32 vpdlen;
13473         int j, i = 0;
13474
13475         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13476         if (!vpd_data)
13477                 goto out_no_vpd;
13478
13479         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13480         if (i < 0)
13481                 goto out_not_found;
13482
13483         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13484         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13485         i += PCI_VPD_LRDT_TAG_SIZE;
13486
13487         if (block_end > vpdlen)
13488                 goto out_not_found;
13489
13490         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13491                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13492         if (j > 0) {
13493                 len = pci_vpd_info_field_size(&vpd_data[j]);
13494
13495                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13496                 if (j + len > block_end || len != 4 ||
13497                     memcmp(&vpd_data[j], "1028", 4))
13498                         goto partno;
13499
13500                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13501                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13502                 if (j < 0)
13503                         goto partno;
13504
13505                 len = pci_vpd_info_field_size(&vpd_data[j]);
13506
13507                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13508                 if (j + len > block_end)
13509                         goto partno;
13510
13511                 memcpy(tp->fw_ver, &vpd_data[j], len);
13512                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13513         }
13514
13515 partno:
13516         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13517                                       PCI_VPD_RO_KEYWORD_PARTNO);
13518         if (i < 0)
13519                 goto out_not_found;
13520
13521         len = pci_vpd_info_field_size(&vpd_data[i]);
13522
13523         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13524         if (len > TG3_BPN_SIZE ||
13525             (len + i) > vpdlen)
13526                 goto out_not_found;
13527
13528         memcpy(tp->board_part_number, &vpd_data[i], len);
13529
13530 out_not_found:
13531         kfree(vpd_data);
13532         if (tp->board_part_number[0])
13533                 return;
13534
13535 out_no_vpd:
13536         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13537                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13538                         strcpy(tp->board_part_number, "BCM5717");
13539                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13540                         strcpy(tp->board_part_number, "BCM5718");
13541                 else
13542                         goto nomatch;
13543         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13544                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13545                         strcpy(tp->board_part_number, "BCM57780");
13546                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13547                         strcpy(tp->board_part_number, "BCM57760");
13548                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13549                         strcpy(tp->board_part_number, "BCM57790");
13550                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13551                         strcpy(tp->board_part_number, "BCM57788");
13552                 else
13553                         goto nomatch;
13554         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13555                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13556                         strcpy(tp->board_part_number, "BCM57761");
13557                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13558                         strcpy(tp->board_part_number, "BCM57765");
13559                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13560                         strcpy(tp->board_part_number, "BCM57781");
13561                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13562                         strcpy(tp->board_part_number, "BCM57785");
13563                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13564                         strcpy(tp->board_part_number, "BCM57791");
13565                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13566                         strcpy(tp->board_part_number, "BCM57795");
13567                 else
13568                         goto nomatch;
13569         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13570                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13571                         strcpy(tp->board_part_number, "BCM57762");
13572                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13573                         strcpy(tp->board_part_number, "BCM57766");
13574                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13575                         strcpy(tp->board_part_number, "BCM57782");
13576                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13577                         strcpy(tp->board_part_number, "BCM57786");
13578                 else
13579                         goto nomatch;
13580         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13581                 strcpy(tp->board_part_number, "BCM95906");
13582         } else {
13583 nomatch:
13584                 strcpy(tp->board_part_number, "none");
13585         }
13586 }
13587
13588 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13589 {
13590         u32 val;
13591
13592         if (tg3_nvram_read(tp, offset, &val) ||
13593             (val & 0xfc000000) != 0x0c000000 ||
13594             tg3_nvram_read(tp, offset + 4, &val) ||
13595             val != 0)
13596                 return 0;
13597
13598         return 1;
13599 }
13600
13601 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13602 {
13603         u32 val, offset, start, ver_offset;
13604         int i, dst_off;
13605         bool newver = false;
13606
13607         if (tg3_nvram_read(tp, 0xc, &offset) ||
13608             tg3_nvram_read(tp, 0x4, &start))
13609                 return;
13610
13611         offset = tg3_nvram_logical_addr(tp, offset);
13612
13613         if (tg3_nvram_read(tp, offset, &val))
13614                 return;
13615
13616         if ((val & 0xfc000000) == 0x0c000000) {
13617                 if (tg3_nvram_read(tp, offset + 4, &val))
13618                         return;
13619
13620                 if (val == 0)
13621                         newver = true;
13622         }
13623
13624         dst_off = strlen(tp->fw_ver);
13625
13626         if (newver) {
13627                 if (TG3_VER_SIZE - dst_off < 16 ||
13628                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13629                         return;
13630
13631                 offset = offset + ver_offset - start;
13632                 for (i = 0; i < 16; i += 4) {
13633                         __be32 v;
13634                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13635                                 return;
13636
13637                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13638                 }
13639         } else {
13640                 u32 major, minor;
13641
13642                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13643                         return;
13644
13645                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13646                         TG3_NVM_BCVER_MAJSFT;
13647                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13648                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13649                          "v%d.%02d", major, minor);
13650         }
13651 }
13652
13653 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13654 {
13655         u32 val, major, minor;
13656
13657         /* Use native endian representation */
13658         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13659                 return;
13660
13661         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13662                 TG3_NVM_HWSB_CFG1_MAJSFT;
13663         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13664                 TG3_NVM_HWSB_CFG1_MINSFT;
13665
13666         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13667 }
13668
13669 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13670 {
13671         u32 offset, major, minor, build;
13672
13673         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13674
13675         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13676                 return;
13677
13678         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13679         case TG3_EEPROM_SB_REVISION_0:
13680                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13681                 break;
13682         case TG3_EEPROM_SB_REVISION_2:
13683                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13684                 break;
13685         case TG3_EEPROM_SB_REVISION_3:
13686                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13687                 break;
13688         case TG3_EEPROM_SB_REVISION_4:
13689                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13690                 break;
13691         case TG3_EEPROM_SB_REVISION_5:
13692                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13693                 break;
13694         case TG3_EEPROM_SB_REVISION_6:
13695                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13696                 break;
13697         default:
13698                 return;
13699         }
13700
13701         if (tg3_nvram_read(tp, offset, &val))
13702                 return;
13703
13704         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13705                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13706         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13707                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13708         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13709
13710         if (minor > 99 || build > 26)
13711                 return;
13712
13713         offset = strlen(tp->fw_ver);
13714         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13715                  " v%d.%02d", major, minor);
13716
13717         if (build > 0) {
13718                 offset = strlen(tp->fw_ver);
13719                 if (offset < TG3_VER_SIZE - 1)
13720                         tp->fw_ver[offset] = 'a' + build - 1;
13721         }
13722 }
13723
13724 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13725 {
13726         u32 val, offset, start;
13727         int i, vlen;
13728
13729         for (offset = TG3_NVM_DIR_START;
13730              offset < TG3_NVM_DIR_END;
13731              offset += TG3_NVM_DIRENT_SIZE) {
13732                 if (tg3_nvram_read(tp, offset, &val))
13733                         return;
13734
13735                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13736                         break;
13737         }
13738
13739         if (offset == TG3_NVM_DIR_END)
13740                 return;
13741
13742         if (!tg3_flag(tp, 5705_PLUS))
13743                 start = 0x08000000;
13744         else if (tg3_nvram_read(tp, offset - 4, &start))
13745                 return;
13746
13747         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13748             !tg3_fw_img_is_valid(tp, offset) ||
13749             tg3_nvram_read(tp, offset + 8, &val))
13750                 return;
13751
13752         offset += val - start;
13753
13754         vlen = strlen(tp->fw_ver);
13755
13756         tp->fw_ver[vlen++] = ',';
13757         tp->fw_ver[vlen++] = ' ';
13758
13759         for (i = 0; i < 4; i++) {
13760                 __be32 v;
13761                 if (tg3_nvram_read_be32(tp, offset, &v))
13762                         return;
13763
13764                 offset += sizeof(v);
13765
13766                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13767                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13768                         break;
13769                 }
13770
13771                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13772                 vlen += sizeof(v);
13773         }
13774 }
13775
13776 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13777 {
13778         int vlen;
13779         u32 apedata;
13780         char *fwtype;
13781
13782         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13783                 return;
13784
13785         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13786         if (apedata != APE_SEG_SIG_MAGIC)
13787                 return;
13788
13789         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13790         if (!(apedata & APE_FW_STATUS_READY))
13791                 return;
13792
13793         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13794
13795         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13796                 tg3_flag_set(tp, APE_HAS_NCSI);
13797                 fwtype = "NCSI";
13798         } else {
13799                 fwtype = "DASH";
13800         }
13801
13802         vlen = strlen(tp->fw_ver);
13803
13804         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13805                  fwtype,
13806                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13807                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13808                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13809                  (apedata & APE_FW_VERSION_BLDMSK));
13810 }
13811
13812 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13813 {
13814         u32 val;
13815         bool vpd_vers = false;
13816
13817         if (tp->fw_ver[0] != 0)
13818                 vpd_vers = true;
13819
13820         if (tg3_flag(tp, NO_NVRAM)) {
13821                 strcat(tp->fw_ver, "sb");
13822                 return;
13823         }
13824
13825         if (tg3_nvram_read(tp, 0, &val))
13826                 return;
13827
13828         if (val == TG3_EEPROM_MAGIC)
13829                 tg3_read_bc_ver(tp);
13830         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13831                 tg3_read_sb_ver(tp, val);
13832         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13833                 tg3_read_hwsb_ver(tp);
13834         else
13835                 return;
13836
13837         if (vpd_vers)
13838                 goto done;
13839
13840         if (tg3_flag(tp, ENABLE_APE)) {
13841                 if (tg3_flag(tp, ENABLE_ASF))
13842                         tg3_read_dash_ver(tp);
13843         } else if (tg3_flag(tp, ENABLE_ASF)) {
13844                 tg3_read_mgmtfw_ver(tp);
13845         }
13846
13847 done:
13848         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13849 }
13850
13851 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13852 {
13853         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13854                 return TG3_RX_RET_MAX_SIZE_5717;
13855         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13856                 return TG3_RX_RET_MAX_SIZE_5700;
13857         else
13858                 return TG3_RX_RET_MAX_SIZE_5705;
13859 }
13860
13861 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13862         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13863         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13864         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13865         { },
13866 };
13867
13868 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13869 {
13870         struct pci_dev *peer;
13871         unsigned int func, devnr = tp->pdev->devfn & ~7;
13872
13873         for (func = 0; func < 8; func++) {
13874                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13875                 if (peer && peer != tp->pdev)
13876                         break;
13877                 pci_dev_put(peer);
13878         }
13879         /* 5704 can be configured in single-port mode, set peer to
13880          * tp->pdev in that case.
13881          */
13882         if (!peer) {
13883                 peer = tp->pdev;
13884                 return peer;
13885         }
13886
13887         /*
13888          * We don't need to keep the refcount elevated; there's no way
13889          * to remove one half of this device without removing the other
13890          */
13891         pci_dev_put(peer);
13892
13893         return peer;
13894 }
13895
13896 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
13897 {
13898         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
13899         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13900                 u32 reg;
13901
13902                 /* All devices that use the alternate
13903                  * ASIC REV location have a CPMU.
13904                  */
13905                 tg3_flag_set(tp, CPMU_PRESENT);
13906
13907                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13908                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13909                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13910                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13911                         reg = TG3PCI_GEN2_PRODID_ASICREV;
13912                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13913                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13914                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13915                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13916                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13917                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13918                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13919                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13920                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13921                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13922                         reg = TG3PCI_GEN15_PRODID_ASICREV;
13923                 else
13924                         reg = TG3PCI_PRODID_ASICREV;
13925
13926                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
13927         }
13928
13929         /* Wrong chip ID in 5752 A0. This code can be removed later
13930          * as A0 is not in production.
13931          */
13932         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13933                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13934
13935         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13936             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13937             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13938                 tg3_flag_set(tp, 5717_PLUS);
13939
13940         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13941             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
13942                 tg3_flag_set(tp, 57765_CLASS);
13943
13944         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
13945                 tg3_flag_set(tp, 57765_PLUS);
13946
13947         /* Intentionally exclude ASIC_REV_5906 */
13948         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13949             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13950             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13951             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13952             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13953             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13954             tg3_flag(tp, 57765_PLUS))
13955                 tg3_flag_set(tp, 5755_PLUS);
13956
13957         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13958             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13959                 tg3_flag_set(tp, 5780_CLASS);
13960
13961         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13962             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13963             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13964             tg3_flag(tp, 5755_PLUS) ||
13965             tg3_flag(tp, 5780_CLASS))
13966                 tg3_flag_set(tp, 5750_PLUS);
13967
13968         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13969             tg3_flag(tp, 5750_PLUS))
13970                 tg3_flag_set(tp, 5705_PLUS);
13971 }
13972
13973 static int __devinit tg3_get_invariants(struct tg3 *tp)
13974 {
13975         u32 misc_ctrl_reg;
13976         u32 pci_state_reg, grc_misc_cfg;
13977         u32 val;
13978         u16 pci_cmd;
13979         int err;
13980
13981         /* Force memory write invalidate off.  If we leave it on,
13982          * then on 5700_BX chips we have to enable a workaround.
13983          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13984          * to match the cacheline size.  The Broadcom driver have this
13985          * workaround but turns MWI off all the times so never uses
13986          * it.  This seems to suggest that the workaround is insufficient.
13987          */
13988         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13989         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13990         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13991
13992         /* Important! -- Make sure register accesses are byteswapped
13993          * correctly.  Also, for those chips that require it, make
13994          * sure that indirect register accesses are enabled before
13995          * the first operation.
13996          */
13997         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13998                               &misc_ctrl_reg);
13999         tp->misc_host_ctrl |= (misc_ctrl_reg &
14000                                MISC_HOST_CTRL_CHIPREV);
14001         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14002                                tp->misc_host_ctrl);
14003
14004         tg3_detect_asic_rev(tp, misc_ctrl_reg);
14005
14006         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14007          * we need to disable memory and use config. cycles
14008          * only to access all registers. The 5702/03 chips
14009          * can mistakenly decode the special cycles from the
14010          * ICH chipsets as memory write cycles, causing corruption
14011          * of register and memory space. Only certain ICH bridges
14012          * will drive special cycles with non-zero data during the
14013          * address phase which can fall within the 5703's address
14014          * range. This is not an ICH bug as the PCI spec allows
14015          * non-zero address during special cycles. However, only
14016          * these ICH bridges are known to drive non-zero addresses
14017          * during special cycles.
14018          *
14019          * Since special cycles do not cross PCI bridges, we only
14020          * enable this workaround if the 5703 is on the secondary
14021          * bus of these ICH bridges.
14022          */
14023         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14024             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14025                 static struct tg3_dev_id {
14026                         u32     vendor;
14027                         u32     device;
14028                         u32     rev;
14029                 } ich_chipsets[] = {
14030                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14031                           PCI_ANY_ID },
14032                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14033                           PCI_ANY_ID },
14034                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14035                           0xa },
14036                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14037                           PCI_ANY_ID },
14038                         { },
14039                 };
14040                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14041                 struct pci_dev *bridge = NULL;
14042
14043                 while (pci_id->vendor != 0) {
14044                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14045                                                 bridge);
14046                         if (!bridge) {
14047                                 pci_id++;
14048                                 continue;
14049                         }
14050                         if (pci_id->rev != PCI_ANY_ID) {
14051                                 if (bridge->revision > pci_id->rev)
14052                                         continue;
14053                         }
14054                         if (bridge->subordinate &&
14055                             (bridge->subordinate->number ==
14056                              tp->pdev->bus->number)) {
14057                                 tg3_flag_set(tp, ICH_WORKAROUND);
14058                                 pci_dev_put(bridge);
14059                                 break;
14060                         }
14061                 }
14062         }
14063
14064         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14065                 static struct tg3_dev_id {
14066                         u32     vendor;
14067                         u32     device;
14068                 } bridge_chipsets[] = {
14069                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14070                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14071                         { },
14072                 };
14073                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14074                 struct pci_dev *bridge = NULL;
14075
14076                 while (pci_id->vendor != 0) {
14077                         bridge = pci_get_device(pci_id->vendor,
14078                                                 pci_id->device,
14079                                                 bridge);
14080                         if (!bridge) {
14081                                 pci_id++;
14082                                 continue;
14083                         }
14084                         if (bridge->subordinate &&
14085                             (bridge->subordinate->number <=
14086                              tp->pdev->bus->number) &&
14087                             (bridge->subordinate->subordinate >=
14088                              tp->pdev->bus->number)) {
14089                                 tg3_flag_set(tp, 5701_DMA_BUG);
14090                                 pci_dev_put(bridge);
14091                                 break;
14092                         }
14093                 }
14094         }
14095
14096         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14097          * DMA addresses > 40-bit. This bridge may have other additional
14098          * 57xx devices behind it in some 4-port NIC designs for example.
14099          * Any tg3 device found behind the bridge will also need the 40-bit
14100          * DMA workaround.
14101          */
14102         if (tg3_flag(tp, 5780_CLASS)) {
14103                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14104                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14105         } else {
14106                 struct pci_dev *bridge = NULL;
14107
14108                 do {
14109                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14110                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14111                                                 bridge);
14112                         if (bridge && bridge->subordinate &&
14113                             (bridge->subordinate->number <=
14114                              tp->pdev->bus->number) &&
14115                             (bridge->subordinate->subordinate >=
14116                              tp->pdev->bus->number)) {
14117                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14118                                 pci_dev_put(bridge);
14119                                 break;
14120                         }
14121                 } while (bridge);
14122         }
14123
14124         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14125             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14126                 tp->pdev_peer = tg3_find_peer(tp);
14127
14128         /* Determine TSO capabilities */
14129         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14130                 ; /* Do nothing. HW bug. */
14131         else if (tg3_flag(tp, 57765_PLUS))
14132                 tg3_flag_set(tp, HW_TSO_3);
14133         else if (tg3_flag(tp, 5755_PLUS) ||
14134                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14135                 tg3_flag_set(tp, HW_TSO_2);
14136         else if (tg3_flag(tp, 5750_PLUS)) {
14137                 tg3_flag_set(tp, HW_TSO_1);
14138                 tg3_flag_set(tp, TSO_BUG);
14139                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14140                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14141                         tg3_flag_clear(tp, TSO_BUG);
14142         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14143                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14144                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14145                         tg3_flag_set(tp, TSO_BUG);
14146                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14147                         tp->fw_needed = FIRMWARE_TG3TSO5;
14148                 else
14149                         tp->fw_needed = FIRMWARE_TG3TSO;
14150         }
14151
14152         /* Selectively allow TSO based on operating conditions */
14153         if (tg3_flag(tp, HW_TSO_1) ||
14154             tg3_flag(tp, HW_TSO_2) ||
14155             tg3_flag(tp, HW_TSO_3) ||
14156             tp->fw_needed) {
14157                 /* For firmware TSO, assume ASF is disabled.
14158                  * We'll disable TSO later if we discover ASF
14159                  * is enabled in tg3_get_eeprom_hw_cfg().
14160                  */
14161                 tg3_flag_set(tp, TSO_CAPABLE);
14162         } else {
14163                 tg3_flag_clear(tp, TSO_CAPABLE);
14164                 tg3_flag_clear(tp, TSO_BUG);
14165                 tp->fw_needed = NULL;
14166         }
14167
14168         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14169                 tp->fw_needed = FIRMWARE_TG3;
14170
14171         tp->irq_max = 1;
14172
14173         if (tg3_flag(tp, 5750_PLUS)) {
14174                 tg3_flag_set(tp, SUPPORT_MSI);
14175                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14176                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14177                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14178                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14179                      tp->pdev_peer == tp->pdev))
14180                         tg3_flag_clear(tp, SUPPORT_MSI);
14181
14182                 if (tg3_flag(tp, 5755_PLUS) ||
14183                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14184                         tg3_flag_set(tp, 1SHOT_MSI);
14185                 }
14186
14187                 if (tg3_flag(tp, 57765_PLUS)) {
14188                         tg3_flag_set(tp, SUPPORT_MSIX);
14189                         tp->irq_max = TG3_IRQ_MAX_VECS;
14190                         tg3_rss_init_dflt_indir_tbl(tp);
14191                 }
14192         }
14193
14194         if (tg3_flag(tp, 5755_PLUS))
14195                 tg3_flag_set(tp, SHORT_DMA_BUG);
14196
14197         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14198                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14199
14200         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14201             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14202             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14203                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14204
14205         if (tg3_flag(tp, 57765_PLUS) &&
14206             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14207                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14208
14209         if (!tg3_flag(tp, 5705_PLUS) ||
14210             tg3_flag(tp, 5780_CLASS) ||
14211             tg3_flag(tp, USE_JUMBO_BDFLAG))
14212                 tg3_flag_set(tp, JUMBO_CAPABLE);
14213
14214         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14215                               &pci_state_reg);
14216
14217         if (pci_is_pcie(tp->pdev)) {
14218                 u16 lnkctl;
14219
14220                 tg3_flag_set(tp, PCI_EXPRESS);
14221
14222                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
14223                         int readrq = pcie_get_readrq(tp->pdev);
14224                         if (readrq > 2048)
14225                                 pcie_set_readrq(tp->pdev, 2048);
14226                 }
14227
14228                 pci_read_config_word(tp->pdev,
14229                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14230                                      &lnkctl);
14231                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14232                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14233                             ASIC_REV_5906) {
14234                                 tg3_flag_clear(tp, HW_TSO_2);
14235                                 tg3_flag_clear(tp, TSO_CAPABLE);
14236                         }
14237                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14238                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14239                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14240                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14241                                 tg3_flag_set(tp, CLKREQ_BUG);
14242                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14243                         tg3_flag_set(tp, L1PLLPD_EN);
14244                 }
14245         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14246                 /* BCM5785 devices are effectively PCIe devices, and should
14247                  * follow PCIe codepaths, but do not have a PCIe capabilities
14248                  * section.
14249                  */
14250                 tg3_flag_set(tp, PCI_EXPRESS);
14251         } else if (!tg3_flag(tp, 5705_PLUS) ||
14252                    tg3_flag(tp, 5780_CLASS)) {
14253                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14254                 if (!tp->pcix_cap) {
14255                         dev_err(&tp->pdev->dev,
14256                                 "Cannot find PCI-X capability, aborting\n");
14257                         return -EIO;
14258                 }
14259
14260                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14261                         tg3_flag_set(tp, PCIX_MODE);
14262         }
14263
14264         /* If we have an AMD 762 or VIA K8T800 chipset, write
14265          * reordering to the mailbox registers done by the host
14266          * controller can cause major troubles.  We read back from
14267          * every mailbox register write to force the writes to be
14268          * posted to the chip in order.
14269          */
14270         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14271             !tg3_flag(tp, PCI_EXPRESS))
14272                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14273
14274         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14275                              &tp->pci_cacheline_sz);
14276         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14277                              &tp->pci_lat_timer);
14278         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14279             tp->pci_lat_timer < 64) {
14280                 tp->pci_lat_timer = 64;
14281                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14282                                       tp->pci_lat_timer);
14283         }
14284
14285         /* Important! -- It is critical that the PCI-X hw workaround
14286          * situation is decided before the first MMIO register access.
14287          */
14288         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14289                 /* 5700 BX chips need to have their TX producer index
14290                  * mailboxes written twice to workaround a bug.
14291                  */
14292                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14293
14294                 /* If we are in PCI-X mode, enable register write workaround.
14295                  *
14296                  * The workaround is to use indirect register accesses
14297                  * for all chip writes not to mailbox registers.
14298                  */
14299                 if (tg3_flag(tp, PCIX_MODE)) {
14300                         u32 pm_reg;
14301
14302                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14303
14304                         /* The chip can have it's power management PCI config
14305                          * space registers clobbered due to this bug.
14306                          * So explicitly force the chip into D0 here.
14307                          */
14308                         pci_read_config_dword(tp->pdev,
14309                                               tp->pm_cap + PCI_PM_CTRL,
14310                                               &pm_reg);
14311                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14312                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14313                         pci_write_config_dword(tp->pdev,
14314                                                tp->pm_cap + PCI_PM_CTRL,
14315                                                pm_reg);
14316
14317                         /* Also, force SERR#/PERR# in PCI command. */
14318                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14319                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14320                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14321                 }
14322         }
14323
14324         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14325                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14326         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14327                 tg3_flag_set(tp, PCI_32BIT);
14328
14329         /* Chip-specific fixup from Broadcom driver */
14330         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14331             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14332                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14333                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14334         }
14335
14336         /* Default fast path register access methods */
14337         tp->read32 = tg3_read32;
14338         tp->write32 = tg3_write32;
14339         tp->read32_mbox = tg3_read32;
14340         tp->write32_mbox = tg3_write32;
14341         tp->write32_tx_mbox = tg3_write32;
14342         tp->write32_rx_mbox = tg3_write32;
14343
14344         /* Various workaround register access methods */
14345         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14346                 tp->write32 = tg3_write_indirect_reg32;
14347         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14348                  (tg3_flag(tp, PCI_EXPRESS) &&
14349                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14350                 /*
14351                  * Back to back register writes can cause problems on these
14352                  * chips, the workaround is to read back all reg writes
14353                  * except those to mailbox regs.
14354                  *
14355                  * See tg3_write_indirect_reg32().
14356                  */
14357                 tp->write32 = tg3_write_flush_reg32;
14358         }
14359
14360         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14361                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14362                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14363                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14364         }
14365
14366         if (tg3_flag(tp, ICH_WORKAROUND)) {
14367                 tp->read32 = tg3_read_indirect_reg32;
14368                 tp->write32 = tg3_write_indirect_reg32;
14369                 tp->read32_mbox = tg3_read_indirect_mbox;
14370                 tp->write32_mbox = tg3_write_indirect_mbox;
14371                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14372                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14373
14374                 iounmap(tp->regs);
14375                 tp->regs = NULL;
14376
14377                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14378                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14379                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14380         }
14381         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14382                 tp->read32_mbox = tg3_read32_mbox_5906;
14383                 tp->write32_mbox = tg3_write32_mbox_5906;
14384                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14385                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14386         }
14387
14388         if (tp->write32 == tg3_write_indirect_reg32 ||
14389             (tg3_flag(tp, PCIX_MODE) &&
14390              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14391               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14392                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14393
14394         /* The memory arbiter has to be enabled in order for SRAM accesses
14395          * to succeed.  Normally on powerup the tg3 chip firmware will make
14396          * sure it is enabled, but other entities such as system netboot
14397          * code might disable it.
14398          */
14399         val = tr32(MEMARB_MODE);
14400         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14401
14402         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14403         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14404             tg3_flag(tp, 5780_CLASS)) {
14405                 if (tg3_flag(tp, PCIX_MODE)) {
14406                         pci_read_config_dword(tp->pdev,
14407                                               tp->pcix_cap + PCI_X_STATUS,
14408                                               &val);
14409                         tp->pci_fn = val & 0x7;
14410                 }
14411         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14412                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14413                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14414                     NIC_SRAM_CPMUSTAT_SIG) {
14415                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14416                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14417                 }
14418         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14419                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14420                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14421                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14422                     NIC_SRAM_CPMUSTAT_SIG) {
14423                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14424                                      TG3_CPMU_STATUS_FSHFT_5719;
14425                 }
14426         }
14427
14428         /* Get eeprom hw config before calling tg3_set_power_state().
14429          * In particular, the TG3_FLAG_IS_NIC flag must be
14430          * determined before calling tg3_set_power_state() so that
14431          * we know whether or not to switch out of Vaux power.
14432          * When the flag is set, it means that GPIO1 is used for eeprom
14433          * write protect and also implies that it is a LOM where GPIOs
14434          * are not used to switch power.
14435          */
14436         tg3_get_eeprom_hw_cfg(tp);
14437
14438         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14439                 tg3_flag_clear(tp, TSO_CAPABLE);
14440                 tg3_flag_clear(tp, TSO_BUG);
14441                 tp->fw_needed = NULL;
14442         }
14443
14444         if (tg3_flag(tp, ENABLE_APE)) {
14445                 /* Allow reads and writes to the
14446                  * APE register and memory space.
14447                  */
14448                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14449                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14450                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14451                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14452                                        pci_state_reg);
14453
14454                 tg3_ape_lock_init(tp);
14455         }
14456
14457         /* Set up tp->grc_local_ctrl before calling
14458          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14459          * will bring 5700's external PHY out of reset.
14460          * It is also used as eeprom write protect on LOMs.
14461          */
14462         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14463         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14464             tg3_flag(tp, EEPROM_WRITE_PROT))
14465                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14466                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14467         /* Unused GPIO3 must be driven as output on 5752 because there
14468          * are no pull-up resistors on unused GPIO pins.
14469          */
14470         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14471                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14472
14473         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14474             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14475             tg3_flag(tp, 57765_CLASS))
14476                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14477
14478         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14479             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14480                 /* Turn off the debug UART. */
14481                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14482                 if (tg3_flag(tp, IS_NIC))
14483                         /* Keep VMain power. */
14484                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14485                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14486         }
14487
14488         /* Switch out of Vaux if it is a NIC */
14489         tg3_pwrsrc_switch_to_vmain(tp);
14490
14491         /* Derive initial jumbo mode from MTU assigned in
14492          * ether_setup() via the alloc_etherdev() call
14493          */
14494         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14495                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14496
14497         /* Determine WakeOnLan speed to use. */
14498         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14499             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14500             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14501             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14502                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14503         } else {
14504                 tg3_flag_set(tp, WOL_SPEED_100MB);
14505         }
14506
14507         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14508                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14509
14510         /* A few boards don't want Ethernet@WireSpeed phy feature */
14511         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14512             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14513              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14514              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14515             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14516             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14517                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14518
14519         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14520             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14521                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14522         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14523                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14524
14525         if (tg3_flag(tp, 5705_PLUS) &&
14526             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14527             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14528             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14529             !tg3_flag(tp, 57765_PLUS)) {
14530                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14531                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14532                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14533                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14534                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14535                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14536                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14537                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14538                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14539                 } else
14540                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14541         }
14542
14543         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14544             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14545                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14546                 if (tp->phy_otp == 0)
14547                         tp->phy_otp = TG3_OTP_DEFAULT;
14548         }
14549
14550         if (tg3_flag(tp, CPMU_PRESENT))
14551                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14552         else
14553                 tp->mi_mode = MAC_MI_MODE_BASE;
14554
14555         tp->coalesce_mode = 0;
14556         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14557             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14558                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14559
14560         /* Set these bits to enable statistics workaround. */
14561         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14562             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14563             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14564                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14565                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14566         }
14567
14568         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14569             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14570                 tg3_flag_set(tp, USE_PHYLIB);
14571
14572         err = tg3_mdio_init(tp);
14573         if (err)
14574                 return err;
14575
14576         /* Initialize data/descriptor byte/word swapping. */
14577         val = tr32(GRC_MODE);
14578         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14579                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14580                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14581                         GRC_MODE_B2HRX_ENABLE |
14582                         GRC_MODE_HTX2B_ENABLE |
14583                         GRC_MODE_HOST_STACKUP);
14584         else
14585                 val &= GRC_MODE_HOST_STACKUP;
14586
14587         tw32(GRC_MODE, val | tp->grc_mode);
14588
14589         tg3_switch_clocks(tp);
14590
14591         /* Clear this out for sanity. */
14592         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14593
14594         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14595                               &pci_state_reg);
14596         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14597             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14598                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14599
14600                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14601                     chiprevid == CHIPREV_ID_5701_B0 ||
14602                     chiprevid == CHIPREV_ID_5701_B2 ||
14603                     chiprevid == CHIPREV_ID_5701_B5) {
14604                         void __iomem *sram_base;
14605
14606                         /* Write some dummy words into the SRAM status block
14607                          * area, see if it reads back correctly.  If the return
14608                          * value is bad, force enable the PCIX workaround.
14609                          */
14610                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14611
14612                         writel(0x00000000, sram_base);
14613                         writel(0x00000000, sram_base + 4);
14614                         writel(0xffffffff, sram_base + 4);
14615                         if (readl(sram_base) != 0x00000000)
14616                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14617                 }
14618         }
14619
14620         udelay(50);
14621         tg3_nvram_init(tp);
14622
14623         grc_misc_cfg = tr32(GRC_MISC_CFG);
14624         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14625
14626         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14627             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14628              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14629                 tg3_flag_set(tp, IS_5788);
14630
14631         if (!tg3_flag(tp, IS_5788) &&
14632             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14633                 tg3_flag_set(tp, TAGGED_STATUS);
14634         if (tg3_flag(tp, TAGGED_STATUS)) {
14635                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14636                                       HOSTCC_MODE_CLRTICK_TXBD);
14637
14638                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14639                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14640                                        tp->misc_host_ctrl);
14641         }
14642
14643         /* Preserve the APE MAC_MODE bits */
14644         if (tg3_flag(tp, ENABLE_APE))
14645                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14646         else
14647                 tp->mac_mode = 0;
14648
14649         /* these are limited to 10/100 only */
14650         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14651              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14652             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14653              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14654              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14655               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14656               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14657             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14658              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14659               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14660               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14661             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14662             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14663             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14664             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14665                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14666
14667         err = tg3_phy_probe(tp);
14668         if (err) {
14669                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14670                 /* ... but do not return immediately ... */
14671                 tg3_mdio_fini(tp);
14672         }
14673
14674         tg3_read_vpd(tp);
14675         tg3_read_fw_ver(tp);
14676
14677         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14678                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14679         } else {
14680                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14681                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14682                 else
14683                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14684         }
14685
14686         /* 5700 {AX,BX} chips have a broken status block link
14687          * change bit implementation, so we must use the
14688          * status register in those cases.
14689          */
14690         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14691                 tg3_flag_set(tp, USE_LINKCHG_REG);
14692         else
14693                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14694
14695         /* The led_ctrl is set during tg3_phy_probe, here we might
14696          * have to force the link status polling mechanism based
14697          * upon subsystem IDs.
14698          */
14699         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14700             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14701             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14702                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14703                 tg3_flag_set(tp, USE_LINKCHG_REG);
14704         }
14705
14706         /* For all SERDES we poll the MAC status register. */
14707         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14708                 tg3_flag_set(tp, POLL_SERDES);
14709         else
14710                 tg3_flag_clear(tp, POLL_SERDES);
14711
14712         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14713         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14714         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14715             tg3_flag(tp, PCIX_MODE)) {
14716                 tp->rx_offset = NET_SKB_PAD;
14717 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14718                 tp->rx_copy_thresh = ~(u16)0;
14719 #endif
14720         }
14721
14722         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14723         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14724         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14725
14726         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14727
14728         /* Increment the rx prod index on the rx std ring by at most
14729          * 8 for these chips to workaround hw errata.
14730          */
14731         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14732             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14733             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14734                 tp->rx_std_max_post = 8;
14735
14736         if (tg3_flag(tp, ASPM_WORKAROUND))
14737                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14738                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14739
14740         return err;
14741 }
14742
14743 #ifdef CONFIG_SPARC
14744 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14745 {
14746         struct net_device *dev = tp->dev;
14747         struct pci_dev *pdev = tp->pdev;
14748         struct device_node *dp = pci_device_to_OF_node(pdev);
14749         const unsigned char *addr;
14750         int len;
14751
14752         addr = of_get_property(dp, "local-mac-address", &len);
14753         if (addr && len == 6) {
14754                 memcpy(dev->dev_addr, addr, 6);
14755                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14756                 return 0;
14757         }
14758         return -ENODEV;
14759 }
14760
14761 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14762 {
14763         struct net_device *dev = tp->dev;
14764
14765         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14766         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14767         return 0;
14768 }
14769 #endif
14770
14771 static int __devinit tg3_get_device_address(struct tg3 *tp)
14772 {
14773         struct net_device *dev = tp->dev;
14774         u32 hi, lo, mac_offset;
14775         int addr_ok = 0;
14776
14777 #ifdef CONFIG_SPARC
14778         if (!tg3_get_macaddr_sparc(tp))
14779                 return 0;
14780 #endif
14781
14782         mac_offset = 0x7c;
14783         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14784             tg3_flag(tp, 5780_CLASS)) {
14785                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14786                         mac_offset = 0xcc;
14787                 if (tg3_nvram_lock(tp))
14788                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14789                 else
14790                         tg3_nvram_unlock(tp);
14791         } else if (tg3_flag(tp, 5717_PLUS)) {
14792                 if (tp->pci_fn & 1)
14793                         mac_offset = 0xcc;
14794                 if (tp->pci_fn > 1)
14795                         mac_offset += 0x18c;
14796         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14797                 mac_offset = 0x10;
14798
14799         /* First try to get it from MAC address mailbox. */
14800         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14801         if ((hi >> 16) == 0x484b) {
14802                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14803                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14804
14805                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14806                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14807                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14808                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14809                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14810
14811                 /* Some old bootcode may report a 0 MAC address in SRAM */
14812                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14813         }
14814         if (!addr_ok) {
14815                 /* Next, try NVRAM. */
14816                 if (!tg3_flag(tp, NO_NVRAM) &&
14817                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14818                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14819                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14820                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14821                 }
14822                 /* Finally just fetch it out of the MAC control regs. */
14823                 else {
14824                         hi = tr32(MAC_ADDR_0_HIGH);
14825                         lo = tr32(MAC_ADDR_0_LOW);
14826
14827                         dev->dev_addr[5] = lo & 0xff;
14828                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14829                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14830                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14831                         dev->dev_addr[1] = hi & 0xff;
14832                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14833                 }
14834         }
14835
14836         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14837 #ifdef CONFIG_SPARC
14838                 if (!tg3_get_default_macaddr_sparc(tp))
14839                         return 0;
14840 #endif
14841                 return -EINVAL;
14842         }
14843         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14844         return 0;
14845 }
14846
14847 #define BOUNDARY_SINGLE_CACHELINE       1
14848 #define BOUNDARY_MULTI_CACHELINE        2
14849
14850 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14851 {
14852         int cacheline_size;
14853         u8 byte;
14854         int goal;
14855
14856         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14857         if (byte == 0)
14858                 cacheline_size = 1024;
14859         else
14860                 cacheline_size = (int) byte * 4;
14861
14862         /* On 5703 and later chips, the boundary bits have no
14863          * effect.
14864          */
14865         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14866             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14867             !tg3_flag(tp, PCI_EXPRESS))
14868                 goto out;
14869
14870 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14871         goal = BOUNDARY_MULTI_CACHELINE;
14872 #else
14873 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14874         goal = BOUNDARY_SINGLE_CACHELINE;
14875 #else
14876         goal = 0;
14877 #endif
14878 #endif
14879
14880         if (tg3_flag(tp, 57765_PLUS)) {
14881                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14882                 goto out;
14883         }
14884
14885         if (!goal)
14886                 goto out;
14887
14888         /* PCI controllers on most RISC systems tend to disconnect
14889          * when a device tries to burst across a cache-line boundary.
14890          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14891          *
14892          * Unfortunately, for PCI-E there are only limited
14893          * write-side controls for this, and thus for reads
14894          * we will still get the disconnects.  We'll also waste
14895          * these PCI cycles for both read and write for chips
14896          * other than 5700 and 5701 which do not implement the
14897          * boundary bits.
14898          */
14899         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14900                 switch (cacheline_size) {
14901                 case 16:
14902                 case 32:
14903                 case 64:
14904                 case 128:
14905                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14906                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14907                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14908                         } else {
14909                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14910                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14911                         }
14912                         break;
14913
14914                 case 256:
14915                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14916                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14917                         break;
14918
14919                 default:
14920                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14921                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14922                         break;
14923                 }
14924         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14925                 switch (cacheline_size) {
14926                 case 16:
14927                 case 32:
14928                 case 64:
14929                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14930                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14931                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14932                                 break;
14933                         }
14934                         /* fallthrough */
14935                 case 128:
14936                 default:
14937                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14938                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14939                         break;
14940                 }
14941         } else {
14942                 switch (cacheline_size) {
14943                 case 16:
14944                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14945                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14946                                         DMA_RWCTRL_WRITE_BNDRY_16);
14947                                 break;
14948                         }
14949                         /* fallthrough */
14950                 case 32:
14951                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14952                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14953                                         DMA_RWCTRL_WRITE_BNDRY_32);
14954                                 break;
14955                         }
14956                         /* fallthrough */
14957                 case 64:
14958                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14959                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14960                                         DMA_RWCTRL_WRITE_BNDRY_64);
14961                                 break;
14962                         }
14963                         /* fallthrough */
14964                 case 128:
14965                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14966                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14967                                         DMA_RWCTRL_WRITE_BNDRY_128);
14968                                 break;
14969                         }
14970                         /* fallthrough */
14971                 case 256:
14972                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14973                                 DMA_RWCTRL_WRITE_BNDRY_256);
14974                         break;
14975                 case 512:
14976                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14977                                 DMA_RWCTRL_WRITE_BNDRY_512);
14978                         break;
14979                 case 1024:
14980                 default:
14981                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14982                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14983                         break;
14984                 }
14985         }
14986
14987 out:
14988         return val;
14989 }
14990
14991 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14992 {
14993         struct tg3_internal_buffer_desc test_desc;
14994         u32 sram_dma_descs;
14995         int i, ret;
14996
14997         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14998
14999         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15000         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15001         tw32(RDMAC_STATUS, 0);
15002         tw32(WDMAC_STATUS, 0);
15003
15004         tw32(BUFMGR_MODE, 0);
15005         tw32(FTQ_RESET, 0);
15006
15007         test_desc.addr_hi = ((u64) buf_dma) >> 32;
15008         test_desc.addr_lo = buf_dma & 0xffffffff;
15009         test_desc.nic_mbuf = 0x00002100;
15010         test_desc.len = size;
15011
15012         /*
15013          * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
15014          * the *second* time the tg3 driver was getting loaded after an
15015          * initial scan.
15016          *
15017          * Broadcom tells me:
15018          *   ...the DMA engine is connected to the GRC block and a DMA
15019          *   reset may affect the GRC block in some unpredictable way...
15020          *   The behavior of resets to individual blocks has not been tested.
15021          *
15022          * Broadcom noted the GRC reset will also reset all sub-components.
15023          */
15024         if (to_device) {
15025                 test_desc.cqid_sqid = (13 << 8) | 2;
15026
15027                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15028                 udelay(40);
15029         } else {
15030                 test_desc.cqid_sqid = (16 << 8) | 7;
15031
15032                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15033                 udelay(40);
15034         }
15035         test_desc.flags = 0x00000005;
15036
15037         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15038                 u32 val;
15039
15040                 val = *(((u32 *)&test_desc) + i);
15041                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15042                                        sram_dma_descs + (i * sizeof(u32)));
15043                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15044         }
15045         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15046
15047         if (to_device)
15048                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15049         else
15050                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15051
15052         ret = -ENODEV;
15053         for (i = 0; i < 40; i++) {
15054                 u32 val;
15055
15056                 if (to_device)
15057                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15058                 else
15059                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15060                 if ((val & 0xffff) == sram_dma_descs) {
15061                         ret = 0;
15062                         break;
15063                 }
15064
15065                 udelay(100);
15066         }
15067
15068         return ret;
15069 }
15070
15071 #define TEST_BUFFER_SIZE        0x2000
15072
15073 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15074         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15075         { },
15076 };
15077
15078 static int __devinit tg3_test_dma(struct tg3 *tp)
15079 {
15080         dma_addr_t buf_dma;
15081         u32 *buf, saved_dma_rwctrl;
15082         int ret = 0;
15083
15084         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15085                                  &buf_dma, GFP_KERNEL);
15086         if (!buf) {
15087                 ret = -ENOMEM;
15088                 goto out_nofree;
15089         }
15090
15091         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15092                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15093
15094         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15095
15096         if (tg3_flag(tp, 57765_PLUS))
15097                 goto out;
15098
15099         if (tg3_flag(tp, PCI_EXPRESS)) {
15100                 /* DMA read watermark not used on PCIE */
15101                 tp->dma_rwctrl |= 0x00180000;
15102         } else if (!tg3_flag(tp, PCIX_MODE)) {
15103                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15104                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15105                         tp->dma_rwctrl |= 0x003f0000;
15106                 else
15107                         tp->dma_rwctrl |= 0x003f000f;
15108         } else {
15109                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15110                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15111                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15112                         u32 read_water = 0x7;
15113
15114                         /* If the 5704 is behind the EPB bridge, we can
15115                          * do the less restrictive ONE_DMA workaround for
15116                          * better performance.
15117                          */
15118                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15119                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15120                                 tp->dma_rwctrl |= 0x8000;
15121                         else if (ccval == 0x6 || ccval == 0x7)
15122                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15123
15124                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15125                                 read_water = 4;
15126                         /* Set bit 23 to enable PCIX hw bug fix */
15127                         tp->dma_rwctrl |=
15128                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15129                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15130                                 (1 << 23);
15131                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15132                         /* 5780 always in PCIX mode */
15133                         tp->dma_rwctrl |= 0x00144000;
15134                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15135                         /* 5714 always in PCIX mode */
15136                         tp->dma_rwctrl |= 0x00148000;
15137                 } else {
15138                         tp->dma_rwctrl |= 0x001b000f;
15139                 }
15140         }
15141
15142         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15143             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15144                 tp->dma_rwctrl &= 0xfffffff0;
15145
15146         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15147             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15148                 /* Remove this if it causes problems for some boards. */
15149                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15150
15151                 /* On 5700/5701 chips, we need to set this bit.
15152                  * Otherwise the chip will issue cacheline transactions
15153                  * to streamable DMA memory with not all the byte
15154                  * enables turned on.  This is an error on several
15155                  * RISC PCI controllers, in particular sparc64.
15156                  *
15157                  * On 5703/5704 chips, this bit has been reassigned
15158                  * a different meaning.  In particular, it is used
15159                  * on those chips to enable a PCI-X workaround.
15160                  */
15161                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15162         }
15163
15164         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15165
15166 #if 0
15167         /* Unneeded, already done by tg3_get_invariants.  */
15168         tg3_switch_clocks(tp);
15169 #endif
15170
15171         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15172             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15173                 goto out;
15174
15175         /* It is best to perform DMA test with maximum write burst size
15176          * to expose the 5700/5701 write DMA bug.
15177          */
15178         saved_dma_rwctrl = tp->dma_rwctrl;
15179         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15180         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15181
15182         while (1) {
15183                 u32 *p = buf, i;
15184
15185                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15186                         p[i] = i;
15187
15188                 /* Send the buffer to the chip. */
15189                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15190                 if (ret) {
15191                         dev_err(&tp->pdev->dev,
15192                                 "%s: Buffer write failed. err = %d\n",
15193                                 __func__, ret);
15194                         break;
15195                 }
15196
15197 #if 0
15198                 /* validate data reached card RAM correctly. */
15199                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15200                         u32 val;
15201                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15202                         if (le32_to_cpu(val) != p[i]) {
15203                                 dev_err(&tp->pdev->dev,
15204                                         "%s: Buffer corrupted on device! "
15205                                         "(%d != %d)\n", __func__, val, i);
15206                                 /* ret = -ENODEV here? */
15207                         }
15208                         p[i] = 0;
15209                 }
15210 #endif
15211                 /* Now read it back. */
15212                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15213                 if (ret) {
15214                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15215                                 "err = %d\n", __func__, ret);
15216                         break;
15217                 }
15218
15219                 /* Verify it. */
15220                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15221                         if (p[i] == i)
15222                                 continue;
15223
15224                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15225                             DMA_RWCTRL_WRITE_BNDRY_16) {
15226                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15227                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15228                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15229                                 break;
15230                         } else {
15231                                 dev_err(&tp->pdev->dev,
15232                                         "%s: Buffer corrupted on read back! "
15233                                         "(%d != %d)\n", __func__, p[i], i);
15234                                 ret = -ENODEV;
15235                                 goto out;
15236                         }
15237                 }
15238
15239                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15240                         /* Success. */
15241                         ret = 0;
15242                         break;
15243                 }
15244         }
15245         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15246             DMA_RWCTRL_WRITE_BNDRY_16) {
15247                 /* DMA test passed without adjusting DMA boundary,
15248                  * now look for chipsets that are known to expose the
15249                  * DMA bug without failing the test.
15250                  */
15251                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15252                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15253                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15254                 } else {
15255                         /* Safe to use the calculated DMA boundary. */
15256                         tp->dma_rwctrl = saved_dma_rwctrl;
15257                 }
15258
15259                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15260         }
15261
15262 out:
15263         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15264 out_nofree:
15265         return ret;
15266 }
15267
15268 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15269 {
15270         if (tg3_flag(tp, 57765_PLUS)) {
15271                 tp->bufmgr_config.mbuf_read_dma_low_water =
15272                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15273                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15274                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15275                 tp->bufmgr_config.mbuf_high_water =
15276                         DEFAULT_MB_HIGH_WATER_57765;
15277
15278                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15279                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15280                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15281                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15282                 tp->bufmgr_config.mbuf_high_water_jumbo =
15283                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15284         } else if (tg3_flag(tp, 5705_PLUS)) {
15285                 tp->bufmgr_config.mbuf_read_dma_low_water =
15286                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15287                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15288                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15289                 tp->bufmgr_config.mbuf_high_water =
15290                         DEFAULT_MB_HIGH_WATER_5705;
15291                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15292                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15293                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15294                         tp->bufmgr_config.mbuf_high_water =
15295                                 DEFAULT_MB_HIGH_WATER_5906;
15296                 }
15297
15298                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15299                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15300                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15301                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15302                 tp->bufmgr_config.mbuf_high_water_jumbo =
15303                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15304         } else {
15305                 tp->bufmgr_config.mbuf_read_dma_low_water =
15306                         DEFAULT_MB_RDMA_LOW_WATER;
15307                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15308                         DEFAULT_MB_MACRX_LOW_WATER;
15309                 tp->bufmgr_config.mbuf_high_water =
15310                         DEFAULT_MB_HIGH_WATER;
15311
15312                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15313                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15314                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15315                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15316                 tp->bufmgr_config.mbuf_high_water_jumbo =
15317                         DEFAULT_MB_HIGH_WATER_JUMBO;
15318         }
15319
15320         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15321         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15322 }
15323
15324 static char * __devinit tg3_phy_string(struct tg3 *tp)
15325 {
15326         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15327         case TG3_PHY_ID_BCM5400:        return "5400";
15328         case TG3_PHY_ID_BCM5401:        return "5401";
15329         case TG3_PHY_ID_BCM5411:        return "5411";
15330         case TG3_PHY_ID_BCM5701:        return "5701";
15331         case TG3_PHY_ID_BCM5703:        return "5703";
15332         case TG3_PHY_ID_BCM5704:        return "5704";
15333         case TG3_PHY_ID_BCM5705:        return "5705";
15334         case TG3_PHY_ID_BCM5750:        return "5750";
15335         case TG3_PHY_ID_BCM5752:        return "5752";
15336         case TG3_PHY_ID_BCM5714:        return "5714";
15337         case TG3_PHY_ID_BCM5780:        return "5780";
15338         case TG3_PHY_ID_BCM5755:        return "5755";
15339         case TG3_PHY_ID_BCM5787:        return "5787";
15340         case TG3_PHY_ID_BCM5784:        return "5784";
15341         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15342         case TG3_PHY_ID_BCM5906:        return "5906";
15343         case TG3_PHY_ID_BCM5761:        return "5761";
15344         case TG3_PHY_ID_BCM5718C:       return "5718C";
15345         case TG3_PHY_ID_BCM5718S:       return "5718S";
15346         case TG3_PHY_ID_BCM57765:       return "57765";
15347         case TG3_PHY_ID_BCM5719C:       return "5719C";
15348         case TG3_PHY_ID_BCM5720C:       return "5720C";
15349         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15350         case 0:                 return "serdes";
15351         default:                return "unknown";
15352         }
15353 }
15354
15355 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15356 {
15357         if (tg3_flag(tp, PCI_EXPRESS)) {
15358                 strcpy(str, "PCI Express");
15359                 return str;
15360         } else if (tg3_flag(tp, PCIX_MODE)) {
15361                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15362
15363                 strcpy(str, "PCIX:");
15364
15365                 if ((clock_ctrl == 7) ||
15366                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15367                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15368                         strcat(str, "133MHz");
15369                 else if (clock_ctrl == 0)
15370                         strcat(str, "33MHz");
15371                 else if (clock_ctrl == 2)
15372                         strcat(str, "50MHz");
15373                 else if (clock_ctrl == 4)
15374                         strcat(str, "66MHz");
15375                 else if (clock_ctrl == 6)
15376                         strcat(str, "100MHz");
15377         } else {
15378                 strcpy(str, "PCI:");
15379                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15380                         strcat(str, "66MHz");
15381                 else
15382                         strcat(str, "33MHz");
15383         }
15384         if (tg3_flag(tp, PCI_32BIT))
15385                 strcat(str, ":32-bit");
15386         else
15387                 strcat(str, ":64-bit");
15388         return str;
15389 }
15390
15391 static void __devinit tg3_init_coal(struct tg3 *tp)
15392 {
15393         struct ethtool_coalesce *ec = &tp->coal;
15394
15395         memset(ec, 0, sizeof(*ec));
15396         ec->cmd = ETHTOOL_GCOALESCE;
15397         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15398         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15399         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15400         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15401         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15402         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15403         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15404         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15405         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15406
15407         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15408                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15409                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15410                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15411                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15412                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15413         }
15414
15415         if (tg3_flag(tp, 5705_PLUS)) {
15416                 ec->rx_coalesce_usecs_irq = 0;
15417                 ec->tx_coalesce_usecs_irq = 0;
15418                 ec->stats_block_coalesce_usecs = 0;
15419         }
15420 }
15421
15422 static int __devinit tg3_init_one(struct pci_dev *pdev,
15423                                   const struct pci_device_id *ent)
15424 {
15425         struct net_device *dev;
15426         struct tg3 *tp;
15427         int i, err, pm_cap;
15428         u32 sndmbx, rcvmbx, intmbx;
15429         char str[40];
15430         u64 dma_mask, persist_dma_mask;
15431         netdev_features_t features = 0;
15432
15433         printk_once(KERN_INFO "%s\n", version);
15434
15435         err = pci_enable_device(pdev);
15436         if (err) {
15437                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15438                 return err;
15439         }
15440
15441         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15442         if (err) {
15443                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15444                 goto err_out_disable_pdev;
15445         }
15446
15447         pci_set_master(pdev);
15448
15449         /* Find power-management capability. */
15450         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15451         if (pm_cap == 0) {
15452                 dev_err(&pdev->dev,
15453                         "Cannot find Power Management capability, aborting\n");
15454                 err = -EIO;
15455                 goto err_out_free_res;
15456         }
15457
15458         err = pci_set_power_state(pdev, PCI_D0);
15459         if (err) {
15460                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15461                 goto err_out_free_res;
15462         }
15463
15464         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15465         if (!dev) {
15466                 err = -ENOMEM;
15467                 goto err_out_power_down;
15468         }
15469
15470         SET_NETDEV_DEV(dev, &pdev->dev);
15471
15472         tp = netdev_priv(dev);
15473         tp->pdev = pdev;
15474         tp->dev = dev;
15475         tp->pm_cap = pm_cap;
15476         tp->rx_mode = TG3_DEF_RX_MODE;
15477         tp->tx_mode = TG3_DEF_TX_MODE;
15478
15479         if (tg3_debug > 0)
15480                 tp->msg_enable = tg3_debug;
15481         else
15482                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15483
15484         /* The word/byte swap controls here control register access byte
15485          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15486          * setting below.
15487          */
15488         tp->misc_host_ctrl =
15489                 MISC_HOST_CTRL_MASK_PCI_INT |
15490                 MISC_HOST_CTRL_WORD_SWAP |
15491                 MISC_HOST_CTRL_INDIR_ACCESS |
15492                 MISC_HOST_CTRL_PCISTATE_RW;
15493
15494         /* The NONFRM (non-frame) byte/word swap controls take effect
15495          * on descriptor entries, anything which isn't packet data.
15496          *
15497          * The StrongARM chips on the board (one for tx, one for rx)
15498          * are running in big-endian mode.
15499          */
15500         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15501                         GRC_MODE_WSWAP_NONFRM_DATA);
15502 #ifdef __BIG_ENDIAN
15503         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15504 #endif
15505         spin_lock_init(&tp->lock);
15506         spin_lock_init(&tp->indirect_lock);
15507         INIT_WORK(&tp->reset_task, tg3_reset_task);
15508
15509         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15510         if (!tp->regs) {
15511                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15512                 err = -ENOMEM;
15513                 goto err_out_free_dev;
15514         }
15515
15516         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15517             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15518             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15519             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15520             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15521             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15522             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15523             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15524                 tg3_flag_set(tp, ENABLE_APE);
15525                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15526                 if (!tp->aperegs) {
15527                         dev_err(&pdev->dev,
15528                                 "Cannot map APE registers, aborting\n");
15529                         err = -ENOMEM;
15530                         goto err_out_iounmap;
15531                 }
15532         }
15533
15534         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15535         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15536
15537         dev->ethtool_ops = &tg3_ethtool_ops;
15538         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15539         dev->netdev_ops = &tg3_netdev_ops;
15540         dev->irq = pdev->irq;
15541
15542         err = tg3_get_invariants(tp);
15543         if (err) {
15544                 dev_err(&pdev->dev,
15545                         "Problem fetching invariants of chip, aborting\n");
15546                 goto err_out_apeunmap;
15547         }
15548
15549         /* The EPB bridge inside 5714, 5715, and 5780 and any
15550          * device behind the EPB cannot support DMA addresses > 40-bit.
15551          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15552          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15553          * do DMA address check in tg3_start_xmit().
15554          */
15555         if (tg3_flag(tp, IS_5788))
15556                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15557         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15558                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15559 #ifdef CONFIG_HIGHMEM
15560                 dma_mask = DMA_BIT_MASK(64);
15561 #endif
15562         } else
15563                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15564
15565         /* Configure DMA attributes. */
15566         if (dma_mask > DMA_BIT_MASK(32)) {
15567                 err = pci_set_dma_mask(pdev, dma_mask);
15568                 if (!err) {
15569                         features |= NETIF_F_HIGHDMA;
15570                         err = pci_set_consistent_dma_mask(pdev,
15571                                                           persist_dma_mask);
15572                         if (err < 0) {
15573                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15574                                         "DMA for consistent allocations\n");
15575                                 goto err_out_apeunmap;
15576                         }
15577                 }
15578         }
15579         if (err || dma_mask == DMA_BIT_MASK(32)) {
15580                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15581                 if (err) {
15582                         dev_err(&pdev->dev,
15583                                 "No usable DMA configuration, aborting\n");
15584                         goto err_out_apeunmap;
15585                 }
15586         }
15587
15588         tg3_init_bufmgr_config(tp);
15589
15590         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15591
15592         /* 5700 B0 chips do not support checksumming correctly due
15593          * to hardware bugs.
15594          */
15595         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15596                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15597
15598                 if (tg3_flag(tp, 5755_PLUS))
15599                         features |= NETIF_F_IPV6_CSUM;
15600         }
15601
15602         /* TSO is on by default on chips that support hardware TSO.
15603          * Firmware TSO on older chips gives lower performance, so it
15604          * is off by default, but can be enabled using ethtool.
15605          */
15606         if ((tg3_flag(tp, HW_TSO_1) ||
15607              tg3_flag(tp, HW_TSO_2) ||
15608              tg3_flag(tp, HW_TSO_3)) &&
15609             (features & NETIF_F_IP_CSUM))
15610                 features |= NETIF_F_TSO;
15611         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15612                 if (features & NETIF_F_IPV6_CSUM)
15613                         features |= NETIF_F_TSO6;
15614                 if (tg3_flag(tp, HW_TSO_3) ||
15615                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15616                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15617                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15618                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15619                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15620                         features |= NETIF_F_TSO_ECN;
15621         }
15622
15623         dev->features |= features;
15624         dev->vlan_features |= features;
15625
15626         /*
15627          * Add loopback capability only for a subset of devices that support
15628          * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
15629          * loopback for the remaining devices.
15630          */
15631         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15632             !tg3_flag(tp, CPMU_PRESENT))
15633                 /* Add the loopback capability */
15634                 features |= NETIF_F_LOOPBACK;
15635
15636         dev->hw_features |= features;
15637
15638         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15639             !tg3_flag(tp, TSO_CAPABLE) &&
15640             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15641                 tg3_flag_set(tp, MAX_RXPEND_64);
15642                 tp->rx_pending = 63;
15643         }
15644
15645         err = tg3_get_device_address(tp);
15646         if (err) {
15647                 dev_err(&pdev->dev,
15648                         "Could not obtain valid ethernet address, aborting\n");
15649                 goto err_out_apeunmap;
15650         }
15651
15652         /*
15653          * Reset chip in case UNDI or EFI driver did not shutdown
15654          * DMA self test will enable WDMAC and we'll see (spurious)
15655          * pending DMA on the PCI bus at that point.
15656          */
15657         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15658             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15659                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15660                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15661         }
15662
15663         err = tg3_test_dma(tp);
15664         if (err) {
15665                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15666                 goto err_out_apeunmap;
15667         }
15668
15669         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15670         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15671         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15672         for (i = 0; i < tp->irq_max; i++) {
15673                 struct tg3_napi *tnapi = &tp->napi[i];
15674
15675                 tnapi->tp = tp;
15676                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15677
15678                 tnapi->int_mbox = intmbx;
15679                 if (i <= 4)
15680                         intmbx += 0x8;
15681                 else
15682                         intmbx += 0x4;
15683
15684                 tnapi->consmbox = rcvmbx;
15685                 tnapi->prodmbox = sndmbx;
15686
15687                 if (i)
15688                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15689                 else
15690                         tnapi->coal_now = HOSTCC_MODE_NOW;
15691
15692                 if (!tg3_flag(tp, SUPPORT_MSIX))
15693                         break;
15694
15695                 /*
15696                  * If we support MSIX, we'll be using RSS.  If we're using
15697                  * RSS, the first vector only handles link interrupts and the
15698                  * remaining vectors handle rx and tx interrupts.  Reuse the
15699                  * mailbox values for the next iteration.  The values we setup
15700                  * above are still useful for the single vectored mode.
15701                  */
15702                 if (!i)
15703                         continue;
15704
15705                 rcvmbx += 0x8;
15706
15707                 if (sndmbx & 0x4)
15708                         sndmbx -= 0x4;
15709                 else
15710                         sndmbx += 0xc;
15711         }
15712
15713         tg3_init_coal(tp);
15714
15715         pci_set_drvdata(pdev, dev);
15716
15717         if (tg3_flag(tp, 5717_PLUS)) {
15718                 /* Resume a low-power mode */
15719                 tg3_frob_aux_power(tp, false);
15720         }
15721
15722         err = register_netdev(dev);
15723         if (err) {
15724                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15725                 goto err_out_apeunmap;
15726         }
15727
15728         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15729                     tp->board_part_number,
15730                     tp->pci_chip_rev_id,
15731                     tg3_bus_string(tp, str),
15732                     dev->dev_addr);
15733
15734         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15735                 struct phy_device *phydev;
15736                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15737                 netdev_info(dev,
15738                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15739                             phydev->drv->name, dev_name(&phydev->dev));
15740         } else {
15741                 char *ethtype;
15742
15743                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15744                         ethtype = "10/100Base-TX";
15745                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15746                         ethtype = "1000Base-SX";
15747                 else
15748                         ethtype = "10/100/1000Base-T";
15749
15750                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15751                             "(WireSpeed[%d], EEE[%d])\n",
15752                             tg3_phy_string(tp), ethtype,
15753                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15754                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15755         }
15756
15757         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15758                     (dev->features & NETIF_F_RXCSUM) != 0,
15759                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15760                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15761                     tg3_flag(tp, ENABLE_ASF) != 0,
15762                     tg3_flag(tp, TSO_CAPABLE) != 0);
15763         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15764                     tp->dma_rwctrl,
15765                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15766                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15767
15768         pci_save_state(pdev);
15769
15770         return 0;
15771
15772 err_out_apeunmap:
15773         if (tp->aperegs) {
15774                 iounmap(tp->aperegs);
15775                 tp->aperegs = NULL;
15776         }
15777
15778 err_out_iounmap:
15779         if (tp->regs) {
15780                 iounmap(tp->regs);
15781                 tp->regs = NULL;
15782         }
15783
15784 err_out_free_dev:
15785         free_netdev(dev);
15786
15787 err_out_power_down:
15788         pci_set_power_state(pdev, PCI_D3hot);
15789
15790 err_out_free_res:
15791         pci_release_regions(pdev);
15792
15793 err_out_disable_pdev:
15794         pci_disable_device(pdev);
15795         pci_set_drvdata(pdev, NULL);
15796         return err;
15797 }
15798
15799 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15800 {
15801         struct net_device *dev = pci_get_drvdata(pdev);
15802
15803         if (dev) {
15804                 struct tg3 *tp = netdev_priv(dev);
15805
15806                 if (tp->fw)
15807                         release_firmware(tp->fw);
15808
15809                 tg3_reset_task_cancel(tp);
15810
15811                 if (tg3_flag(tp, USE_PHYLIB)) {
15812                         tg3_phy_fini(tp);
15813                         tg3_mdio_fini(tp);
15814                 }
15815
15816                 unregister_netdev(dev);
15817                 if (tp->aperegs) {
15818                         iounmap(tp->aperegs);
15819                         tp->aperegs = NULL;
15820                 }
15821                 if (tp->regs) {
15822                         iounmap(tp->regs);
15823                         tp->regs = NULL;
15824                 }
15825                 free_netdev(dev);
15826                 pci_release_regions(pdev);
15827                 pci_disable_device(pdev);
15828                 pci_set_drvdata(pdev, NULL);
15829         }
15830 }
15831
15832 #ifdef CONFIG_PM_SLEEP
15833 static int tg3_suspend(struct device *device)
15834 {
15835         struct pci_dev *pdev = to_pci_dev(device);
15836         struct net_device *dev = pci_get_drvdata(pdev);
15837         struct tg3 *tp = netdev_priv(dev);
15838         int err;
15839
15840         if (!netif_running(dev))
15841                 return 0;
15842
15843         tg3_reset_task_cancel(tp);
15844         tg3_phy_stop(tp);
15845         tg3_netif_stop(tp);
15846
15847         del_timer_sync(&tp->timer);
15848
15849         tg3_full_lock(tp, 1);
15850         tg3_disable_ints(tp);
15851         tg3_full_unlock(tp);
15852
15853         netif_device_detach(dev);
15854
15855         tg3_full_lock(tp, 0);
15856         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15857         tg3_flag_clear(tp, INIT_COMPLETE);
15858         tg3_full_unlock(tp);
15859
15860         err = tg3_power_down_prepare(tp);
15861         if (err) {
15862                 int err2;
15863
15864                 tg3_full_lock(tp, 0);
15865
15866                 tg3_flag_set(tp, INIT_COMPLETE);
15867                 err2 = tg3_restart_hw(tp, 1);
15868                 if (err2)
15869                         goto out;
15870
15871                 tp->timer.expires = jiffies + tp->timer_offset;
15872                 add_timer(&tp->timer);
15873
15874                 netif_device_attach(dev);
15875                 tg3_netif_start(tp);
15876
15877 out:
15878                 tg3_full_unlock(tp);
15879
15880                 if (!err2)
15881                         tg3_phy_start(tp);
15882         }
15883
15884         return err;
15885 }
15886
15887 static int tg3_resume(struct device *device)
15888 {
15889         struct pci_dev *pdev = to_pci_dev(device);
15890         struct net_device *dev = pci_get_drvdata(pdev);
15891         struct tg3 *tp = netdev_priv(dev);
15892         int err;
15893
15894         if (!netif_running(dev))
15895                 return 0;
15896
15897         netif_device_attach(dev);
15898
15899         tg3_full_lock(tp, 0);
15900
15901         tg3_flag_set(tp, INIT_COMPLETE);
15902         err = tg3_restart_hw(tp, 1);
15903         if (err)
15904                 goto out;
15905
15906         tp->timer.expires = jiffies + tp->timer_offset;
15907         add_timer(&tp->timer);
15908
15909         tg3_netif_start(tp);
15910
15911 out:
15912         tg3_full_unlock(tp);
15913
15914         if (!err)
15915                 tg3_phy_start(tp);
15916
15917         return err;
15918 }
15919
15920 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15921 #define TG3_PM_OPS (&tg3_pm_ops)
15922
15923 #else
15924
15925 #define TG3_PM_OPS NULL
15926
15927 #endif /* CONFIG_PM_SLEEP */
15928
15929 /**
15930  * tg3_io_error_detected - called when PCI error is detected
15931  * @pdev: Pointer to PCI device
15932  * @state: The current pci connection state
15933  *
15934  * This function is called after a PCI bus error affecting
15935  * this device has been detected.
15936  */
15937 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15938                                               pci_channel_state_t state)
15939 {
15940         struct net_device *netdev = pci_get_drvdata(pdev);
15941         struct tg3 *tp = netdev_priv(netdev);
15942         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15943
15944         netdev_info(netdev, "PCI I/O error detected\n");
15945
15946         rtnl_lock();
15947
15948         if (!netif_running(netdev))
15949                 goto done;
15950
15951         tg3_phy_stop(tp);
15952
15953         tg3_netif_stop(tp);
15954
15955         del_timer_sync(&tp->timer);
15956
15957         /* Want to make sure that the reset task doesn't run */
15958         tg3_reset_task_cancel(tp);
15959         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15960
15961         netif_device_detach(netdev);
15962
15963         /* Clean up software state, even if MMIO is blocked */
15964         tg3_full_lock(tp, 0);
15965         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15966         tg3_full_unlock(tp);
15967
15968 done:
15969         if (state == pci_channel_io_perm_failure)
15970                 err = PCI_ERS_RESULT_DISCONNECT;
15971         else
15972                 pci_disable_device(pdev);
15973
15974         rtnl_unlock();
15975
15976         return err;
15977 }
15978
15979 /**
15980  * tg3_io_slot_reset - called after the pci bus has been reset.
15981  * @pdev: Pointer to PCI device
15982  *
15983  * Restart the card from scratch, as if from a cold-boot.
15984  * At this point, the card has exprienced a hard reset,
15985  * followed by fixups by BIOS, and has its config space
15986  * set up identically to what it was at cold boot.
15987  */
15988 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15989 {
15990         struct net_device *netdev = pci_get_drvdata(pdev);
15991         struct tg3 *tp = netdev_priv(netdev);
15992         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15993         int err;
15994
15995         rtnl_lock();
15996
15997         if (pci_enable_device(pdev)) {
15998                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15999                 goto done;
16000         }
16001
16002         pci_set_master(pdev);
16003         pci_restore_state(pdev);
16004         pci_save_state(pdev);
16005
16006         if (!netif_running(netdev)) {
16007                 rc = PCI_ERS_RESULT_RECOVERED;
16008                 goto done;
16009         }
16010
16011         err = tg3_power_up(tp);
16012         if (err)
16013                 goto done;
16014
16015         rc = PCI_ERS_RESULT_RECOVERED;
16016
16017 done:
16018         rtnl_unlock();
16019
16020         return rc;
16021 }
16022
16023 /**
16024  * tg3_io_resume - called when traffic can start flowing again.
16025  * @pdev: Pointer to PCI device
16026  *
16027  * This callback is called when the error recovery driver tells
16028  * us that its OK to resume normal operation.
16029  */
16030 static void tg3_io_resume(struct pci_dev *pdev)
16031 {
16032         struct net_device *netdev = pci_get_drvdata(pdev);
16033         struct tg3 *tp = netdev_priv(netdev);
16034         int err;
16035
16036         rtnl_lock();
16037
16038         if (!netif_running(netdev))
16039                 goto done;
16040
16041         tg3_full_lock(tp, 0);
16042         tg3_flag_set(tp, INIT_COMPLETE);
16043         err = tg3_restart_hw(tp, 1);
16044         tg3_full_unlock(tp);
16045         if (err) {
16046                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16047                 goto done;
16048         }
16049
16050         netif_device_attach(netdev);
16051
16052         tp->timer.expires = jiffies + tp->timer_offset;
16053         add_timer(&tp->timer);
16054
16055         tg3_netif_start(tp);
16056
16057         tg3_phy_start(tp);
16058
16059 done:
16060         rtnl_unlock();
16061 }
16062
16063 static struct pci_error_handlers tg3_err_handler = {
16064         .error_detected = tg3_io_error_detected,
16065         .slot_reset     = tg3_io_slot_reset,
16066         .resume         = tg3_io_resume
16067 };
16068
16069 static struct pci_driver tg3_driver = {
16070         .name           = DRV_MODULE_NAME,
16071         .id_table       = tg3_pci_tbl,
16072         .probe          = tg3_init_one,
16073         .remove         = __devexit_p(tg3_remove_one),
16074         .err_handler    = &tg3_err_handler,
16075         .driver.pm      = TG3_PM_OPS,
16076 };
16077
16078 static int __init tg3_init(void)
16079 {
16080         return pci_register_driver(&tg3_driver);
16081 }
16082
16083 static void __exit tg3_cleanup(void)
16084 {
16085         pci_unregister_driver(&tg3_driver);
16086 }
16087
16088 module_init(tg3_init);
16089 module_exit(tg3_cleanup);