Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
[linux-drm-fsl-dcu.git] / drivers / net / ethernet / chelsio / cxgb / cxgb2.c
1 /*****************************************************************************
2  *                                                                           *
3  * File: cxgb2.c                                                             *
4  * $Revision: 1.25 $                                                         *
5  * $Date: 2005/06/22 00:43:25 $                                              *
6  * Description:                                                              *
7  *  Chelsio 10Gb Ethernet Driver.                                            *
8  *                                                                           *
9  * This program is free software; you can redistribute it and/or modify      *
10  * it under the terms of the GNU General Public License, version 2, as       *
11  * published by the Free Software Foundation.                                *
12  *                                                                           *
13  * You should have received a copy of the GNU General Public License along   *
14  * with this program; if not, see <http://www.gnu.org/licenses/>.            *
15  *                                                                           *
16  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
17  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
18  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
19  *                                                                           *
20  * http://www.chelsio.com                                                    *
21  *                                                                           *
22  * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
23  * All rights reserved.                                                      *
24  *                                                                           *
25  * Maintainers: maintainers@chelsio.com                                      *
26  *                                                                           *
27  * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
28  *          Tina Yang               <tainay@chelsio.com>                     *
29  *          Felix Marti             <felix@chelsio.com>                      *
30  *          Scott Bardone           <sbardone@chelsio.com>                   *
31  *          Kurt Ottaway            <kottaway@chelsio.com>                   *
32  *          Frank DiMambro          <frank@chelsio.com>                      *
33  *                                                                           *
34  * History:                                                                  *
35  *                                                                           *
36  ****************************************************************************/
37
38 #include "common.h"
39 #include <linux/module.h>
40 #include <linux/init.h>
41 #include <linux/pci.h>
42 #include <linux/netdevice.h>
43 #include <linux/etherdevice.h>
44 #include <linux/if_vlan.h>
45 #include <linux/mii.h>
46 #include <linux/sockios.h>
47 #include <linux/dma-mapping.h>
48 #include <asm/uaccess.h>
49
50 #include "cpl5_cmd.h"
51 #include "regs.h"
52 #include "gmac.h"
53 #include "cphy.h"
54 #include "sge.h"
55 #include "tp.h"
56 #include "espi.h"
57 #include "elmer0.h"
58
59 #include <linux/workqueue.h>
60
61 static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
62 {
63         schedule_delayed_work(&ap->stats_update_task, secs * HZ);
64 }
65
66 static inline void cancel_mac_stats_update(struct adapter *ap)
67 {
68         cancel_delayed_work(&ap->stats_update_task);
69 }
70
71 #define MAX_CMDQ_ENTRIES        16384
72 #define MAX_CMDQ1_ENTRIES       1024
73 #define MAX_RX_BUFFERS          16384
74 #define MAX_RX_JUMBO_BUFFERS    16384
75 #define MAX_TX_BUFFERS_HIGH     16384U
76 #define MAX_TX_BUFFERS_LOW      1536U
77 #define MAX_TX_BUFFERS          1460U
78 #define MIN_FL_ENTRIES          32
79
80 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
81                          NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
82                          NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
83
84 /*
85  * The EEPROM is actually bigger but only the first few bytes are used so we
86  * only report those.
87  */
88 #define EEPROM_SIZE 32
89
90 MODULE_DESCRIPTION(DRV_DESCRIPTION);
91 MODULE_AUTHOR("Chelsio Communications");
92 MODULE_LICENSE("GPL");
93
94 static int dflt_msg_enable = DFLT_MSG_ENABLE;
95
96 module_param(dflt_msg_enable, int, 0);
97 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");
98
99 #define HCLOCK 0x0
100 #define LCLOCK 0x1
101
102 /* T1 cards powersave mode */
103 static int t1_clock(struct adapter *adapter, int mode);
104 static int t1powersave = 1;     /* HW default is powersave mode. */
105
106 module_param(t1powersave, int, 0);
107 MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");
108
109 static int disable_msi = 0;
110 module_param(disable_msi, int, 0);
111 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
112
113 static const char pci_speed[][4] = {
114         "33", "66", "100", "133"
115 };
116
117 /*
118  * Setup MAC to receive the types of packets we want.
119  */
120 static void t1_set_rxmode(struct net_device *dev)
121 {
122         struct adapter *adapter = dev->ml_priv;
123         struct cmac *mac = adapter->port[dev->if_port].mac;
124         struct t1_rx_mode rm;
125
126         rm.dev = dev;
127         mac->ops->set_rx_mode(mac, &rm);
128 }
129
130 static void link_report(struct port_info *p)
131 {
132         if (!netif_carrier_ok(p->dev))
133                 netdev_info(p->dev, "link down\n");
134         else {
135                 const char *s = "10Mbps";
136
137                 switch (p->link_config.speed) {
138                         case SPEED_10000: s = "10Gbps"; break;
139                         case SPEED_1000:  s = "1000Mbps"; break;
140                         case SPEED_100:   s = "100Mbps"; break;
141                 }
142
143                 netdev_info(p->dev, "link up, %s, %s-duplex\n",
144                             s, p->link_config.duplex == DUPLEX_FULL
145                             ? "full" : "half");
146         }
147 }
148
149 void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
150                         int speed, int duplex, int pause)
151 {
152         struct port_info *p = &adapter->port[port_id];
153
154         if (link_stat != netif_carrier_ok(p->dev)) {
155                 if (link_stat)
156                         netif_carrier_on(p->dev);
157                 else
158                         netif_carrier_off(p->dev);
159                 link_report(p);
160
161                 /* multi-ports: inform toe */
162                 if ((speed > 0) && (adapter->params.nports > 1)) {
163                         unsigned int sched_speed = 10;
164                         switch (speed) {
165                         case SPEED_1000:
166                                 sched_speed = 1000;
167                                 break;
168                         case SPEED_100:
169                                 sched_speed = 100;
170                                 break;
171                         case SPEED_10:
172                                 sched_speed = 10;
173                                 break;
174                         }
175                         t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
176                 }
177         }
178 }
179
/*
 * Bring a port's link up: reset the MAC, (re)program its address and RX
 * mode, kick off PHY autonegotiation/forced-speed setup, then enable the
 * MAC datapath.  The ordering matters: the MAC must be reset and fully
 * configured before RX/TX are enabled.
 */
static void link_start(struct port_info *p)
{
	struct cmac *mac = p->mac;

	mac->ops->reset(mac);
	/* Not all MACs support setting the address (NULL op). */
	if (mac->ops->macaddress_set)
		mac->ops->macaddress_set(mac, p->dev->dev_addr);
	t1_set_rxmode(p->dev);
	t1_link_start(p->phy, mac, &p->link_config);
	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
191
/*
 * Enable hardware checksum offload in the TP block.  IP header checksum
 * offload is only turned on when TSO is available (hw_features on port 0),
 * as it is needed for TSO only; TCP checksum offload is always enabled.
 */
static void enable_hw_csum(struct adapter *adapter)
{
	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
		t1_tp_set_ip_checksum_offload(adapter->tp, 1);	/* for TSO only */
	t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}
198
199 /*
200  * Things to do upon first use of a card.
201  * This must run with the rtnl lock held.
202  */
static int cxgb_up(struct adapter *adapter)
{
	int err = 0;

	/* One-time hardware initialisation, done on the first ifup only. */
	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = t1_init_hw_modules(adapter);
		if (err)
			goto out_err;

		enable_hw_csum(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	t1_interrupts_clear(adapter);

	/* Prefer MSI; fall back to a shared legacy IRQ if MSI is disabled
	 * by module parameter or pci_enable_msi() fails. */
	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
	err = request_irq(adapter->pdev->irq, t1_interrupt,
			  adapter->params.has_msi ? 0 : IRQF_SHARED,
			  adapter->name, adapter);
	if (err) {
		if (adapter->params.has_msi)
			pci_disable_msi(adapter->pdev);

		goto out_err;
	}

	/* Start the datapath and unmask interrupts only once the IRQ
	 * handler is in place. */
	t1_sge_start(adapter->sge);
	t1_interrupts_enable(adapter);
out_err:
	return err;
}
234
235 /*
236  * Release resources when all the ports have been stopped.
237  */
static void cxgb_down(struct adapter *adapter)
{
	/* Quiesce the SGE and mask interrupts before tearing the IRQ down,
	 * so no handler can run after free_irq() returns. */
	t1_sge_stop(adapter->sge);
	t1_interrupts_disable(adapter);
	free_irq(adapter->pdev->irq, adapter);
	if (adapter->params.has_msi)
		pci_disable_msi(adapter->pdev);
}
246
247 static int cxgb_open(struct net_device *dev)
248 {
249         int err;
250         struct adapter *adapter = dev->ml_priv;
251         int other_ports = adapter->open_device_map & PORT_MASK;
252
253         napi_enable(&adapter->napi);
254         if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
255                 napi_disable(&adapter->napi);
256                 return err;
257         }
258
259         __set_bit(dev->if_port, &adapter->open_device_map);
260         link_start(&adapter->port[dev->if_port]);
261         netif_start_queue(dev);
262         if (!other_ports && adapter->params.stats_update_period)
263                 schedule_mac_stats_update(adapter,
264                                           adapter->params.stats_update_period);
265
266         t1_vlan_mode(adapter, dev->features);
267         return 0;
268 }
269
/*
 * ndo_stop: quiesce this port and, when it is the last open port, cancel
 * the statistics work and shut the whole adapter down.
 */
static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct cmac *mac = p->mac;

	netif_stop_queue(dev);
	napi_disable(&adapter->napi);
	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
	netif_carrier_off(dev);

	clear_bit(dev->if_port, &adapter->open_device_map);
	if (adapter->params.stats_update_period &&
	    !(adapter->open_device_map & PORT_MASK)) {
		/* Stop statistics accumulation. */
		smp_mb__after_clear_bit();
		/* Empty lock/unlock pair: lets any update task that is
		 * already running (holding work_lock) finish before the
		 * delayed work is cancelled. */
		spin_lock(&adapter->work_lock);   /* sync with update task */
		spin_unlock(&adapter->work_lock);
		cancel_mac_stats_update(adapter);
	}

	if (!adapter->open_device_map)
		cxgb_down(adapter);
	return 0;
}
295
/*
 * ndo_get_stats: fold the MAC's hardware counters into the netdev
 * net_device_stats structure for this port.
 */
static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct net_device_stats *ns = &p->netstats;
	const struct cmac_statistics *pstats;

	/* Do a full update of the MAC stats */
	pstats = p->mac->ops->statistics_update(p->mac,
						MAC_STATS_UPDATE_FULL);

	ns->tx_packets = pstats->TxUnicastFramesOK +
		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

	ns->rx_packets = pstats->RxUnicastFramesOK +
		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

	ns->tx_bytes = pstats->TxOctetsOK;
	ns->rx_bytes = pstats->RxOctetsOK;

	/* Aggregate error counters. */
	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
		pstats->RxFCSErrors + pstats->RxAlignErrors +
		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
		pstats->RxSymbolErrors + pstats->RxRuntErrors;

	ns->multicast  = pstats->RxMulticastFramesOK;
	ns->collisions = pstats->TxTotalCollisions;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
		pstats->RxJabberErrors;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = pstats->RxFCSErrors;
	ns->rx_frame_errors  = pstats->RxAlignErrors;
	ns->rx_fifo_errors   = 0;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = pstats->TxUnderrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = pstats->TxLateCollisions;
	return ns;
}
343
344 static u32 get_msglevel(struct net_device *dev)
345 {
346         struct adapter *adapter = dev->ml_priv;
347
348         return adapter->msg_enable;
349 }
350
351 static void set_msglevel(struct net_device *dev, u32 val)
352 {
353         struct adapter *adapter = dev->ml_priv;
354
355         adapter->msg_enable = val;
356 }
357
/*
 * Names reported for `ethtool -S`.  The entry order here MUST match the
 * order in which get_stats() emits values, including the conditional
 * trailing ESPI group.
 */
static char stats_strings[][ETH_GSTRING_LEN] = {
	/* MAC TX stats */
	"TxOctetsOK",
	"TxOctetsBad",
	"TxUnicastFramesOK",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames",
	"TxFramesWithDeferredXmissions",
	"TxLateCollisions",
	"TxTotalCollisions",
	"TxFramesAbortedDueToXSCollisions",
	"TxUnderrun",
	"TxLengthErrors",
	"TxInternalMACXmitError",
	"TxFramesWithExcessiveDeferral",
	"TxFCSErrors",
	"TxJumboFramesOk",
	"TxJumboOctetsOk",

	/* MAC RX stats */
	"RxOctetsOK",
	"RxOctetsBad",
	"RxUnicastFramesOK",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames",
	"RxFCSErrors",
	"RxAlignErrors",
	"RxSymbolErrors",
	"RxDataErrors",
	"RxSequenceErrors",
	"RxRuntErrors",
	"RxJabberErrors",
	"RxInternalMACRcvError",
	"RxInRangeLengthErrors",
	"RxOutOfRangeLengthField",
	"RxFrameTooLongErrors",
	"RxJumboFramesOk",
	"RxJumboOctetsOk",

	/* Port stats */
	"RxCsumGood",
	"TxCsumOffload",
	"TxTso",
	"RxVlan",
	"TxVlan",
	"TxNeedHeadroom",

	/* Interrupt stats */
	"rx drops",
	"pure_rsps",
	"unhandled irqs",
	"respQ_empty",
	"respQ_overflow",
	"freelistQ_empty",
	"pkt_too_big",
	"pkt_mismatch",
	"cmdQ_full0",
	"cmdQ_full1",

	/* ESPI stats (only emitted when adapter->espi is present) */
	"espi_DIP2ParityErr",
	"espi_DIP4Err",
	"espi_RxDrops",
	"espi_TxDrops",
	"espi_RxOvfl",
	"espi_ParityErr"
};
424
425 #define T2_REGMAP_SIZE (3 * 1024)
426
427 static int get_regs_len(struct net_device *dev)
428 {
429         return T2_REGMAP_SIZE;
430 }
431
432 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
433 {
434         struct adapter *adapter = dev->ml_priv;
435
436         strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
437         strlcpy(info->version, DRV_VERSION, sizeof(info->version));
438         strlcpy(info->bus_info, pci_name(adapter->pdev),
439                 sizeof(info->bus_info));
440 }
441
442 static int get_sset_count(struct net_device *dev, int sset)
443 {
444         switch (sset) {
445         case ETH_SS_STATS:
446                 return ARRAY_SIZE(stats_strings);
447         default:
448                 return -EOPNOTSUPP;
449         }
450 }
451
452 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
453 {
454         if (stringset == ETH_SS_STATS)
455                 memcpy(data, stats_strings, sizeof(stats_strings));
456 }
457
/*
 * ethtool get_ethtool_stats: emit MAC, per-port SGE, interrupt and
 * (when present) ESPI counters.  Emission order MUST stay in sync with
 * stats_strings[].
 */
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	const struct cmac_statistics *s;
	const struct sge_intr_counts *t;
	struct sge_port_stats ss;

	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
	t = t1_sge_get_intr_counts(adapter->sge);
	t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);

	/* MAC TX stats */
	*data++ = s->TxOctetsOK;
	*data++ = s->TxOctetsBad;
	*data++ = s->TxUnicastFramesOK;
	*data++ = s->TxMulticastFramesOK;
	*data++ = s->TxBroadcastFramesOK;
	*data++ = s->TxPauseFrames;
	*data++ = s->TxFramesWithDeferredXmissions;
	*data++ = s->TxLateCollisions;
	*data++ = s->TxTotalCollisions;
	*data++ = s->TxFramesAbortedDueToXSCollisions;
	*data++ = s->TxUnderrun;
	*data++ = s->TxLengthErrors;
	*data++ = s->TxInternalMACXmitError;
	*data++ = s->TxFramesWithExcessiveDeferral;
	*data++ = s->TxFCSErrors;
	*data++ = s->TxJumboFramesOK;
	*data++ = s->TxJumboOctetsOK;

	/* MAC RX stats */
	*data++ = s->RxOctetsOK;
	*data++ = s->RxOctetsBad;
	*data++ = s->RxUnicastFramesOK;
	*data++ = s->RxMulticastFramesOK;
	*data++ = s->RxBroadcastFramesOK;
	*data++ = s->RxPauseFrames;
	*data++ = s->RxFCSErrors;
	*data++ = s->RxAlignErrors;
	*data++ = s->RxSymbolErrors;
	*data++ = s->RxDataErrors;
	*data++ = s->RxSequenceErrors;
	*data++ = s->RxRuntErrors;
	*data++ = s->RxJabberErrors;
	*data++ = s->RxInternalMACRcvError;
	*data++ = s->RxInRangeLengthErrors;
	*data++ = s->RxOutOfRangeLengthField;
	*data++ = s->RxFrameTooLongErrors;
	*data++ = s->RxJumboFramesOK;
	*data++ = s->RxJumboOctetsOK;

	/* Per-port SGE stats */
	*data++ = ss.rx_cso_good;
	*data++ = ss.tx_cso;
	*data++ = ss.tx_tso;
	*data++ = ss.vlan_xtract;
	*data++ = ss.vlan_insert;
	*data++ = ss.tx_need_hdrroom;

	/* Interrupt stats */
	*data++ = t->rx_drops;
	*data++ = t->pure_rsps;
	*data++ = t->unhandled_irqs;
	*data++ = t->respQ_empty;
	*data++ = t->respQ_overflow;
	*data++ = t->freelistQ_empty;
	*data++ = t->pkt_too_big;
	*data++ = t->pkt_mismatch;
	*data++ = t->cmdQ_full[0];
	*data++ = t->cmdQ_full[1];

	/* ESPI stats, only on adapters that have an ESPI block */
	if (adapter->espi) {
		const struct espi_intr_counts *e;

		e = t1_espi_get_intr_counts(adapter->espi);
		*data++ = e->DIP2_parity_err;
		*data++ = e->DIP4_err;
		*data++ = e->rx_drops;
		*data++ = e->tx_drops;
		*data++ = e->rx_ovflw;
		*data++ = e->parity_err;
	}
}
539
/*
 * Copy the adapter registers in [start, end] (byte offsets, inclusive)
 * into the snapshot buffer at the same offsets.  Note: arithmetic on the
 * void *buf relies on the GNU byte-granularity extension used throughout
 * the kernel.
 */
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = readl(ap->regs + start);
}
548
/*
 * ethtool get_regs: dump the interesting hardware register ranges into a
 * zeroed T2_REGMAP_SIZE snapshot; unlisted ranges read back as zero.
 */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->ml_priv;

	/*
	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
	 */
	regs->version = 2;

	memset(buf, 0, T2_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
	reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
	reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
	reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
	reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
	reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
	reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
	reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
	reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
	reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}
571
/*
 * ethtool get_settings (legacy API): report the port's supported and
 * advertised modes and, when the link is up, the negotiated speed/duplex.
 */
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, p->link_config.speed);
		cmd->duplex = p->link_config.duplex;
	} else {
		/* No link: -1 is the legacy "unknown" speed/duplex value. */
		ethtool_cmd_speed_set(cmd, -1);
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy->mdio.prtad;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
596
/*
 * Map an ethtool (speed, duplex) pair to the corresponding SUPPORTED_*
 * capability bit.  Returns 0 for combinations the hardware has no
 * capability bit for (unknown speeds, or 10G half-duplex).
 */
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
		break;		/* was a silent fallthrough-to-end; be explicit */
	default:
		break;		/* unknown speed -> no capability */
	}
	return cap;
}
626
627 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
628                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
629                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
630                       ADVERTISED_10000baseT_Full)
631
/*
 * ethtool set_settings (legacy API): configure forced speed/duplex or the
 * autonegotiation advertising mask, and restart the link if the interface
 * is running.
 */
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;             /* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		u32 speed = ethtool_cmd_speed(cmd);
		int cap = speed_duplex_to_caps(speed, cmd->duplex);

		/* Forcing 1G is rejected even if supported (speed == SPEED_1000
		 * check) — 1G requires autonegotiation on this hardware;
		 * NOTE(review): presumed reason, confirm against PHY docs. */
		if (!(lc->supported & cap) || (speed == SPEED_1000))
			return -EINVAL;
		lc->requested_speed = speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		/* x & (x - 1) != 0 <=> more than one bit set: if the user
		 * advertises several modes, advertise everything we support
		 * instead of an arbitrary subset. */
		if (cmd->advertising & (cmd->advertising - 1))
			cmd->advertising = lc->supported;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t1_link_start(p->phy, p->mac, lc);
	return 0;
}
666
667 static void get_pauseparam(struct net_device *dev,
668                            struct ethtool_pauseparam *epause)
669 {
670         struct adapter *adapter = dev->ml_priv;
671         struct port_info *p = &adapter->port[dev->if_port];
672
673         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
674         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
675         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
676 }
677
/*
 * ethtool set_pauseparam: set the requested flow-control mode.  With
 * autoneg enabled the new setting is negotiated via a link restart;
 * otherwise it is programmed straight into the MAC.
 */
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;		/* autoneg requested but unsupported */

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		/* Renegotiate so the peer learns the new pause setting. */
		if (netif_running(dev))
			t1_link_start(p->phy, p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		/* -1/-1: leave speed and duplex unchanged, set only fc. */
		if (netif_running(dev))
			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
							 lc->fc);
	}
	return 0;
}
707
/*
 * ethtool get_ringparam: report SGE free-list and command-queue sizes.
 * jumbo_fl selects which of the two free lists holds jumbo buffers —
 * T1B boards use free list 1 for jumbo, others use free list 0.
 */
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->ml_priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_CMDQ_ENTRIES;

	e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
	e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
	e->tx_pending = adapter->params.sge.cmdQ_size[0];
}
721
/*
 * ethtool set_ringparam: validate and store new SGE queue sizes.  Only
 * allowed before the hardware has been initialised (queues are sized at
 * first ifup); afterwards -EBUSY is returned.
 */
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->ml_priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	/* Reject mini ring requests, out-of-range sizes, and a TX ring too
	 * small to hold one maximally-fragmented packet per port (+1). */
	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_CMDQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
	/* Command queue 1 is capped at MAX_CMDQ1_ENTRIES. */
	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
		MAX_CMDQ1_ENTRIES : e->tx_pending;
	return 0;
}
745
746 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
747 {
748         struct adapter *adapter = dev->ml_priv;
749
750         adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
751         adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
752         adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
753         t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
754         return 0;
755 }
756
757 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
758 {
759         struct adapter *adapter = dev->ml_priv;
760
761         c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
762         c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
763         c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
764         return 0;
765 }
766
767 static int get_eeprom_len(struct net_device *dev)
768 {
769         struct adapter *adapter = dev->ml_priv;
770
771         return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
772 }
773
774 #define EEPROM_MAGIC(ap) \
775         (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
776
/*
 * ethtool get_eeprom: read the requested EEPROM range.  Reads happen in
 * whole 32-bit words covering [offset & ~3, offset + len), staged through
 * a word-aligned bounce buffer before the exact byte range is copied out.
 * NOTE(review): assumes the ethtool core has clamped offset + len to
 * get_eeprom_len() (EEPROM_SIZE) so buf cannot be overrun — confirm.
 */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i;
	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
	struct adapter *adapter = dev->ml_priv;

	e->magic = EEPROM_MAGIC(adapter);
	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
		t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
	memcpy(data, buf + e->offset, e->len);
	return 0;
}
790
/* ethtool method table for T1/T2 adapters. */
static const struct ethtool_ops t1_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.get_sset_count    = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
};
812
813 static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
814 {
815         struct adapter *adapter = dev->ml_priv;
816         struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;
817
818         return mdio_mii_ioctl(mdio, if_mii(req), cmd);
819 }
820
821 static int t1_change_mtu(struct net_device *dev, int new_mtu)
822 {
823         int ret;
824         struct adapter *adapter = dev->ml_priv;
825         struct cmac *mac = adapter->port[dev->if_port].mac;
826
827         if (!mac->ops->set_mtu)
828                 return -EOPNOTSUPP;
829         if (new_mtu < 68)
830                 return -EINVAL;
831         if ((ret = mac->ops->set_mtu(mac, new_mtu)))
832                 return ret;
833         dev->mtu = new_mtu;
834         return 0;
835 }
836
837 static int t1_set_mac_addr(struct net_device *dev, void *p)
838 {
839         struct adapter *adapter = dev->ml_priv;
840         struct cmac *mac = adapter->port[dev->if_port].mac;
841         struct sockaddr *addr = p;
842
843         if (!mac->ops->macaddress_set)
844                 return -EOPNOTSUPP;
845
846         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
847         mac->ops->macaddress_set(mac, dev->dev_addr);
848         return 0;
849 }
850
851 static netdev_features_t t1_fix_features(struct net_device *dev,
852         netdev_features_t features)
853 {
854         /*
855          * Since there is no support for separate rx/tx vlan accel
856          * enable/disable make sure tx flag is always in same state as rx.
857          */
858         if (features & NETIF_F_HW_VLAN_CTAG_RX)
859                 features |= NETIF_F_HW_VLAN_CTAG_TX;
860         else
861                 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
862
863         return features;
864 }
865
866 static int t1_set_features(struct net_device *dev, netdev_features_t features)
867 {
868         netdev_features_t changed = dev->features ^ features;
869         struct adapter *adapter = dev->ml_priv;
870
871         if (changed & NETIF_F_HW_VLAN_CTAG_RX)
872                 t1_vlan_mode(adapter, features);
873
874         return 0;
875 }
876 #ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll hook: invoke the adapter's interrupt handler directly, with
 * local interrupts disabled, so netconsole and friends can receive
 * packets even when normal interrupt delivery is unavailable.
 */
static void t1_netpoll(struct net_device *dev)
{
        unsigned long flags;
        struct adapter *adapter = dev->ml_priv;

        local_irq_save(flags);
        t1_interrupt(adapter->pdev->irq, adapter);
        local_irq_restore(flags);
}
886 #endif
887
888 /*
889  * Periodic accumulation of MAC statistics.  This is used only if the MAC
890  * does not have any other way to prevent stats counter overflow.
891  */
static void mac_stats_task(struct work_struct *work)
{
        int i;
        struct adapter *adapter =
                container_of(work, struct adapter, stats_update_task.work);

        /* Fold the fast-overflowing hardware counters for every port that
         * is currently up into the software statistics. */
        for_each_port(adapter, i) {
                struct port_info *p = &adapter->port[i];

                if (netif_running(p->dev))
                        p->mac->ops->statistics_update(p->mac,
                                                       MAC_STATS_UPDATE_FAST);
        }

        /* Schedule the next statistics update if any port is active. */
        /* work_lock serializes against open/close changing open_device_map. */
        spin_lock(&adapter->work_lock);
        if (adapter->open_device_map & PORT_MASK)
                schedule_mac_stats_update(adapter,
                                          adapter->params.stats_update_period);
        spin_unlock(&adapter->work_lock);
}
913
914 /*
915  * Processes elmer0 external interrupts in process context.
916  */
static void ext_intr_task(struct work_struct *work)
{
        struct adapter *adapter =
                container_of(work, struct adapter, ext_intr_handler_task);

        /* Scheduled by t1_elmer0_ext_intr(), which masked EXT interrupts. */
        t1_elmer0_ext_intr_handler(adapter);

        /* Now reenable external interrupts */
        spin_lock_irq(&adapter->async_lock);
        adapter->slow_intr_mask |= F_PL_INTR_EXT;
        /* NOTE(review): writing the EXT bit to A_PL_CAUSE appears to ack
         * the latched cause before unmasking - confirm against hw docs. */
        writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
        writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
                   adapter->regs + A_PL_ENABLE);
        spin_unlock_irq(&adapter->async_lock);
}
932
933 /*
934  * Interrupt-context handler for elmer0 external interrupts.
935  */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
        /*
         * Schedule a task to handle external interrupts as we require
         * a process context.  We disable EXT interrupts in the interim
         * and let the task reenable them when it's done.
         */
        adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
        /* Push the narrowed mask; SGE data interrupts stay enabled. */
        writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
                   adapter->regs + A_PL_ENABLE);
        schedule_work(&adapter->ext_intr_handler_task);
}
948
/*
 * Handle an unrecoverable adapter error: stop the SGE and mask all
 * interrupts (only if the adapter finished full initialization), then
 * log the event.  The device is left suspended; no recovery is attempted.
 */
void t1_fatal_err(struct adapter *adapter)
{
        if (adapter->flags & FULL_INIT_DONE) {
                t1_sge_stop(adapter->sge);
                t1_interrupts_disable(adapter);
        }
        pr_alert("%s: encountered fatal error, operation suspended\n",
                 adapter->name);
}
958
/* net_device operations installed on every port's net device in init_one(). */
static const struct net_device_ops cxgb_netdev_ops = {
        .ndo_open               = cxgb_open,
        .ndo_stop               = cxgb_close,
        .ndo_start_xmit         = t1_start_xmit,
        .ndo_get_stats          = t1_get_stats,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = t1_set_rxmode,
        .ndo_do_ioctl           = t1_ioctl,
        .ndo_change_mtu         = t1_change_mtu,
        .ndo_set_mac_address    = t1_set_mac_addr,
        .ndo_fix_features       = t1_fix_features,
        .ndo_set_features       = t1_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = t1_netpoll,
#endif
};
975
/*
 * PCI probe callback.
 *
 * Brings up one adapter: enables the PCI device, configures DMA masks,
 * maps BAR0, allocates one net device per port (the adapter structure
 * lives in the first netdev's private area), initializes the software
 * modules and registers the netdevs.  Registration failures for
 * individual ports are tolerated as long as at least one port registers.
 */
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        int i, err, pci_using_dac = 0;
        unsigned long mmio_start, mmio_len;
        const struct board_info *bi;
        struct adapter *adapter = NULL;
        struct port_info *pi;

        pr_info_once("%s - version %s\n", DRV_DESCRIPTION, DRV_VERSION);

        err = pci_enable_device(pdev);
        if (err)
                return err;

        /* BAR0 must be a memory BAR; it holds the register file. */
        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                pr_err("%s: cannot find PCI device memory base address\n",
                       pci_name(pdev));
                err = -ENODEV;
                goto out_disable_pdev;
        }

        /* Prefer 64-bit DMA; fall back to 32-bit if unavailable. */
        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;

                if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
                        pr_err("%s: unable to obtain 64-bit DMA for "
                               "consistent allocations\n", pci_name(pdev));
                        err = -ENODEV;
                        goto out_disable_pdev;
                }

        } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
                pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
                goto out_disable_pdev;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
                goto out_disable_pdev;
        }

        pci_set_master(pdev);

        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);
        bi = t1_get_board_info(ent->driver_data);

        /* One net device per port.  Only the first netdev carries the
         * adapter struct in its private area (size 0 for the others). */
        for (i = 0; i < bi->port_number; ++i) {
                struct net_device *netdev;

                netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }

                SET_NETDEV_DEV(netdev, &pdev->dev);

                /* First iteration: carve out and initialize the adapter. */
                if (!adapter) {
                        adapter = netdev_priv(netdev);
                        adapter->pdev = pdev;
                        adapter->port[0].dev = netdev;  /* so we don't leak it */

                        adapter->regs = ioremap(mmio_start, mmio_len);
                        if (!adapter->regs) {
                                pr_err("%s: cannot map device registers\n",
                                       pci_name(pdev));
                                err = -ENOMEM;
                                goto out_free_dev;
                        }

                        if (t1_get_board_rev(adapter, bi, &adapter->params)) {
                                err = -ENODEV;    /* Can't handle this chip rev */
                                goto out_free_dev;
                        }

                        adapter->name = pci_name(pdev);
                        adapter->msg_enable = dflt_msg_enable;
                        adapter->mmio_len = mmio_len;

                        spin_lock_init(&adapter->tpi_lock);
                        spin_lock_init(&adapter->work_lock);
                        spin_lock_init(&adapter->async_lock);
                        spin_lock_init(&adapter->mac_lock);

                        INIT_WORK(&adapter->ext_intr_handler_task,
                                  ext_intr_task);
                        INIT_DELAYED_WORK(&adapter->stats_update_task,
                                          mac_stats_task);

                        pci_set_drvdata(pdev, netdev);
                }

                /* Per-port netdev setup: features, ops, NAPI, ethtool. */
                pi = &adapter->port[i];
                pi->dev = netdev;
                netif_carrier_off(netdev);
                netdev->irq = pdev->irq;
                netdev->if_port = i;
                netdev->mem_start = mmio_start;
                netdev->mem_end = mmio_start + mmio_len - 1;
                netdev->ml_priv = adapter;
                netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
                        NETIF_F_RXCSUM;
                netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
                        NETIF_F_RXCSUM | NETIF_F_LLTX;

                if (pci_using_dac)
                        netdev->features |= NETIF_F_HIGHDMA;
                if (vlan_tso_capable(adapter)) {
                        netdev->features |=
                                NETIF_F_HW_VLAN_CTAG_TX |
                                NETIF_F_HW_VLAN_CTAG_RX;
                        netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;

                        /* T204: disable TSO */
                        if (!(is_T2(adapter)) || bi->port_number != 4) {
                                netdev->hw_features |= NETIF_F_TSO;
                                netdev->features |= NETIF_F_TSO;
                        }
                }

                netdev->netdev_ops = &cxgb_netdev_ops;
                /* Reserve headroom for the CPL header prepended on TX. */
                netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
                        sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);

                netif_napi_add(netdev, &adapter->napi, t1_poll, 64);

                SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
        }

        if (t1_init_sw_modules(adapter, bi) < 0) {
                err = -ENODEV;
                goto out_free_dev;
        }

        /*
         * The card is now ready to go.  If any errors occur during device
         * registration we do not fail the whole card but rather proceed only
         * with the ports we manage to register successfully.  However we must
         * register at least one net device.
         */
        for (i = 0; i < bi->port_number; ++i) {
                err = register_netdev(adapter->port[i].dev);
                if (err)
                        pr_warn("%s: cannot register net device %s, skipping\n",
                                pci_name(pdev), adapter->port[i].dev->name);
                else {
                        /*
                         * Change the name we use for messages to the name of
                         * the first successfully registered interface.
                         */
                        if (!adapter->registered_device_map)
                                adapter->name = adapter->port[i].dev->name;

                        __set_bit(i, &adapter->registered_device_map);
                }
        }
        if (!adapter->registered_device_map) {
                pr_err("%s: could not register any net devices\n",
                       pci_name(pdev));
                goto out_release_adapter_res;
        }

        pr_info("%s: %s (rev %d), %s %dMHz/%d-bit\n",
                adapter->name, bi->desc, adapter->params.chip_revision,
                adapter->params.pci.is_pcix ? "PCIX" : "PCI",
                adapter->params.pci.speed, adapter->params.pci.width);

        /*
         * Set the T1B ASIC and memory clocks.
         */
        if (t1powersave)
                adapter->t1powersave = LCLOCK;  /* HW default is powersave mode. */
        else
                adapter->t1powersave = HCLOCK;
        if (t1_is_T1B(adapter))
                t1_clock(adapter, t1powersave);

        return 0;

        /* Error unwind: each label undoes everything acquired above it. */
out_release_adapter_res:
        t1_free_sw_modules(adapter);
out_free_dev:
        if (adapter) {
                if (adapter->regs)
                        iounmap(adapter->regs);
                for (i = bi->port_number - 1; i >= 0; --i)
                        if (adapter->port[i].dev)
                                free_netdev(adapter->port[i].dev);
        }
        pci_release_regions(pdev);
out_disable_pdev:
        pci_disable_device(pdev);
        return err;
}
1172
1173 static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
1174 {
1175         int data;
1176         int i;
1177         u32 val;
1178
1179         enum {
1180                 S_CLOCK = 1 << 3,
1181                 S_DATA = 1 << 4
1182         };
1183
1184         for (i = (nbits - 1); i > -1; i--) {
1185
1186                 udelay(50);
1187
1188                 data = ((bitdata >> i) & 0x1);
1189                 __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1190
1191                 if (data)
1192                         val |= S_DATA;
1193                 else
1194                         val &= ~S_DATA;
1195
1196                 udelay(50);
1197
1198                 /* Set SCLOCK low */
1199                 val &= ~S_CLOCK;
1200                 __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1201
1202                 udelay(50);
1203
1204                 /* Write SCLOCK high */
1205                 val |= S_CLOCK;
1206                 __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1207
1208         }
1209 }
1210
/*
 * Re-clock a T1B ASIC by serially programming its core and memory clock
 * synthesizers through the ELMER0 GPO pins (see bit_bang()).
 *
 * @mode bit 0 selects the target (HCLOCK = full speed, LCLOCK =
 * powersave); bit 1 set means query-only, return without touching
 * hardware.  Returns -ENODEV on non-T1B chips and -EALREADY when the
 * ASIC is already running in the requested mode.
 */
static int t1_clock(struct adapter *adapter, int mode)
{
        u32 val;
        int M_CORE_VAL;
        int M_MEM_VAL;

        /* Synthesizer programming parameters: T/N/M values and their bit
         * widths, plus the GPO control pins used to load them. */
        enum {
                M_CORE_BITS     = 9,
                T_CORE_VAL      = 0,
                T_CORE_BITS     = 2,
                N_CORE_VAL      = 0,
                N_CORE_BITS     = 2,
                M_MEM_BITS      = 9,
                T_MEM_VAL       = 0,
                T_MEM_BITS      = 2,
                N_MEM_VAL       = 0,
                N_MEM_BITS      = 2,
                NP_LOAD         = 1 << 17,
                S_LOAD_MEM      = 1 << 5,
                S_LOAD_CORE     = 1 << 6,
                S_CLOCK         = 1 << 3
        };

        if (!t1_is_T1B(adapter))
                return -ENODEV; /* Can't re-clock this chip. */

        if (mode & 2)
                return 0;       /* show current mode. */

        if ((adapter->t1powersave & 1) == (mode & 1))
                return -EALREADY;       /* ASIC already running in mode. */

        /* M divisor values for each mode - hardware-specific constants. */
        if ((mode & 1) == HCLOCK) {
                M_CORE_VAL = 0x14;
                M_MEM_VAL = 0x18;
                adapter->t1powersave = HCLOCK;  /* overclock */
        } else {
                M_CORE_VAL = 0xe;
                M_MEM_VAL = 0x10;
                adapter->t1powersave = LCLOCK;  /* underclock */
        }

        /* Don't interrupt this serial stream! */
        spin_lock(&adapter->tpi_lock);

        /* Initialize for ASIC core */
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val |= NP_LOAD;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val &= ~S_LOAD_CORE;
        val &= ~S_CLOCK;
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);

        /* Serial program the ASIC clock synthesizer */
        bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
        bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
        bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
        udelay(50);

        /* Finish ASIC core */
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val |= S_LOAD_CORE;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val &= ~S_LOAD_CORE;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);

        /* Initialize for memory */
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val |= NP_LOAD;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val &= ~S_LOAD_MEM;
        val &= ~S_CLOCK;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);

        /* Serial program the memory clock synthesizer */
        bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
        bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
        bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
        udelay(50);

        /* Finish memory */
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val |= S_LOAD_MEM;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val &= ~S_LOAD_MEM;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);

        spin_unlock(&adapter->tpi_lock);

        return 0;
}
1320
/*
 * Soft-reset the chip by bouncing it through PCI power state D3hot
 * (PM CSR = 3) and back to D0 (PM CSR = 0).
 */
static inline void t1_sw_reset(struct pci_dev *pdev)
{
        pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
        pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}
1326
/*
 * PCI remove callback: undo init_one() in reverse order, then soft-reset
 * the chip so it comes back clean on the next probe.
 */
static void remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct adapter *adapter = dev->ml_priv;
        int i;

        /* Unregister only the ports that registered successfully in probe. */
        for_each_port(adapter, i) {
                if (test_bit(i, &adapter->registered_device_map))
                        unregister_netdev(adapter->port[i].dev);
        }

        t1_free_sw_modules(adapter);
        iounmap(adapter->regs);

        /* 'i' was left at the port count by the loop above; walk back down
         * freeing every netdev.  The adapter lives in port 0's netdev priv,
         * so it is released last. */
        while (--i >= 0) {
                if (adapter->port[i].dev)
                        free_netdev(adapter->port[i].dev);
        }

        pci_release_regions(pdev);
        pci_disable_device(pdev);
        t1_sw_reset(pdev);
}
1350
/* PCI driver glue; registered/unregistered by module_pci_driver() below. */
static struct pci_driver cxgb_pci_driver = {
        .name     = DRV_NAME,
        .id_table = t1_pci_tbl,
        .probe    = init_one,
        .remove   = remove_one,
};

/* Standard module init/exit boilerplate for a PCI driver. */
module_pci_driver(cxgb_pci_driver);