Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
[linux-drm-fsl-dcu.git] / drivers / net / chelsio / cxgb2.c
1 /*****************************************************************************
2  *                                                                           *
3  * File: cxgb2.c                                                             *
4  * $Revision: 1.25 $                                                         *
5  * $Date: 2005/06/22 00:43:25 $                                              *
6  * Description:                                                              *
7  *  Chelsio 10Gb Ethernet Driver.                                            *
8  *                                                                           *
9  * This program is free software; you can redistribute it and/or modify      *
10  * it under the terms of the GNU General Public License, version 2, as       *
11  * published by the Free Software Foundation.                                *
12  *                                                                           *
13  * You should have received a copy of the GNU General Public License along   *
14  * with this program; if not, write to the Free Software Foundation, Inc.,   *
15  * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
16  *                                                                           *
17  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
18  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
19  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
20  *                                                                           *
21  * http://www.chelsio.com                                                    *
22  *                                                                           *
23  * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
24  * All rights reserved.                                                      *
25  *                                                                           *
26  * Maintainers: maintainers@chelsio.com                                      *
27  *                                                                           *
28  * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
29  *          Tina Yang               <tainay@chelsio.com>                     *
30  *          Felix Marti             <felix@chelsio.com>                      *
31  *          Scott Bardone           <sbardone@chelsio.com>                   *
32  *          Kurt Ottaway            <kottaway@chelsio.com>                   *
33  *          Frank DiMambro          <frank@chelsio.com>                      *
34  *                                                                           *
35  * History:                                                                  *
36  *                                                                           *
37  ****************************************************************************/
38
39 #include "common.h"
40 #include <linux/module.h>
41 #include <linux/init.h>
42 #include <linux/pci.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/if_vlan.h>
46 #include <linux/mii.h>
47 #include <linux/sockios.h>
48 #include <linux/dma-mapping.h>
49 #include <asm/uaccess.h>
50
51 #include "cpl5_cmd.h"
52 #include "regs.h"
53 #include "gmac.h"
54 #include "cphy.h"
55 #include "sge.h"
56 #include "tp.h"
57 #include "espi.h"
58 #include "elmer0.h"
59
60 #include <linux/workqueue.h>
61
62 static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
63 {
64         schedule_delayed_work(&ap->stats_update_task, secs * HZ);
65 }
66
67 static inline void cancel_mac_stats_update(struct adapter *ap)
68 {
69         cancel_delayed_work(&ap->stats_update_task);
70 }
71
/* SGE command-queue and free-list ring sizing limits (in entries). */
#define MAX_CMDQ_ENTRIES 16384
#define MAX_CMDQ1_ENTRIES 1024
#define MAX_RX_BUFFERS 16384
#define MAX_RX_JUMBO_BUFFERS 16384
#define MAX_TX_BUFFERS_HIGH     16384U
#define MAX_TX_BUFFERS_LOW      1536U
#define MAX_TX_BUFFERS          1460U
#define MIN_FL_ENTRIES 32

/* Default netif message-enable bitmap (see NETIF_MSG_* flags). */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

/* Module parameter: initial message-enable level for the driver. */
static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");

/* Clock mode selectors passed to t1_clock(). */
#define HCLOCK 0x0
#define LCLOCK 0x1

/* T1 cards powersave mode */
static int t1_clock(struct adapter *adapter, int mode);
static int t1powersave = 1;     /* HW default is powersave mode. */

module_param(t1powersave, int, 0);
MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");

/* Module parameter: non-zero forces legacy (INTx) interrupts instead of MSI. */
static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

/* PCI bus speed strings (MHz). */
static const char pci_speed[][4] = {
        "33", "66", "100", "133"
};
117
118 /*
119  * Setup MAC to receive the types of packets we want.
120  */
121 static void t1_set_rxmode(struct net_device *dev)
122 {
123         struct adapter *adapter = dev->priv;
124         struct cmac *mac = adapter->port[dev->if_port].mac;
125         struct t1_rx_mode rm;
126
127         rm.dev = dev;
128         rm.idx = 0;
129         rm.list = dev->mc_list;
130         mac->ops->set_rx_mode(mac, &rm);
131 }
132
133 static void link_report(struct port_info *p)
134 {
135         if (!netif_carrier_ok(p->dev))
136                 printk(KERN_INFO "%s: link down\n", p->dev->name);
137         else {
138                 const char *s = "10Mbps";
139
140                 switch (p->link_config.speed) {
141                         case SPEED_10000: s = "10Gbps"; break;
142                         case SPEED_1000:  s = "1000Mbps"; break;
143                         case SPEED_100:   s = "100Mbps"; break;
144                 }
145
146         printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
147                        p->dev->name, s,
148                        p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
149         }
150 }
151
152 void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
153                         int speed, int duplex, int pause)
154 {
155         struct port_info *p = &adapter->port[port_id];
156
157         if (link_stat != netif_carrier_ok(p->dev)) {
158                 if (link_stat)
159                         netif_carrier_on(p->dev);
160                 else
161                         netif_carrier_off(p->dev);
162                 link_report(p);
163
164                 /* multi-ports: inform toe */
165                 if ((speed > 0) && (adapter->params.nports > 1)) {
166                         unsigned int sched_speed = 10;
167                         switch (speed) {
168                         case SPEED_1000:
169                                 sched_speed = 1000;
170                                 break;
171                         case SPEED_100:
172                                 sched_speed = 100;
173                                 break;
174                         case SPEED_10:
175                                 sched_speed = 10;
176                                 break;
177                         }
178                         t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
179                 }
180         }
181 }
182
/*
 * Bring up one port's link: reset the MAC, program its station address
 * and RX mode, start PHY link setup, then enable the MAC in both
 * directions.  The order matters: the MAC is reset before being
 * reprogrammed and only enabled last.
 */
static void link_start(struct port_info *p)
{
        struct cmac *mac = p->mac;

        mac->ops->reset(mac);
        /* Not every MAC implementation provides macaddress_set. */
        if (mac->ops->macaddress_set)
                mac->ops->macaddress_set(mac, p->dev->dev_addr);
        t1_set_rxmode(p->dev);
        t1_link_start(p->phy, mac, &p->link_config);
        mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
194
/*
 * Turn on the TP block's checksum offload engines.  IP header checksum
 * offload is enabled only on TSO-capable adapters (needed for TSO only),
 * UDP checksum offload only when the adapter supports it; TCP checksum
 * offload is always enabled.
 */
static void enable_hw_csum(struct adapter *adapter)
{
        if (adapter->flags & TSO_CAPABLE)
                t1_tp_set_ip_checksum_offload(adapter->tp, 1);  /* for TSO only */
        if (adapter->flags & UDP_CSUM_CAPABLE)
                t1_tp_set_udp_checksum_offload(adapter->tp, 1);
        t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}
203
/*
 * Things to do upon first use of a card.
 * This must run with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adapter)
{
        int err = 0;

        /* One-time hardware module initialization, on the first ever open. */
        if (!(adapter->flags & FULL_INIT_DONE)) {
                err = t1_init_hw_modules(adapter);
                if (err)
                        goto out_err;

                enable_hw_csum(adapter);
                adapter->flags |= FULL_INIT_DONE;
        }

        t1_interrupts_clear(adapter);

        /* Try MSI unless disabled by module parameter; fall back to INTx
         * (shared IRQ) when MSI is unavailable. */
        adapter->params.has_msi = !disable_msi && pci_enable_msi(adapter->pdev) == 0;
        err = request_irq(adapter->pdev->irq,
                          t1_select_intr_handler(adapter),
                          adapter->params.has_msi ? 0 : IRQF_SHARED,
                          adapter->name, adapter);
        if (err) {
                /* Undo the MSI enable if request_irq failed. */
                if (adapter->params.has_msi)
                        pci_disable_msi(adapter->pdev);

                goto out_err;
        }

        /* Interrupts are enabled only after the SGE is started. */
        t1_sge_start(adapter->sge);
        t1_interrupts_enable(adapter);
 out_err:
        return err;
}
240
/*
 * Release resources when all the ports have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        /* Quiesce the SGE and mask interrupts before releasing the IRQ. */
        t1_sge_stop(adapter->sge);
        t1_interrupts_disable(adapter);
        free_irq(adapter->pdev->irq, adapter);
        /* Undo the MSI enable performed in cxgb_up(), if any. */
        if (adapter->params.has_msi)
                pci_disable_msi(adapter->pdev);
}
252
253 static int cxgb_open(struct net_device *dev)
254 {
255         int err;
256         struct adapter *adapter = dev->priv;
257         int other_ports = adapter->open_device_map & PORT_MASK;
258
259         if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
260                 return err;
261
262         __set_bit(dev->if_port, &adapter->open_device_map);
263         link_start(&adapter->port[dev->if_port]);
264         netif_start_queue(dev);
265         if (!other_ports && adapter->params.stats_update_period)
266                 schedule_mac_stats_update(adapter,
267                                           adapter->params.stats_update_period);
268         return 0;
269 }
270
/*
 * net_device stop handler: quiesce this port, stop the periodic MAC
 * statistics task when the last port closes, and tear down adapter-wide
 * resources once no ports remain open.
 */
static int cxgb_close(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct cmac *mac = p->mac;

        /* Stop new TX, disable the MAC both ways, and drop carrier. */
        netif_stop_queue(dev);
        mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
        netif_carrier_off(dev);

        clear_bit(dev->if_port, &adapter->open_device_map);
        if (adapter->params.stats_update_period &&
            !(adapter->open_device_map & PORT_MASK)) {
                /* Stop statistics accumulation. */
                /*
                 * Make the cleared port bit visible, then take/release
                 * work_lock so any stats update running right now finishes
                 * before we cancel the delayed work.
                 */
                smp_mb__after_clear_bit();
                spin_lock(&adapter->work_lock);   /* sync with update task */
                spin_unlock(&adapter->work_lock);
                cancel_mac_stats_update(adapter);
        }

        /* Last port closed: release IRQs and stop the SGE. */
        if (!adapter->open_device_map)
                cxgb_down(adapter);
        return 0;
}
295
296 static struct net_device_stats *t1_get_stats(struct net_device *dev)
297 {
298         struct adapter *adapter = dev->priv;
299         struct port_info *p = &adapter->port[dev->if_port];
300         struct net_device_stats *ns = &p->netstats;
301         const struct cmac_statistics *pstats;
302
303         /* Do a full update of the MAC stats */
304         pstats = p->mac->ops->statistics_update(p->mac,
305                                                 MAC_STATS_UPDATE_FULL);
306
307         ns->tx_packets = pstats->TxUnicastFramesOK +
308                 pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
309
310         ns->rx_packets = pstats->RxUnicastFramesOK +
311                 pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
312
313         ns->tx_bytes = pstats->TxOctetsOK;
314         ns->rx_bytes = pstats->RxOctetsOK;
315
316         ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
317                 pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
318         ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
319                 pstats->RxFCSErrors + pstats->RxAlignErrors +
320                 pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
321                 pstats->RxSymbolErrors + pstats->RxRuntErrors;
322
323         ns->multicast  = pstats->RxMulticastFramesOK;
324         ns->collisions = pstats->TxTotalCollisions;
325
326         /* detailed rx_errors */
327         ns->rx_length_errors = pstats->RxFrameTooLongErrors +
328                 pstats->RxJabberErrors;
329         ns->rx_over_errors   = 0;
330         ns->rx_crc_errors    = pstats->RxFCSErrors;
331         ns->rx_frame_errors  = pstats->RxAlignErrors;
332         ns->rx_fifo_errors   = 0;
333         ns->rx_missed_errors = 0;
334
335         /* detailed tx_errors */
336         ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
337         ns->tx_carrier_errors   = 0;
338         ns->tx_fifo_errors      = pstats->TxUnderrun;
339         ns->tx_heartbeat_errors = 0;
340         ns->tx_window_errors    = pstats->TxLateCollisions;
341         return ns;
342 }
343
344 static u32 get_msglevel(struct net_device *dev)
345 {
346         struct adapter *adapter = dev->priv;
347
348         return adapter->msg_enable;
349 }
350
351 static void set_msglevel(struct net_device *dev, u32 val)
352 {
353         struct adapter *adapter = dev->priv;
354
355         adapter->msg_enable = val;
356 }
357
/*
 * ethtool statistics names.  The order here MUST match the order in
 * which get_stats() writes values into its output array.
 */
static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK",
        "TxOctetsBad",
        "TxUnicastFramesOK",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames",
        "TxFramesWithDeferredXmissions",
        "TxLateCollisions",
        "TxTotalCollisions",
        "TxFramesAbortedDueToXSCollisions",
        "TxUnderrun",
        "TxLengthErrors",
        "TxInternalMACXmitError",
        "TxFramesWithExcessiveDeferral",
        "TxFCSErrors",

        "RxOctetsOK",
        "RxOctetsBad",
        "RxUnicastFramesOK",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames",
        "RxFCSErrors",
        "RxAlignErrors",
        "RxSymbolErrors",
        "RxDataErrors",
        "RxSequenceErrors",
        "RxRuntErrors",
        "RxJabberErrors",
        "RxInternalMACRcvError",
        "RxInRangeLengthErrors",
        "RxOutOfRangeLengthField",
        "RxFrameTooLongErrors",

        /* Port stats */
        "RxPackets",
        "RxCsumGood",
        "TxPackets",
        "TxCsumOffload",
        "TxTso",
        "RxVlan",
        "TxVlan",

        /* Interrupt stats */
        "rx drops",
        "pure_rsps",
        "unhandled irqs",
        "respQ_empty",
        "respQ_overflow",
        "freelistQ_empty",
        "pkt_too_big",
        "pkt_mismatch",
        "cmdQ_full0",
        "cmdQ_full1",

        "espi_DIP2ParityErr",
        "espi_DIP4Err",
        "espi_RxDrops",
        "espi_TxDrops",
        "espi_RxOvfl",
        "espi_ParityErr"
};
421
/* Size in bytes of the register dump produced by get_regs(). */
#define T2_REGMAP_SIZE (3 * 1024)

/* ethtool get_regs_len handler: buffer size the core allocates for get_regs. */
static int get_regs_len(struct net_device *dev)
{
        return T2_REGMAP_SIZE;
}
428
429 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
430 {
431         struct adapter *adapter = dev->priv;
432
433         strcpy(info->driver, DRV_NAME);
434         strcpy(info->version, DRV_VERSION);
435         strcpy(info->fw_version, "N/A");
436         strcpy(info->bus_info, pci_name(adapter->pdev));
437 }
438
/* ethtool get_stats_count handler: number of statistics we report. */
static int get_stats_count(struct net_device *dev)
{
        return ARRAY_SIZE(stats_strings);
}
443
444 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
445 {
446         if (stringset == ETH_SS_STATS)
447                 memcpy(data, stats_strings, sizeof(stats_strings));
448 }
449
/*
 * ethtool get_ethtool_stats handler.  Writes one u64 per entry of
 * stats_strings[], in the same order: MAC counters, SGE per-port
 * counters, SGE interrupt counters, then (if present) ESPI counters.
 */
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct adapter *adapter = dev->priv;
        struct cmac *mac = adapter->port[dev->if_port].mac;
        const struct cmac_statistics *s;
        const struct sge_intr_counts *t;
        struct sge_port_stats ss;

        /* Refresh the MAC hardware counters before reading them. */
        s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);

        /* MAC TX counters */
        *data++ = s->TxOctetsOK;
        *data++ = s->TxOctetsBad;
        *data++ = s->TxUnicastFramesOK;
        *data++ = s->TxMulticastFramesOK;
        *data++ = s->TxBroadcastFramesOK;
        *data++ = s->TxPauseFrames;
        *data++ = s->TxFramesWithDeferredXmissions;
        *data++ = s->TxLateCollisions;
        *data++ = s->TxTotalCollisions;
        *data++ = s->TxFramesAbortedDueToXSCollisions;
        *data++ = s->TxUnderrun;
        *data++ = s->TxLengthErrors;
        *data++ = s->TxInternalMACXmitError;
        *data++ = s->TxFramesWithExcessiveDeferral;
        *data++ = s->TxFCSErrors;

        /* MAC RX counters */
        *data++ = s->RxOctetsOK;
        *data++ = s->RxOctetsBad;
        *data++ = s->RxUnicastFramesOK;
        *data++ = s->RxMulticastFramesOK;
        *data++ = s->RxBroadcastFramesOK;
        *data++ = s->RxPauseFrames;
        *data++ = s->RxFCSErrors;
        *data++ = s->RxAlignErrors;
        *data++ = s->RxSymbolErrors;
        *data++ = s->RxDataErrors;
        *data++ = s->RxSequenceErrors;
        *data++ = s->RxRuntErrors;
        *data++ = s->RxJabberErrors;
        *data++ = s->RxInternalMACRcvError;
        *data++ = s->RxInRangeLengthErrors;
        *data++ = s->RxOutOfRangeLengthField;
        *data++ = s->RxFrameTooLongErrors;

        /* SGE per-port counters */
        t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
        *data++ = ss.rx_packets;
        *data++ = ss.rx_cso_good;
        *data++ = ss.tx_packets;
        *data++ = ss.tx_cso;
        *data++ = ss.tx_tso;
        *data++ = ss.vlan_xtract;
        *data++ = ss.vlan_insert;

        /* SGE interrupt counters */
        t = t1_sge_get_intr_counts(adapter->sge);
        *data++ = t->rx_drops;
        *data++ = t->pure_rsps;
        *data++ = t->unhandled_irqs;
        *data++ = t->respQ_empty;
        *data++ = t->respQ_overflow;
        *data++ = t->freelistQ_empty;
        *data++ = t->pkt_too_big;
        *data++ = t->pkt_mismatch;
        *data++ = t->cmdQ_full[0];
        *data++ = t->cmdQ_full[1];

        /* ESPI counters, only on adapters that have an ESPI block. */
        if (adapter->espi) {
                const struct espi_intr_counts *e;

                e = t1_espi_get_intr_counts(adapter->espi);
                *data++ = e->DIP2_parity_err;
                *data++ = e->DIP4_err;
                *data++ = e->rx_drops;
                *data++ = e->tx_drops;
                *data++ = e->rx_ovflw;
                *data++ = e->parity_err;
        }
}
528
/*
 * Copy chip registers in the byte-offset range [start, end] (inclusive)
 * into the dump buffer at the same offsets, one 32-bit read at a time.
 * Note: `buf + start` is arithmetic on a void pointer, a GCC extension
 * the kernel relies on (treated as byte arithmetic).
 */
static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
        u32 *p = buf + start;

        for ( ; start <= end; start += sizeof(u32))
                *p++ = readl(ap->regs + start);
}
537
/*
 * ethtool get_regs handler: dump the interesting register blocks into a
 * T2_REGMAP_SIZE buffer, each block at its own hardware offset.
 */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct adapter *ap = dev->priv;

        /*
         * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
         */
        regs->version = 2;

        /* Zero first so gaps between the dumped blocks read as zero. */
        memset(buf, 0, T2_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
        reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
        reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
        reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
        reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
        reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
        reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
        reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
        reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
        reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}
560
561 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
562 {
563         struct adapter *adapter = dev->priv;
564         struct port_info *p = &adapter->port[dev->if_port];
565
566         cmd->supported = p->link_config.supported;
567         cmd->advertising = p->link_config.advertising;
568
569         if (netif_carrier_ok(dev)) {
570                 cmd->speed = p->link_config.speed;
571                 cmd->duplex = p->link_config.duplex;
572         } else {
573                 cmd->speed = -1;
574                 cmd->duplex = -1;
575         }
576
577         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
578         cmd->phy_address = p->phy->addr;
579         cmd->transceiver = XCVR_EXTERNAL;
580         cmd->autoneg = p->link_config.autoneg;
581         cmd->maxtxpkt = 0;
582         cmd->maxrxpkt = 0;
583         return 0;
584 }
585
586 static int speed_duplex_to_caps(int speed, int duplex)
587 {
588         int cap = 0;
589
590         switch (speed) {
591         case SPEED_10:
592                 if (duplex == DUPLEX_FULL)
593                         cap = SUPPORTED_10baseT_Full;
594                 else
595                         cap = SUPPORTED_10baseT_Half;
596                 break;
597         case SPEED_100:
598                 if (duplex == DUPLEX_FULL)
599                         cap = SUPPORTED_100baseT_Full;
600                 else
601                         cap = SUPPORTED_100baseT_Half;
602                 break;
603         case SPEED_1000:
604                 if (duplex == DUPLEX_FULL)
605                         cap = SUPPORTED_1000baseT_Full;
606                 else
607                         cap = SUPPORTED_1000baseT_Half;
608                 break;
609         case SPEED_10000:
610                 if (duplex == DUPLEX_FULL)
611                         cap = SUPPORTED_10000baseT_Full;
612         }
613         return cap;
614 }
615
/* All link-mode advertisement bits this driver understands. */
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
                      ADVERTISED_10000baseT_Full)
620
/*
 * ethtool set_settings handler: set forced speed/duplex or the
 * advertisement mask for autonegotiation, then restart the link if the
 * interface is running.
 */
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct link_config *lc = &p->link_config;

        if (!(lc->supported & SUPPORTED_Autoneg))
                return -EOPNOTSUPP;             /* can't change speed/duplex */

        if (cmd->autoneg == AUTONEG_DISABLE) {
                int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

                /*
                 * Reject unsupported speed/duplex combinations.  Forcing
                 * 1000 Mbps is also rejected -- presumably because gigabit
                 * requires autonegotiation here; confirm against the PHY
                 * layer if this is ever revisited.
                 */
                if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
                        return -EINVAL;
                lc->requested_speed = cmd->speed;
                lc->requested_duplex = cmd->duplex;
                lc->advertising = 0;
        } else {
                cmd->advertising &= ADVERTISED_MASK;
                /* More than one bit requested: advertise everything we support. */
                if (cmd->advertising & (cmd->advertising - 1))
                        cmd->advertising = lc->supported;
                cmd->advertising &= lc->supported;
                if (!cmd->advertising)
                        return -EINVAL;
                lc->requested_speed = SPEED_INVALID;
                lc->requested_duplex = DUPLEX_INVALID;
                lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
        }
        lc->autoneg = cmd->autoneg;
        /* Apply immediately if up; otherwise takes effect at next open. */
        if (netif_running(dev))
                t1_link_start(p->phy, p->mac, lc);
        return 0;
}
654
655 static void get_pauseparam(struct net_device *dev,
656                            struct ethtool_pauseparam *epause)
657 {
658         struct adapter *adapter = dev->priv;
659         struct port_info *p = &adapter->port[dev->if_port];
660
661         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
662         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
663         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
664 }
665
/*
 * ethtool set_pauseparam handler: update the requested flow-control
 * configuration and either renegotiate (autoneg) or program the MAC
 * directly (forced mode).
 */
static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = &adapter->port[dev->if_port];
        struct link_config *lc = &p->link_config;

        /* Pause autonegotiation needs general autoneg support on the port. */
        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & SUPPORTED_Autoneg)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (lc->autoneg == AUTONEG_ENABLE) {
                /* Restart link setup so the new pause bits get negotiated. */
                if (netif_running(dev))
                        t1_link_start(p->phy, p->mac, lc);
        } else {
                /* Forced mode: program the MAC's flow control directly. */
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
                if (netif_running(dev))
                        p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
                                                         lc->fc);
        }
        return 0;
}
695
696 static u32 get_rx_csum(struct net_device *dev)
697 {
698         struct adapter *adapter = dev->priv;
699
700         return (adapter->flags & RX_CSUM_ENABLED) != 0;
701 }
702
/* ethtool set_rx_csum handler: toggle the RX checksum offload flag. */
static int set_rx_csum(struct net_device *dev, u32 data)
{
        struct adapter *adapter = dev->priv;

        if (data)
                adapter->flags |= RX_CSUM_ENABLED;
        else
                adapter->flags &= ~RX_CSUM_ENABLED;
        return 0;
}
713
714 static int set_tso(struct net_device *dev, u32 value)
715 {
716         struct adapter *adapter = dev->priv;
717
718         if (!(adapter->flags & TSO_CAPABLE))
719                 return value ? -EOPNOTSUPP : 0;
720         return ethtool_op_set_tso(dev, value);
721 }
722
723 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
724 {
725         struct adapter *adapter = dev->priv;
726         int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
727
728         e->rx_max_pending = MAX_RX_BUFFERS;
729         e->rx_mini_max_pending = 0;
730         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
731         e->tx_max_pending = MAX_CMDQ_ENTRIES;
732
733         e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
734         e->rx_mini_pending = 0;
735         e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
736         e->tx_pending = adapter->params.sge.cmdQ_size[0];
737 }
738
739 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
740 {
741         struct adapter *adapter = dev->priv;
742         int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
743
744         if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
745             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
746             e->tx_pending > MAX_CMDQ_ENTRIES ||
747             e->rx_pending < MIN_FL_ENTRIES ||
748             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
749             e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
750                 return -EINVAL;
751
752         if (adapter->flags & FULL_INIT_DONE)
753         return -EBUSY;
754
755         adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
756         adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
757         adapter->params.sge.cmdQ_size[0] = e->tx_pending;
758         adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
759                 MAX_CMDQ1_ENTRIES : e->tx_pending;
760         return 0;
761 }
762
763 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
764 {
765         struct adapter *adapter = dev->priv;
766
767         /*
768          * If RX coalescing is requested we use NAPI, otherwise interrupts.
769          * This choice can be made only when all ports and the TOE are off.
770          */
771         if (adapter->open_device_map == 0)
772                 adapter->params.sge.polling = c->use_adaptive_rx_coalesce;
773
774         if (adapter->params.sge.polling) {
775                 adapter->params.sge.rx_coalesce_usecs = 0;
776         } else {
777                 adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
778         }
779         adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
780         adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
781         t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
782         return 0;
783 }
784
785 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
786 {
787         struct adapter *adapter = dev->priv;
788
789         c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
790         c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
791         c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
792         return 0;
793 }
794
795 static int get_eeprom_len(struct net_device *dev)
796 {
797         struct adapter *adapter = dev->priv;
798
799         return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
800 }
801
/* EEPROM magic: Chelsio PCI vendor ID combined with the chip version. */
#define EEPROM_MAGIC(ap) \
        (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))

/*
 * ethtool get_eeprom handler: read whole 32-bit words into an aligned
 * bounce buffer, then copy out the requested byte range.  Relies on the
 * ethtool core bounding e->offset/e->len by get_eeprom_len() so buf is
 * not overrun -- NOTE(review): confirm against the ethtool core.
 */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
        int i;
        u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
        struct adapter *adapter = dev->priv;

        e->magic = EEPROM_MAGIC(adapter);
        /* Round the start down to a word boundary and read word by word. */
        for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
                t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
        memcpy(data, buf + e->offset, e->len);
        return 0;
}
818
/* ethtool operations table registered for every port's net_device. */
static const struct ethtool_ops t1_ethtool_ops = {
        .get_settings      = get_settings,
        .set_settings      = set_settings,
        .get_drvinfo       = get_drvinfo,
        .get_msglevel      = get_msglevel,
        .set_msglevel      = set_msglevel,
        .get_ringparam     = get_sge_param,
        .set_ringparam     = set_sge_param,
        .get_coalesce      = get_coalesce,
        .set_coalesce      = set_coalesce,
        .get_eeprom_len    = get_eeprom_len,
        .get_eeprom        = get_eeprom,
        .get_pauseparam    = get_pauseparam,
        .set_pauseparam    = set_pauseparam,
        .get_rx_csum       = get_rx_csum,
        .set_rx_csum       = set_rx_csum,
        .get_tx_csum       = ethtool_op_get_tx_csum,
        .set_tx_csum       = ethtool_op_set_tx_csum,
        .get_sg            = ethtool_op_get_sg,
        .set_sg            = ethtool_op_set_sg,
        .get_link          = ethtool_op_get_link,
        .get_strings       = get_strings,
        .get_stats_count   = get_stats_count,
        .get_ethtool_stats = get_stats,
        .get_regs_len      = get_regs_len,
        .get_regs          = get_regs,
        .get_tso           = ethtool_op_get_tso,
        .set_tso           = set_tso,
};
848
849 static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
850 {
851         struct adapter *adapter = dev->priv;
852         struct mii_ioctl_data *data = if_mii(req);
853
854         switch (cmd) {
855         case SIOCGMIIPHY:
856                 data->phy_id = adapter->port[dev->if_port].phy->addr;
857                 /* FALLTHRU */
858         case SIOCGMIIREG: {
859                 struct cphy *phy = adapter->port[dev->if_port].phy;
860                 u32 val;
861
862                 if (!phy->mdio_read)
863             return -EOPNOTSUPP;
864                 phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
865                                &val);
866                 data->val_out = val;
867                 break;
868         }
869         case SIOCSMIIREG: {
870                 struct cphy *phy = adapter->port[dev->if_port].phy;
871
872                 if (!capable(CAP_NET_ADMIN))
873                     return -EPERM;
874                 if (!phy->mdio_write)
875             return -EOPNOTSUPP;
876                 phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
877                                 data->val_in);
878                 break;
879         }
880
881         default:
882                 return -EOPNOTSUPP;
883         }
884         return 0;
885 }
886
887 static int t1_change_mtu(struct net_device *dev, int new_mtu)
888 {
889         int ret;
890         struct adapter *adapter = dev->priv;
891         struct cmac *mac = adapter->port[dev->if_port].mac;
892
893         if (!mac->ops->set_mtu)
894         return -EOPNOTSUPP;
895         if (new_mtu < 68)
896         return -EINVAL;
897         if ((ret = mac->ops->set_mtu(mac, new_mtu)))
898                 return ret;
899         dev->mtu = new_mtu;
900         return 0;
901 }
902
903 static int t1_set_mac_addr(struct net_device *dev, void *p)
904 {
905         struct adapter *adapter = dev->priv;
906         struct cmac *mac = adapter->port[dev->if_port].mac;
907         struct sockaddr *addr = p;
908
909         if (!mac->ops->macaddress_set)
910                 return -EOPNOTSUPP;
911
912         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
913         mac->ops->macaddress_set(mac, dev->dev_addr);
914         return 0;
915 }
916
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
/*
 * Attach (or detach, when grp == NULL) the VLAN group for this device and
 * enable/disable hardware VLAN acceleration to match.  async_lock guards
 * vlan_grp against concurrent access from the interrupt-related paths.
 */
static void vlan_rx_register(struct net_device *dev,
                                   struct vlan_group *grp)
{
        struct adapter *adapter = dev->priv;

        spin_lock_irq(&adapter->async_lock);
        adapter->vlan_grp = grp;
        t1_set_vlan_accel(adapter, grp != NULL);
        spin_unlock_irq(&adapter->async_lock);
}

/* Remove a VLAN id: clear its slot in the group's device table. */
static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
        struct adapter *adapter = dev->priv;

        spin_lock_irq(&adapter->async_lock);
        if (adapter->vlan_grp)
                adapter->vlan_grp->vlan_devices[vid] = NULL;
        spin_unlock_irq(&adapter->async_lock);
}
#endif
939
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by netpoll clients (e.g. netconsole): call
 * the adapter's interrupt handler directly with local interrupts disabled.
 */
static void t1_netpoll(struct net_device *dev)
{
        unsigned long flags;
        struct adapter *adapter = dev->priv;

        local_irq_save(flags);
        t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter);
        local_irq_restore(flags);
}
#endif
951
952 /*
953  * Periodic accumulation of MAC statistics.  This is used only if the MAC
954  * does not have any other way to prevent stats counter overflow.
955  */
956 static void mac_stats_task(struct work_struct *work)
957 {
958         int i;
959         struct adapter *adapter =
960                 container_of(work, struct adapter, stats_update_task.work);
961
962         for_each_port(adapter, i) {
963                 struct port_info *p = &adapter->port[i];
964
965                 if (netif_running(p->dev))
966                         p->mac->ops->statistics_update(p->mac,
967                                                        MAC_STATS_UPDATE_FAST);
968         }
969
970         /* Schedule the next statistics update if any port is active. */
971         spin_lock(&adapter->work_lock);
972         if (adapter->open_device_map & PORT_MASK)
973                 schedule_mac_stats_update(adapter,
974                                           adapter->params.stats_update_period);
975         spin_unlock(&adapter->work_lock);
976 }
977
978 /*
979  * Processes elmer0 external interrupts in process context.
980  */
981 static void ext_intr_task(struct work_struct *work)
982 {
983         struct adapter *adapter =
984                 container_of(work, struct adapter, ext_intr_handler_task);
985
986         t1_elmer0_ext_intr_handler(adapter);
987
988         /* Now reenable external interrupts */
989         spin_lock_irq(&adapter->async_lock);
990         adapter->slow_intr_mask |= F_PL_INTR_EXT;
991         writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
992         writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
993                    adapter->regs + A_PL_ENABLE);
994         spin_unlock_irq(&adapter->async_lock);
995 }
996
997 /*
998  * Interrupt-context handler for elmer0 external interrupts.
999  */
1000 void t1_elmer0_ext_intr(struct adapter *adapter)
1001 {
1002         /*
1003          * Schedule a task to handle external interrupts as we require
1004          * a process context.  We disable EXT interrupts in the interim
1005          * and let the task reenable them when it's done.
1006          */
1007         adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
1008         writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
1009                    adapter->regs + A_PL_ENABLE);
1010         schedule_work(&adapter->ext_intr_handler_task);
1011 }
1012
/*
 * Fatal error handling: if the adapter completed full initialization,
 * stop the SGE and mask all interrupts, then log an alert.  The device
 * is left suspended.
 */
void t1_fatal_err(struct adapter *adapter)
{
        if (adapter->flags & FULL_INIT_DONE) {
                t1_sge_stop(adapter->sge);
                t1_interrupts_disable(adapter);
        }
        CH_ALERT("%s: encountered fatal error, operation suspended\n",
                 adapter->name);
}
1022
1023 static int __devinit init_one(struct pci_dev *pdev,
1024                               const struct pci_device_id *ent)
1025 {
1026         static int version_printed;
1027
1028         int i, err, pci_using_dac = 0;
1029         unsigned long mmio_start, mmio_len;
1030         const struct board_info *bi;
1031         struct adapter *adapter = NULL;
1032         struct port_info *pi;
1033
1034         if (!version_printed) {
1035                 printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
1036                        DRV_VERSION);
1037                 ++version_printed;
1038         }
1039
1040         err = pci_enable_device(pdev);
1041         if (err)
1042                 return err;
1043
1044         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1045                 CH_ERR("%s: cannot find PCI device memory base address\n",
1046                        pci_name(pdev));
1047                 err = -ENODEV;
1048                 goto out_disable_pdev;
1049         }
1050
1051         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1052                 pci_using_dac = 1;
1053
1054                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
1055                         CH_ERR("%s: unable to obtain 64-bit DMA for"
1056                                "consistent allocations\n", pci_name(pdev));
1057                         err = -ENODEV;
1058                         goto out_disable_pdev;
1059                 }
1060
1061         } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
1062                 CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
1063                 goto out_disable_pdev;
1064         }
1065
1066         err = pci_request_regions(pdev, DRV_NAME);
1067         if (err) {
1068                 CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
1069                 goto out_disable_pdev;
1070         }
1071
1072         pci_set_master(pdev);
1073
1074         mmio_start = pci_resource_start(pdev, 0);
1075         mmio_len = pci_resource_len(pdev, 0);
1076         bi = t1_get_board_info(ent->driver_data);
1077
1078         for (i = 0; i < bi->port_number; ++i) {
1079                 struct net_device *netdev;
1080
1081                 netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
1082                 if (!netdev) {
1083                         err = -ENOMEM;
1084                         goto out_free_dev;
1085                 }
1086
1087                 SET_MODULE_OWNER(netdev);
1088                 SET_NETDEV_DEV(netdev, &pdev->dev);
1089
1090                 if (!adapter) {
1091                         adapter = netdev->priv;
1092                         adapter->pdev = pdev;
1093                         adapter->port[0].dev = netdev;  /* so we don't leak it */
1094
1095                         adapter->regs = ioremap(mmio_start, mmio_len);
1096                         if (!adapter->regs) {
1097                                 CH_ERR("%s: cannot map device registers\n",
1098                                        pci_name(pdev));
1099                                 err = -ENOMEM;
1100                                 goto out_free_dev;
1101                         }
1102
1103                         if (t1_get_board_rev(adapter, bi, &adapter->params)) {
1104                                 err = -ENODEV;    /* Can't handle this chip rev */
1105                                 goto out_free_dev;
1106                         }
1107
1108                         adapter->name = pci_name(pdev);
1109                         adapter->msg_enable = dflt_msg_enable;
1110                         adapter->mmio_len = mmio_len;
1111
1112                         spin_lock_init(&adapter->tpi_lock);
1113                         spin_lock_init(&adapter->work_lock);
1114                         spin_lock_init(&adapter->async_lock);
1115                         spin_lock_init(&adapter->mac_lock);
1116
1117                         INIT_WORK(&adapter->ext_intr_handler_task,
1118                                   ext_intr_task);
1119                         INIT_DELAYED_WORK(&adapter->stats_update_task,
1120                                           mac_stats_task);
1121
1122                         pci_set_drvdata(pdev, netdev);
1123                 }
1124
1125                 pi = &adapter->port[i];
1126                 pi->dev = netdev;
1127                 netif_carrier_off(netdev);
1128                 netdev->irq = pdev->irq;
1129                 netdev->if_port = i;
1130                 netdev->mem_start = mmio_start;
1131                 netdev->mem_end = mmio_start + mmio_len - 1;
1132                 netdev->priv = adapter;
1133                 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
1134                 netdev->features |= NETIF_F_LLTX;
1135
1136                 adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
1137                 if (pci_using_dac)
1138                         netdev->features |= NETIF_F_HIGHDMA;
1139                 if (vlan_tso_capable(adapter)) {
1140 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1141                         adapter->flags |= VLAN_ACCEL_CAPABLE;
1142                         netdev->features |=
1143                                 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1144                         netdev->vlan_rx_register = vlan_rx_register;
1145                         netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
1146 #endif
1147
1148                         /* T204: disable TSO */
1149                         if (!(is_T2(adapter)) || bi->port_number != 4) {
1150                                 adapter->flags |= TSO_CAPABLE;
1151                                 netdev->features |= NETIF_F_TSO;
1152                         }
1153                 }
1154
1155                 netdev->open = cxgb_open;
1156                 netdev->stop = cxgb_close;
1157                 netdev->hard_start_xmit = t1_start_xmit;
1158                 netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
1159                         sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
1160                 netdev->get_stats = t1_get_stats;
1161                 netdev->set_multicast_list = t1_set_rxmode;
1162                 netdev->do_ioctl = t1_ioctl;
1163                 netdev->change_mtu = t1_change_mtu;
1164                 netdev->set_mac_address = t1_set_mac_addr;
1165 #ifdef CONFIG_NET_POLL_CONTROLLER
1166                 netdev->poll_controller = t1_netpoll;
1167 #endif
1168                 netdev->weight = 64;
1169
1170                 SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
1171         }
1172
1173         if (t1_init_sw_modules(adapter, bi) < 0) {
1174                 err = -ENODEV;
1175                 goto out_free_dev;
1176         }
1177
1178         /*
1179          * The card is now ready to go.  If any errors occur during device
1180          * registration we do not fail the whole card but rather proceed only
1181          * with the ports we manage to register successfully.  However we must
1182          * register at least one net device.
1183          */
1184         for (i = 0; i < bi->port_number; ++i) {
1185                 err = register_netdev(adapter->port[i].dev);
1186                 if (err)
1187                         CH_WARN("%s: cannot register net device %s, skipping\n",
1188                                 pci_name(pdev), adapter->port[i].dev->name);
1189                 else {
1190                         /*
1191                          * Change the name we use for messages to the name of
1192                          * the first successfully registered interface.
1193                          */
1194                         if (!adapter->registered_device_map)
1195                                 adapter->name = adapter->port[i].dev->name;
1196
1197                         __set_bit(i, &adapter->registered_device_map);
1198                 }
1199         }
1200         if (!adapter->registered_device_map) {
1201                 CH_ERR("%s: could not register any net devices\n",
1202                        pci_name(pdev));
1203                 goto out_release_adapter_res;
1204         }
1205
1206         printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
1207                bi->desc, adapter->params.chip_revision,
1208                adapter->params.pci.is_pcix ? "PCIX" : "PCI",
1209                adapter->params.pci.speed, adapter->params.pci.width);
1210
1211         /*
1212          * Set the T1B ASIC and memory clocks.
1213          */
1214         if (t1powersave)
1215                 adapter->t1powersave = LCLOCK;  /* HW default is powersave mode. */
1216         else
1217                 adapter->t1powersave = HCLOCK;
1218         if (t1_is_T1B(adapter))
1219                 t1_clock(adapter, t1powersave);
1220
1221         return 0;
1222
1223  out_release_adapter_res:
1224         t1_free_sw_modules(adapter);
1225  out_free_dev:
1226         if (adapter) {
1227                 if (adapter->regs)
1228                         iounmap(adapter->regs);
1229                 for (i = bi->port_number - 1; i >= 0; --i)
1230                         if (adapter->port[i].dev)
1231                                 free_netdev(adapter->port[i].dev);
1232         }
1233         pci_release_regions(pdev);
1234  out_disable_pdev:
1235         pci_disable_device(pdev);
1236         pci_set_drvdata(pdev, NULL);
1237         return err;
1238 }
1239
1240 static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
1241 {
1242         int data;
1243         int i;
1244         u32 val;
1245
1246         enum {
1247                 S_CLOCK = 1 << 3,
1248                 S_DATA = 1 << 4
1249         };
1250
1251         for (i = (nbits - 1); i > -1; i--) {
1252
1253                 udelay(50);
1254
1255                 data = ((bitdata >> i) & 0x1);
1256                 __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1257
1258                 if (data)
1259                         val |= S_DATA;
1260                 else
1261                         val &= ~S_DATA;
1262
1263                 udelay(50);
1264
1265                 /* Set SCLOCK low */
1266                 val &= ~S_CLOCK;
1267                 __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1268
1269                 udelay(50);
1270
1271                 /* Write SCLOCK high */
1272                 val |= S_CLOCK;
1273                 __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1274
1275         }
1276 }
1277
/*
 * Re-program the T1B ASIC core and memory clock synthesizers by
 * bit-banging M/N/T divider values over the ELMER0 GPO pins.  Bit 0 of
 * 'mode' selects HCLOCK (full-speed) or LCLOCK (power-save); bit 1 set
 * means "query only" and returns immediately.
 */
static int t1_clock(struct adapter *adapter, int mode)
{
        u32 val;
        int M_CORE_VAL;
        int M_MEM_VAL;

        enum {
                M_CORE_BITS = 9,
                T_CORE_VAL = 0,
                T_CORE_BITS = 2,
                N_CORE_VAL = 0,
                N_CORE_BITS = 2,
                M_MEM_BITS = 9,
                T_MEM_VAL = 0,
                T_MEM_BITS = 2,
                N_MEM_VAL = 0,
                N_MEM_BITS = 2,
                NP_LOAD = 1 << 17,
                S_LOAD_MEM = 1 << 5,
                S_LOAD_CORE = 1 << 6,
                S_CLOCK = 1 << 3
        };

        if (!t1_is_T1B(adapter))
                return -ENODEV; /* Can't re-clock this chip. */

        if (mode & 2) {
                return 0;       /* show current mode. */
        }

        if ((adapter->t1powersave & 1) == (mode & 1))
                return -EALREADY;       /* ASIC already running in mode. */

        if ((mode & 1) == HCLOCK) {
                M_CORE_VAL = 0x14;
                M_MEM_VAL = 0x18;
                adapter->t1powersave = HCLOCK;  /* overclock */
        } else {
                M_CORE_VAL = 0xe;
                M_MEM_VAL = 0x10;
                adapter->t1powersave = LCLOCK;  /* underclock */
        }

        /* Don't interrupt this serial stream! */
        spin_lock(&adapter->tpi_lock);

        /* Initialize for ASIC core */
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val |= NP_LOAD;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val &= ~S_LOAD_CORE;
        val &= ~S_CLOCK;
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);

        /* Serial program the ASIC clock synthesizer */
        bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
        bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
        bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
        udelay(50);

        /* Finish ASIC core */
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val |= S_LOAD_CORE;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val &= ~S_LOAD_CORE;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);

        /* Initialize for memory */
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val |= NP_LOAD;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val &= ~S_LOAD_MEM;
        val &= ~S_CLOCK;
        /* NOTE(review): unlike the core path above, a udelay precedes the
         * write here -- presumably intentional timing; verify against the
         * hardware programming guide before 'fixing'. */
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);

        /* Serial program the memory clock synthesizer */
        bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
        bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
        bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
        udelay(50);

        /* Finish memory */
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val |= S_LOAD_MEM;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
        udelay(50);
        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
        val &= ~S_LOAD_MEM;
        udelay(50);
        __t1_tpi_write(adapter, A_ELMER0_GPO, val);

        spin_unlock(&adapter->tpi_lock);

        return 0;
}
1388
/*
 * Software-reset the chip via its PCI power-management CSR: write power
 * state 3 (D3hot) then back to 0 (D0).  NOTE(review): relies on the
 * device resetting internal state on this transition -- confirm against
 * the A_PCICFG_PM_CSR register definition.
 */
static inline void t1_sw_reset(struct pci_dev *pdev)
{
        pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
        pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}
1394
/*
 * PCI remove: unregister every port that init_one() managed to register,
 * tear down software modules, unmap registers, free all netdevs, release
 * PCI resources, and finally soft-reset the chip.
 */
static void __devexit remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                int i;
                struct adapter *adapter = dev->priv;

                for_each_port(adapter, i)
                        if (test_bit(i, &adapter->registered_device_map))
                                unregister_netdev(adapter->port[i].dev);

                t1_free_sw_modules(adapter);
                iounmap(adapter->regs);
                /* NOTE(review): reuses 'i' from the loop above -- assumes
                 * for_each_port leaves it equal to the port count, so this
                 * frees every netdev in reverse order. */
                while (--i >= 0)
                        if (adapter->port[i].dev)
                                free_netdev(adapter->port[i].dev);

                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
                t1_sw_reset(pdev);
        }
}
1419
/* PCI driver registration glue. */
static struct pci_driver driver = {
        .name     = DRV_NAME,
        .id_table = t1_pci_tbl,
        .probe    = init_one,
        .remove   = __devexit_p(remove_one),
};
1426
/* Module entry point: register the PCI driver. */
static int __init t1_init_module(void)
{
        return pci_register_driver(&driver);
}
1431
/* Module exit point: unregister the PCI driver (invokes remove_one). */
static void __exit t1_cleanup_module(void)
{
        pci_unregister_driver(&driver);
}
1436
1437 module_init(t1_init_module);
1438 module_exit(t1_cleanup_module);