/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, ssid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 1, 0),  /* PE9000 */
        CH_DEVICE(0x21, 1, 1),  /* T302E */
        CH_DEVICE(0x22, 1, 2),  /* T310E */
        CH_DEVICE(0x23, 1, 3),  /* T320X */
        CH_DEVICE(0x24, 1, 1),  /* T302X */
        CH_DEVICE(0x25, 1, 3),  /* T320E */
        CH_DEVICE(0x26, 1, 2),  /* T310X */
        CH_DEVICE(0x30, 1, 2),  /* T3B10 */
        CH_DEVICE(0x31, 1, 3),  /* T3B20 */
        CH_DEVICE(0x32, 1, 1),  /* T3B02 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

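/*
 * Example invocation (hypothetical): "modprobe cxgb3 msi=0" would force
 * legacy pin interrupts; the default msi=2 lets the driver try MSI-X first.
 */
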
/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the port whose settings are to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                printk(KERN_INFO "%s: link down\n", dev->name);
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else
                        netif_carrier_off(dev);
                link_report(dev);
        }
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_reset(mac);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, 0, dev->dev_addr);
        t3_mac_set_rx_mode(mac, &rm);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s (queue %d)", d->name, i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }
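        /*
         * Worked example with hypothetical sizes nq0 = 2 and nq1 = 2: the
         * first half of the table cycles 0,1,0,1,... and the second half
         * cycles 2,3,2,3,..., spreading flows evenly over each port's
         * queue sets.
         */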

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6), cpus, rspq_map);
}

/*
 * If we have multiple receive queues per port serviced by NAPI we need one
 * netdevice per queue as NAPI operates on netdevices.  We already have one
 * netdevice, namely the one associated with the interface, so we use dummy
 * ones for any additional queues.  Note that these netdevices exist purely
 * so that NAPI has something to work with, they do not represent network
 * ports and are not registered.
 */
static int init_dummy_netdevs(struct adapter *adap)
{
        int i, j, dummy_idx = 0;
        struct net_device *nd;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                for (j = 0; j < pi->nqsets - 1; j++) {
                        if (!adap->dummy_netdev[dummy_idx]) {
                                nd = alloc_netdev(0, "", ether_setup);
                                if (!nd)
                                        goto free_all;

                                nd->priv = adap;
                                nd->weight = 64;
                                set_bit(__LINK_STATE_START, &nd->state);
                                adap->dummy_netdev[dummy_idx] = nd;
                        }
                        strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
                        dummy_idx++;
                }
        }
        return 0;

free_all:
        while (--dummy_idx >= 0) {
                free_netdev(adap->dummy_netdev[dummy_idx]);
                adap->dummy_netdev[dummy_idx] = NULL;
        }
        return -ENOMEM;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;
        struct net_device *dev;

        for_each_port(adap, i) {
                dev = adap->port[i];
                while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
                        msleep(1);
        }

        for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
                dev = adap->dummy_netdev[i];
                if (dev)
                        while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
                                msleep(1);
        }
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
        unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq,
                                j == 0 ? dev :
                                         adap->dummy_netdev[dummy_dev_idx++]);
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

static ssize_t attr_show(struct device *d, struct device_attribute *attr,
                         char *buf,
                         ssize_t(*format) (struct adapter *, char *))
{
        ssize_t len;
        struct adapter *adap = to_net_dev(d)->priv;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (adap, buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d, struct device_attribute *attr,
                          const char *buf, size_t len,
                          ssize_t(*set) (struct adapter *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        char *endp;
        ssize_t ret;
        unsigned int val;
        struct adapter *adap = to_net_dev(d)->priv;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (adap, val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct adapter *adap, char *buf) \
{ \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, attr, buf, format_##name); \
}

static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
{
        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct adapter *adap, unsigned int val)
{
        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
                            char *buf, int sched)
{
        ssize_t len;
        unsigned int v, addr, bpt, cpt;
        struct adapter *adap = to_net_dev(d)->priv;

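        /*
         * Two schedulers share each 32-bit rate-limit register: sched / 2
         * selects the register (addresses step down from the Q1_Q0 register)
         * and an odd scheduler's value sits in the upper 16 bits, hence the
         * shift below.
         */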
        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
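                /*
                 * cclk is in kHz, so cclk * 1000 is core clocks per second;
                 * divided by clocks-per-tick (cpt) it yields scheduler
                 * ticks/sec.  Multiplying by bytes-per-tick (bpt) gives
                 * bytes/sec, and 125 bytes/sec is 1 Kbit/sec.
                 */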
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
                             const char *buf, size_t len, int sched)
{
        char *endp;
        ssize_t ret;
        unsigned int val;
        struct adapter *adap = to_net_dev(d)->priv;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, attr, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, attr, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memset(req->src_mac1, 0, sizeof(req->src_mac1));
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
            write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                              int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        t3_mgmt_tx(adap, skb);
}

static void bind_qsets(struct adapter *adap)
{
        int i, j;

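        /*
         * A min/max of -1 leaves the scheduler's rate unconstrained; the
         * binding argument ties each queue set to its port index.
         */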
        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j)
                        send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
                                          -1, i);
        }
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err = 0;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err)
                        goto out;

                err = init_dummy_netdevs(adap);
                if (err)
                        goto out;

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                setup_rss(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                if (request_msix_data_irqs(adap)) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ? 0 : SA_SHIRQ,
                                      adap->name, adap)))
                goto irq_err;

        t3_sge_start(adap);
        t3_intr_enable(adap);

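        /*
         * Bind queue sets to ports only once and only under MSI-X;
         * QUEUES_BOUND stays set so later cxgb_up calls skip the rebind.
         */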
        if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
                bind_qsets(adap);
        adap->flags |= QUEUES_BOUND;

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                    n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);

        flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
        quiesce_rx(adapter);
}

static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

static int offload_open(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct t3cdev *tdev = T3CDEV(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err = 0;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                return err;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        /* Never mind if the next step fails */
        sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

static int cxgb_open(struct net_device *dev)
{
        int err;
        struct adapter *adapter = dev->priv;
        struct port_info *pi = netdev_priv(dev);
        int other_ports = adapter->open_device_map & PORT_MASK;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (!ofld_disable) {
                err = offload_open(dev);
                if (err)
                        printk(KERN_WARNING
                               "Could not initialize offload capabilities\n");
        }

        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_start_queue(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = netdev_priv(dev);

        t3_port_intr_disable(adapter, p->port_id);
        netif_stop_queue(dev);
        p->phy.ops->power_down(&p->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock(&adapter->work_lock); /* sync with update task */
        clear_bit(p->port_id, &adapter->open_device_map);
        spin_unlock(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_rearming_delayed_workqueue(cxgb3_wq,
                                                  &adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        return 0;
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = netdev_priv(dev);
        struct net_device_stats *ns = &p->netstats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&p->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct adapter *adapter = dev->priv;

        adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "RxDrops            "
};

static int get_stats_count(struct net_device *dev)
{
        return ARRAY_SIZE(stats_strings);
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        u32 fw_vers = 0;
        struct adapter *adapter = dev->priv;

        t3_get_fw_version(adapter, &fw_vers);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));
        if (!fw_vers)
                strcpy(info->fw_version, "N/A");
        else {
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
                         G_FW_VERSION_MICRO(fw_vers));
        }
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
                                            struct port_info *p, int idx)
{
        int i;
        unsigned long tot = 0;

        for (i = 0; i < p->nqsets; ++i)
                tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
        return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct adapter *adapter = dev->priv;
        struct port_info *pi = netdev_priv(dev);
        const struct mac_stats *s;

        spin_lock(&adapter->stats_lock);
        s = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        *data++ = s->tx_octets;
        *data++ = s->tx_frames;
        *data++ = s->tx_mcast_frames;
        *data++ = s->tx_bcast_frames;
        *data++ = s->tx_pause;
        *data++ = s->tx_underrun;
        *data++ = s->tx_fifo_urun;

        *data++ = s->tx_frames_64;
        *data++ = s->tx_frames_65_127;
        *data++ = s->tx_frames_128_255;
        *data++ = s->tx_frames_256_511;
        *data++ = s->tx_frames_512_1023;
        *data++ = s->tx_frames_1024_1518;
        *data++ = s->tx_frames_1519_max;

        *data++ = s->rx_octets;
        *data++ = s->rx_frames;
        *data++ = s->rx_mcast_frames;
        *data++ = s->rx_bcast_frames;
        *data++ = s->rx_pause;
        *data++ = s->rx_fcs_errs;
        *data++ = s->rx_symbol_errs;
        *data++ = s->rx_short;
        *data++ = s->rx_jabber;
        *data++ = s->rx_too_long;
        *data++ = s->rx_fifo_ovfl;

        *data++ = s->rx_frames_64;
        *data++ = s->rx_frames_65_127;
        *data++ = s->rx_frames_128_255;
        *data++ = s->rx_frames_256_511;
        *data++ = s->rx_frames_512_1023;
        *data++ = s->rx_frames_1024_1518;
        *data++ = s->rx_frames_1519_max;

        *data++ = pi->phy.fifo_errors;

        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
        *data++ = s->rx_cong_drops;
}

static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
        u32 *p = buf + start;

        for (; start <= end; start += sizeof(u32))
                *p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct adapter *ap = dev->priv;

        /*
         * Version scheme:
         * bits 0..9: chip version
         * bits 10..15: chip revision
         * bit 31: set for PCIe cards
         */
        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

        /*
         * We skip the MAC statistics registers because they are clear-on-read.
         * Also reading multi-register stats would need to synchronize with the
         * periodic mac stats accumulation.  Hard to justify the complexity.
         */
        memset(buf, 0, T3_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
        reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
        reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
        reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
        reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
        reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
                       XGM_REG(A_XGM_SERDES_STAT3, 1));
        reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
                       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;
        if (p->link_config.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        p->phy.ops->autoneg_restart(&p->phy);
        return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
        int i;
        struct adapter *adapter = dev->priv;

        if (data == 0)
                data = 2;

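        /* Each iteration toggles the GPIO0-driven LED and sleeps 500 ms,
         * so data * 2 iterations blink for roughly data seconds. */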
        for (i = 0; i < data * 2; i++) {
                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                                 (i & 1) ? F_GPIO0_OUT_VAL : 0);
                if (msleep_interruptible(500))
                        break;
        }
        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                         F_GPIO0_OUT_VAL);
        return 0;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct port_info *p = netdev_priv(dev);

        cmd->supported = p->link_config.supported;
        cmd->advertising = p->link_config.advertising;

        if (netif_carrier_ok(dev)) {
                cmd->speed = p->link_config.speed;
                cmd->duplex = p->link_config.duplex;
        } else {
                cmd->speed = -1;
                cmd->duplex = -1;
        }

        cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
        cmd->phy_address = p->phy.addr;
        cmd->transceiver = XCVR_EXTERNAL;
        cmd->autoneg = p->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}

static int speed_duplex_to_caps(int speed, int duplex)
{
        int cap = 0;

        switch (speed) {
        case SPEED_10:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10baseT_Full;
                else
                        cap = SUPPORTED_10baseT_Half;
                break;
        case SPEED_100:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_100baseT_Full;
                else
                        cap = SUPPORTED_100baseT_Half;
                break;
        case SPEED_1000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_1000baseT_Full;
                else
                        cap = SUPPORTED_1000baseT_Half;
                break;
        case SPEED_10000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10000baseT_Full;
        }
        return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
                      ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (!(lc->supported & SUPPORTED_Autoneg))
                return -EOPNOTSUPP;     /* can't change speed/duplex */

        if (cmd->autoneg == AUTONEG_DISABLE) {
                int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

                if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
                        return -EINVAL;
                lc->requested_speed = cmd->speed;
                lc->requested_duplex = cmd->duplex;
                lc->advertising = 0;
        } else {
                cmd->advertising &= ADVERTISED_MASK;
                cmd->advertising &= lc->supported;
                if (!cmd->advertising)
                        return -EINVAL;
                lc->requested_speed = SPEED_INVALID;
                lc->requested_duplex = DUPLEX_INVALID;
                lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
        }
        lc->autoneg = cmd->autoneg;
        if (netif_running(dev))
                t3_link_start(&p->phy, &p->mac, lc);
        return 0;
}

static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);

        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & SUPPORTED_Autoneg)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (lc->autoneg == AUTONEG_ENABLE) {
                if (netif_running(dev))
                        t3_link_start(&p->phy, &p->mac, lc);
        } else {
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
                if (netif_running(dev))
                        t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
        }
        return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
        struct port_info *p = netdev_priv(dev);

        p->rx_csum_offload = data;
        return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct adapter *adapter = dev->priv;

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_mini_max_pending = 0;
        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
        e->tx_max_pending = MAX_TXQ_ENTRIES;

        e->rx_pending = adapter->params.sge.qset[0].fl_size;
        e->rx_mini_pending = adapter->params.sge.qset[0].rspq_size;
        e->rx_jumbo_pending = adapter->params.sge.qset[0].jumbo_size;
        e->tx_pending = adapter->params.sge.qset[0].txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        int i;
        struct adapter *adapter = dev->priv;

        if (e->rx_pending > MAX_RX_BUFFERS ||
            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
            e->tx_pending > MAX_TXQ_ENTRIES ||
            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES ||
            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
            e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        for (i = 0; i < SGE_QSETS; ++i) {
                struct qset_params *q = &adapter->params.sge.qset[i];

                q->rspq_size = e->rx_mini_pending;
                q->fl_size = e->rx_pending;
                q->jumbo_size = e->rx_jumbo_pending;
                q->txq_size[0] = e->tx_pending;
                q->txq_size[1] = e->tx_pending;
                q->txq_size[2] = e->tx_pending;
        }
        return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct adapter *adapter = dev->priv;
        struct qset_params *qsp = &adapter->params.sge.qset[0];
        struct sge_qset *qs = &adapter->sge.qs[0];

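        /*
         * The usecs * 10 scaling suggests the holdoff timer counts in
         * units of 100 ns (an inference from the conversion factor, not
         * from documented hardware behaviour).
         */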
        if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
                return -EINVAL;

        qsp->coalesce_usecs = c->rx_coalesce_usecs;
        t3_update_qset_coalesce(qs, qsp);
        return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct adapter *adapter = dev->priv;
        struct qset_params *q = adapter->params.sge.qset;

        c->rx_coalesce_usecs = q->coalesce_usecs;
        return 0;
}

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
        int i, err = 0;
        struct adapter *adapter = dev->priv;

        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        e->magic = EEPROM_MAGIC;
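        /* The serial EEPROM is accessed one 32-bit word at a time, so read
         * from the word containing e->offset through the last needed word. */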
        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
                err = t3_seeprom_read(adapter, i, (u32 *)&buf[i]);

        if (!err)
                memcpy(data, buf + e->offset, e->len);
        kfree(buf);
        return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                      u8 *data)
{
        u8 *buf;
        int err = 0;
        u32 aligned_offset, aligned_len, *p;
        struct adapter *adapter = dev->priv;

        if (eeprom->magic != EEPROM_MAGIC)
                return -EINVAL;

        aligned_offset = eeprom->offset & ~3;
        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

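        /*
         * For writes that are not word-aligned: read the first and last
         * affected words, splice in the caller's bytes, then write back
         * whole words below.
         */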
        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
                buf = kmalloc(aligned_len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
                if (!err && aligned_len > 4)
                        err = t3_seeprom_read(adapter,
                                              aligned_offset + aligned_len - 4,
                                              (u32 *)&buf[aligned_len - 4]);
                if (err)
                        goto out;
                memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
        } else
                buf = data;

        err = t3_seeprom_wp(adapter, 0);
        if (err)
                goto out;

        for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
                err = t3_seeprom_write(adapter, aligned_offset, *p);
                aligned_offset += 4;
        }

        if (!err)
                err = t3_seeprom_wp(adapter, 1);
out:
        if (buf != data)
                kfree(buf);
        return err;
}

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        wol->supported = 0;
        wol->wolopts = 0;
        memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static const struct ethtool_ops cxgb_ethtool_ops = {
        .get_settings = get_settings,
        .set_settings = set_settings,
        .get_drvinfo = get_drvinfo,
        .get_msglevel = get_msglevel,
        .set_msglevel = set_msglevel,
        .get_ringparam = get_sge_param,
        .set_ringparam = set_sge_param,
        .get_coalesce = get_coalesce,
        .set_coalesce = set_coalesce,
        .get_eeprom_len = get_eeprom_len,
        .get_eeprom = get_eeprom,
        .set_eeprom = set_eeprom,
        .get_pauseparam = get_pauseparam,
        .set_pauseparam = set_pauseparam,
        .get_rx_csum = get_rx_csum,
        .set_rx_csum = set_rx_csum,
        .get_tx_csum = ethtool_op_get_tx_csum,
        .set_tx_csum = ethtool_op_set_tx_csum,
        .get_sg = ethtool_op_get_sg,
        .set_sg = ethtool_op_set_sg,
        .get_link = ethtool_op_get_link,
        .get_strings = get_strings,
        .phys_id = cxgb3_phys_id,
        .nway_reset = restart_autoneg,
        .get_stats_count = get_stats_count,
        .get_ethtool_stats = get_stats,
        .get_regs_len = get_regs_len,
        .get_regs = get_regs,
        .get_wol = get_wol,
        .get_tso = ethtool_op_get_tso,
        .set_tso = ethtool_op_set_tso,
        .get_perm_addr = ethtool_op_get_perm_addr
};

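/*
 * Range check used by the extension ioctls below: a negative value means
 * "leave this parameter unchanged" and always passes the check.
 */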
1536 static int in_range(int val, int lo, int hi)
1537 {
1538         return val < 0 || (val <= hi && val >= lo);
1539 }
1540
1541 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1542 {
1543         int ret;
1544         u32 cmd;
1545         struct adapter *adapter = dev->priv;
1546
1547         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1548                 return -EFAULT;
1549
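        /* The leading u32 of the user buffer selects the sub-command. */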
1550         switch (cmd) {
1551         case CHELSIO_SETREG:{
1552                 struct ch_reg edata;
1553
1554                 if (!capable(CAP_NET_ADMIN))
1555                         return -EPERM;
1556                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1557                         return -EFAULT;
                if ((edata.addr & 3) != 0 || edata.addr >= adapter->mmio_len)
                        return -EINVAL;
1561                 writel(edata.val, adapter->regs + edata.addr);
1562                 break;
1563         }
1564         case CHELSIO_GETREG:{
1565                 struct ch_reg edata;
1566
1567                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1568                         return -EFAULT;
                if ((edata.addr & 3) != 0 || edata.addr >= adapter->mmio_len)
                        return -EINVAL;
1572                 edata.val = readl(adapter->regs + edata.addr);
1573                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1574                         return -EFAULT;
1575                 break;
1576         }
1577         case CHELSIO_SET_QSET_PARAMS:{
1578                 int i;
1579                 struct qset_params *q;
1580                 struct ch_qset_params t;
1581
1582                 if (!capable(CAP_NET_ADMIN))
1583                         return -EPERM;
1584                 if (copy_from_user(&t, useraddr, sizeof(t)))
1585                         return -EFAULT;
1586                 if (t.qset_idx >= SGE_QSETS)
1587                         return -EINVAL;
                if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
                    !in_range(t.cong_thres, 0, 255) ||
                    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
                              MAX_TXQ_ENTRIES) ||
                    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
                              MAX_TXQ_ENTRIES) ||
                    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
                              MAX_CTRL_TXQ_ENTRIES) ||
                    !in_range(t.fl_size[0], MIN_FL_ENTRIES, MAX_RX_BUFFERS) ||
                    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
                              MAX_RX_JUMBO_BUFFERS) ||
                    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
                              MAX_RSPQ_ENTRIES))
                        return -EINVAL;
                if ((adapter->flags & FULL_INIT_DONE) &&
                    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
                     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
                     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
                     t.polling >= 0 || t.cong_thres >= 0))
                        return -EBUSY;
1609
1610                 q = &adapter->params.sge.qset[t.qset_idx];
1611
1612                 if (t.rspq_size >= 0)
1613                         q->rspq_size = t.rspq_size;
1614                 if (t.fl_size[0] >= 0)
1615                         q->fl_size = t.fl_size[0];
1616                 if (t.fl_size[1] >= 0)
1617                         q->jumbo_size = t.fl_size[1];
1618                 if (t.txq_size[0] >= 0)
1619                         q->txq_size[0] = t.txq_size[0];
1620                 if (t.txq_size[1] >= 0)
1621                         q->txq_size[1] = t.txq_size[1];
1622                 if (t.txq_size[2] >= 0)
1623                         q->txq_size[2] = t.txq_size[2];
1624                 if (t.cong_thres >= 0)
1625                         q->cong_thres = t.cong_thres;
1626                 if (t.intr_lat >= 0) {
                        struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
1629
1630                         q->coalesce_usecs = t.intr_lat;
1631                         t3_update_qset_coalesce(qs, q);
1632                 }
1633                 if (t.polling >= 0) {
                        if (adapter->flags & USING_MSIX) {
                                q->polling = t.polling;
                        } else {
                                /* No polling with INTx for T3A */
                                if (adapter->params.rev == 0 &&
                                    !(adapter->flags & USING_MSI))
                                        t.polling = 0;

                                for (i = 0; i < SGE_QSETS; i++) {
                                        q = &adapter->params.sge.qset[i];
                                        q->polling = t.polling;
                                }
                        }
1648                 }
1649                 break;
1650         }
1651         case CHELSIO_GET_QSET_PARAMS:{
1652                 struct qset_params *q;
1653                 struct ch_qset_params t;
1654
1655                 if (copy_from_user(&t, useraddr, sizeof(t)))
1656                         return -EFAULT;
1657                 if (t.qset_idx >= SGE_QSETS)
1658                         return -EINVAL;
1659
1660                 q = &adapter->params.sge.qset[t.qset_idx];
1661                 t.rspq_size = q->rspq_size;
1662                 t.txq_size[0] = q->txq_size[0];
1663                 t.txq_size[1] = q->txq_size[1];
1664                 t.txq_size[2] = q->txq_size[2];
1665                 t.fl_size[0] = q->fl_size;
1666                 t.fl_size[1] = q->jumbo_size;
1667                 t.polling = q->polling;
1668                 t.intr_lat = q->coalesce_usecs;
1669                 t.cong_thres = q->cong_thres;
1670
1671                 if (copy_to_user(useraddr, &t, sizeof(t)))
1672                         return -EFAULT;
1673                 break;
1674         }
1675         case CHELSIO_SET_QSET_NUM:{
1676                 struct ch_reg edata;
1677                 struct port_info *pi = netdev_priv(dev);
1678                 unsigned int i, first_qset = 0, other_qsets = 0;
1679
1680                 if (!capable(CAP_NET_ADMIN))
1681                         return -EPERM;
1682                 if (adapter->flags & FULL_INIT_DONE)
1683                         return -EBUSY;
1684                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1685                         return -EFAULT;
1686                 if (edata.val < 1 ||
1687                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1688                         return -EINVAL;
1689
1690                 for_each_port(adapter, i)
1691                         if (adapter->port[i] && adapter->port[i] != dev)
1692                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
1693
1694                 if (edata.val + other_qsets > SGE_QSETS)
1695                         return -EINVAL;
1696
1697                 pi->nqsets = edata.val;
1698
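                /* Renumber first_qset so qsets are contiguous across ports. */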
1699                 for_each_port(adapter, i)
1700                         if (adapter->port[i]) {
1701                                 pi = adap2pinfo(adapter, i);
1702                                 pi->first_qset = first_qset;
1703                                 first_qset += pi->nqsets;
1704                         }
1705                 break;
1706         }
1707         case CHELSIO_GET_QSET_NUM:{
1708                 struct ch_reg edata;
1709                 struct port_info *pi = netdev_priv(dev);
1710
1711                 edata.cmd = CHELSIO_GET_QSET_NUM;
1712                 edata.val = pi->nqsets;
1713                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1714                         return -EFAULT;
1715                 break;
1716         }
1717         case CHELSIO_LOAD_FW:{
1718                 u8 *fw_data;
1719                 struct ch_mem_range t;
1720
1721                 if (!capable(CAP_NET_ADMIN))
1722                         return -EPERM;
1723                 if (copy_from_user(&t, useraddr, sizeof(t)))
1724                         return -EFAULT;
1725
1726                 fw_data = kmalloc(t.len, GFP_KERNEL);
1727                 if (!fw_data)
1728                         return -ENOMEM;
1729
                if (copy_from_user(fw_data, useraddr + sizeof(t), t.len)) {
                        kfree(fw_data);
                        return -EFAULT;
                }
1735
1736                 ret = t3_load_fw(adapter, fw_data, t.len);
1737                 kfree(fw_data);
1738                 if (ret)
1739                         return ret;
1740                 break;
1741         }
1742         case CHELSIO_SETMTUTAB:{
1743                 struct ch_mtus m;
1744                 int i;
1745
1746                 if (!is_offload(adapter))
1747                         return -EOPNOTSUPP;
1748                 if (!capable(CAP_NET_ADMIN))
1749                         return -EPERM;
1750                 if (offload_running(adapter))
1751                         return -EBUSY;
1752                 if (copy_from_user(&m, useraddr, sizeof(m)))
1753                         return -EFAULT;
1754                 if (m.nmtus != NMTUS)
1755                         return -EINVAL;
1756                 if (m.mtus[0] < 81)     /* accommodate SACK */
1757                         return -EINVAL;
1758
1759                 /* MTUs must be in ascending order */
1760                 for (i = 1; i < NMTUS; ++i)
1761                         if (m.mtus[i] < m.mtus[i - 1])
1762                                 return -EINVAL;
1763
1764                 memcpy(adapter->params.mtus, m.mtus,
1765                         sizeof(adapter->params.mtus));
1766                 break;
1767         }
1768         case CHELSIO_GET_PM:{
1769                 struct tp_params *p = &adapter->params.tp;
1770                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1771
1772                 if (!is_offload(adapter))
1773                         return -EOPNOTSUPP;
1774                 m.tx_pg_sz = p->tx_pg_size;
1775                 m.tx_num_pg = p->tx_num_pgs;
1776                 m.rx_pg_sz = p->rx_pg_size;
1777                 m.rx_num_pg = p->rx_num_pgs;
1778                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1779                 if (copy_to_user(useraddr, &m, sizeof(m)))
1780                         return -EFAULT;
1781                 break;
1782         }
1783         case CHELSIO_SET_PM:{
1784                 struct ch_pm m;
1785                 struct tp_params *p = &adapter->params.tp;
1786
1787                 if (!is_offload(adapter))
1788                         return -EOPNOTSUPP;
1789                 if (!capable(CAP_NET_ADMIN))
1790                         return -EPERM;
1791                 if (adapter->flags & FULL_INIT_DONE)
1792                         return -EBUSY;
1793                 if (copy_from_user(&m, useraddr, sizeof(m)))
1794                         return -EFAULT;
1795                 if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
1796                         !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
1797                         return -EINVAL; /* not power of 2 */
                if (!(m.rx_pg_sz & 0x14000))
                        return -EINVAL; /* not 16KB or 64KB */
                if (!(m.tx_pg_sz & 0x1554000))
                        return -EINVAL; /* not 16KB..16MB, stepping by 4x */
1802                 if (m.tx_num_pg == -1)
1803                         m.tx_num_pg = p->tx_num_pgs;
1804                 if (m.rx_num_pg == -1)
1805                         m.rx_num_pg = p->rx_num_pgs;
1806                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1807                         return -EINVAL;
1808                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1809                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1810                         return -EINVAL;
1811                 p->rx_pg_size = m.rx_pg_sz;
1812                 p->tx_pg_size = m.tx_pg_sz;
1813                 p->rx_num_pgs = m.rx_num_pg;
1814                 p->tx_num_pgs = m.tx_num_pg;
1815                 break;
1816         }
1817         case CHELSIO_GET_MEM:{
1818                 struct ch_mem_range t;
1819                 struct mc7 *mem;
1820                 u64 buf[32];
1821
1822                 if (!is_offload(adapter))
1823                         return -EOPNOTSUPP;
1824                 if (!(adapter->flags & FULL_INIT_DONE))
1825                         return -EIO;    /* need the memory controllers */
1826                 if (copy_from_user(&t, useraddr, sizeof(t)))
1827                         return -EFAULT;
1828                 if ((t.addr & 7) || (t.len & 7))
1829                         return -EINVAL;
1830                 if (t.mem_id == MEM_CM)
1831                         mem = &adapter->cm;
1832                 else if (t.mem_id == MEM_PMRX)
1833                         mem = &adapter->pmrx;
1834                 else if (t.mem_id == MEM_PMTX)
1835                         mem = &adapter->pmtx;
1836                 else
1837                         return -EINVAL;
1838
                /*
                 * Version scheme:
                 * bits 0..9: chip version
                 * bits 10..15: chip revision
                 */
1844                 t.version = 3 | (adapter->params.rev << 10);
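                /*
                 * For example, a chip-version-3 part at revision 2 reports
                 * version 0x803 (3 | (2 << 10)).
                 */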
1845                 if (copy_to_user(useraddr, &t, sizeof(t)))
1846                         return -EFAULT;
1847
1848                 /*
1849                  * Read 256 bytes at a time as len can be large and we don't
1850                  * want to use huge intermediate buffers.
1851                  */
1852                 useraddr += sizeof(t);  /* advance to start of buffer */
1853                 while (t.len) {
                        unsigned int chunk = min_t(unsigned int, t.len,
                                                   sizeof(buf));

                        ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
1860                         if (ret)
1861                                 return ret;
1862                         if (copy_to_user(useraddr, buf, chunk))
1863                                 return -EFAULT;
1864                         useraddr += chunk;
1865                         t.addr += chunk;
1866                         t.len -= chunk;
1867                 }
1868                 break;
1869         }
1870         case CHELSIO_SET_TRACE_FILTER:{
1871                 struct ch_trace t;
1872                 const struct trace_params *tp;
1873
1874                 if (!capable(CAP_NET_ADMIN))
1875                         return -EPERM;
1876                 if (!offload_running(adapter))
1877                         return -EAGAIN;
1878                 if (copy_from_user(&t, useraddr, sizeof(t)))
1879                         return -EFAULT;
1880
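                /*
                 * This cast relies on the members of ch_trace from sip
                 * onward mirroring the layout of struct trace_params.
                 */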
1881                 tp = (const struct trace_params *)&t.sip;
                if (t.config_tx)
                        t3_config_trace_filter(adapter, tp, 0, t.invert_match,
                                               t.trace_tx);
                if (t.config_rx)
                        t3_config_trace_filter(adapter, tp, 1, t.invert_match,
                                               t.trace_rx);
1890                 break;
1891         }
1892         case CHELSIO_SET_PKTSCHED:{
1893                 struct ch_pktsched_params p;
1894
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (!adapter->open_device_map)
                        return -EAGAIN; /* uP and SGE must be running */
                if (copy_from_user(&p, useraddr, sizeof(p)))
                        return -EFAULT;
                send_pktsched_cmd(adapter, p.sched, p.idx, p.min, p.max,
                                  p.binding);
                break;
        }
1906         default:
1907                 return -EOPNOTSUPP;
1908         }
1909         return 0;
1910 }
1911
1912 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1913 {
1914         int ret, mmd;
1915         struct adapter *adapter = dev->priv;
1916         struct port_info *pi = netdev_priv(dev);
1917         struct mii_ioctl_data *data = if_mii(req);
1918
1919         switch (cmd) {
1920         case SIOCGMIIPHY:
1921                 data->phy_id = pi->phy.addr;
1922                 /* FALLTHRU */
1923         case SIOCGMIIREG:{
1924                 u32 val;
1925                 struct cphy *phy = &pi->phy;
1926
1927                 if (!phy->mdio_read)
1928                         return -EOPNOTSUPP;
1929                 if (is_10G(adapter)) {
1930                         mmd = data->phy_id >> 8;
1931                         if (!mmd)
1932                                 mmd = MDIO_DEV_PCS;
1933                         else if (mmd > MDIO_DEV_XGXS)
1934                                 return -EINVAL;
1935
                        ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
                                             mmd, data->reg_num, &val);
                } else {
                        ret = phy->mdio_read(adapter, data->phy_id & 0x1f, 0,
                                             data->reg_num & 0x1f, &val);
                }
1944                 if (!ret)
1945                         data->val_out = val;
1946                 break;
1947         }
1948         case SIOCSMIIREG:{
1949                 struct cphy *phy = &pi->phy;
1950
1951                 if (!capable(CAP_NET_ADMIN))
1952                         return -EPERM;
1953                 if (!phy->mdio_write)
1954                         return -EOPNOTSUPP;
1955                 if (is_10G(adapter)) {
1956                         mmd = data->phy_id >> 8;
1957                         if (!mmd)
1958                                 mmd = MDIO_DEV_PCS;
1959                         else if (mmd > MDIO_DEV_XGXS)
1960                                 return -EINVAL;
1961
                        ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
                                              mmd, data->reg_num,
                                              data->val_in);
                } else {
                        ret = phy->mdio_write(adapter, data->phy_id & 0x1f, 0,
                                              data->reg_num & 0x1f,
                                              data->val_in);
                }
1973                 break;
1974         }
1975         case SIOCCHIOCTL:
1976                 return cxgb_extension_ioctl(dev, req->ifr_data);
1977         default:
1978                 return -EOPNOTSUPP;
1979         }
1980         return ret;
1981 }
1982
1983 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1984 {
1985         int ret;
1986         struct adapter *adapter = dev->priv;
1987         struct port_info *pi = netdev_priv(dev);
1988
1989         if (new_mtu < 81)       /* accommodate SACK */
1990                 return -EINVAL;
1991         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
1992                 return ret;
1993         dev->mtu = new_mtu;
1994         init_port_mtus(adapter);
1995         if (adapter->params.rev == 0 && offload_running(adapter))
1996                 t3_load_mtus(adapter, adapter->params.mtus,
1997                              adapter->params.a_wnd, adapter->params.b_wnd,
1998                              adapter->port[0]->mtu);
1999         return 0;
2000 }
2001
2002 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2003 {
2004         struct adapter *adapter = dev->priv;
2005         struct port_info *pi = netdev_priv(dev);
2006         struct sockaddr *addr = p;
2007
2008         if (!is_valid_ether_addr(addr->sa_data))
2009                 return -EINVAL;
2010
2011         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2012         t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2013         if (offload_running(adapter))
2014                 write_smt_entry(adapter, pi->port_id);
2015         return 0;
2016 }
2017
2018 /**
2019  * t3_synchronize_rx - wait for current Rx processing on a port to complete
2020  * @adap: the adapter
2021  * @p: the port
2022  *
2023  * Ensures that current Rx processing on any of the queues associated with
2024  * the given port completes before returning.  We do this by acquiring and
2025  * releasing the locks of the response queues associated with the port.
2026  */
2027 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2028 {
2029         int i;
2030
2031         for (i = 0; i < p->nqsets; i++) {
2032                 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2033
2034                 spin_lock_irq(&q->lock);
2035                 spin_unlock_irq(&q->lock);
2036         }
2037 }
2038
2039 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2040 {
2041         struct adapter *adapter = dev->priv;
2042         struct port_info *pi = netdev_priv(dev);
2043
2044         pi->vlan_grp = grp;
2045         if (adapter->params.rev > 0)
2046                 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2047         else {
2048                 /* single control for all ports */
2049                 unsigned int i, have_vlans = 0;
2050                 for_each_port(adapter, i)
                        have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2052
2053                 t3_set_vlan_accel(adapter, 1, have_vlans);
2054         }
2055         t3_synchronize_rx(adapter, pi);
2056 }
2057
2058 static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2059 {
2060         /* nothing */
2061 }
2062
2063 #ifdef CONFIG_NET_POLL_CONTROLLER
2064 static void cxgb_netpoll(struct net_device *dev)
2065 {
2066         struct adapter *adapter = dev->priv;
2067         struct sge_qset *qs = dev2qset(dev);
2068
        t3_intr_handler(adapter, qs->rspq.polling)(adapter->pdev->irq,
                                                   adapter);
2071 }
2072 #endif
2073
2074 /*
2075  * Periodic accumulation of MAC statistics.
2076  */
2077 static void mac_stats_update(struct adapter *adapter)
2078 {
2079         int i;
2080
2081         for_each_port(adapter, i) {
2082                 struct net_device *dev = adapter->port[i];
2083                 struct port_info *p = netdev_priv(dev);
2084
2085                 if (netif_running(dev)) {
2086                         spin_lock(&adapter->stats_lock);
2087                         t3_mac_update_stats(&p->mac);
2088                         spin_unlock(&adapter->stats_lock);
2089                 }
2090         }
2091 }
2092
2093 static void check_link_status(struct adapter *adapter)
2094 {
2095         int i;
2096
2097         for_each_port(adapter, i) {
2098                 struct net_device *dev = adapter->port[i];
2099                 struct port_info *p = netdev_priv(dev);
2100
                if (!(p->port_type->caps & SUPPORTED_IRQ) &&
                    netif_running(dev))
                        t3_link_changed(adapter, i);
2103         }
2104 }
2105
2106 static void t3_adap_check_task(struct work_struct *work)
2107 {
2108         struct adapter *adapter = container_of(work, struct adapter,
2109                                                adap_check_task.work);
2110         const struct adapter_params *p = &adapter->params;
2111
2112         adapter->check_task_cnt++;
2113
2114         /* Check link status for PHYs without interrupts */
2115         if (p->linkpoll_period)
2116                 check_link_status(adapter);
2117
        /*
         * Accumulate MAC stats if needed.  linkpoll_period is in tenths of
         * a second, so this triggers once the accumulated polling time
         * reaches a full stats_update_period.
         */
2119         if (!p->linkpoll_period ||
2120             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2121             p->stats_update_period) {
2122                 mac_stats_update(adapter);
2123                 adapter->check_task_cnt = 0;
2124         }
2125
2126         /* Schedule the next check update if any port is active. */
2127         spin_lock(&adapter->work_lock);
2128         if (adapter->open_device_map & PORT_MASK)
2129                 schedule_chk_task(adapter);
2130         spin_unlock(&adapter->work_lock);
2131 }
2132
2133 /*
2134  * Processes external (PHY) interrupts in process context.
2135  */
2136 static void ext_intr_task(struct work_struct *work)
2137 {
2138         struct adapter *adapter = container_of(work, struct adapter,
2139                                                ext_intr_handler_task);
2140
2141         t3_phy_intr_handler(adapter);
2142
2143         /* Now reenable external interrupts */
2144         spin_lock_irq(&adapter->work_lock);
2145         if (adapter->slow_intr_mask) {
2146                 adapter->slow_intr_mask |= F_T3DBG;
2147                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2148                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2149                              adapter->slow_intr_mask);
2150         }
2151         spin_unlock_irq(&adapter->work_lock);
2152 }
2153
2154 /*
2155  * Interrupt-context handler for external (PHY) interrupts.
2156  */
2157 void t3_os_ext_intr_handler(struct adapter *adapter)
2158 {
2159         /*
2160          * Schedule a task to handle external interrupts as they may be slow
2161          * and we use a mutex to protect MDIO registers.  We disable PHY
2162          * interrupts in the meantime and let the task reenable them when
2163          * it's done.
2164          */
2165         spin_lock(&adapter->work_lock);
2166         if (adapter->slow_intr_mask) {
2167                 adapter->slow_intr_mask &= ~F_T3DBG;
2168                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2169                              adapter->slow_intr_mask);
2170                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2171         }
2172         spin_unlock(&adapter->work_lock);
2173 }
2174
2175 void t3_fatal_err(struct adapter *adapter)
2176 {
2177         unsigned int fw_status[4];
2178
2179         if (adapter->flags & FULL_INIT_DONE) {
2180                 t3_sge_stop(adapter);
2181                 t3_intr_disable(adapter);
2182         }
2183         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2184         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2185                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2186                          fw_status[0], fw_status[1],
2187                          fw_status[2], fw_status[3]);
}
2190
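/*
 * Request one MSI-X vector per queue set plus one for slow-path (error and
 * async) events; on failure the caller falls back to MSI or INTx.
 */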
2191 static int __devinit cxgb_enable_msix(struct adapter *adap)
2192 {
2193         struct msix_entry entries[SGE_QSETS + 1];
2194         int i, err;
2195
2196         for (i = 0; i < ARRAY_SIZE(entries); ++i)
2197                 entries[i].entry = i;
2198
2199         err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2200         if (!err) {
2201                 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2202                         adap->msix_info[i].vec = entries[i].vector;
2203         } else if (err > 0)
2204                 dev_info(&adap->pdev->dev,
                         "only %d MSI-X vectors left, not using MSI-X\n", err);
2206         return err;
2207 }
2208
2209 static void __devinit print_port_info(struct adapter *adap,
2210                                       const struct adapter_info *ai)
2211 {
2212         static const char *pci_variant[] = {
2213                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2214         };
2215
2216         int i;
2217         char buf[80];
2218
2219         if (is_pcie(adap))
2220                 snprintf(buf, sizeof(buf), "%s x%d",
2221                          pci_variant[adap->params.pci.variant],
2222                          adap->params.pci.width);
2223         else
2224                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2225                          pci_variant[adap->params.pci.variant],
2226                          adap->params.pci.speed, adap->params.pci.width);
2227
2228         for_each_port(adap, i) {
2229                 struct net_device *dev = adap->port[i];
2230                 const struct port_info *pi = netdev_priv(dev);
2231
2232                 if (!test_bit(i, &adap->registered_device_map))
2233                         continue;
2234                 printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
2235                        dev->name, ai->desc, pi->port_type->desc,
2236                        adap->params.rev, buf,
2237                        (adap->flags & USING_MSIX) ? " MSI-X" :
2238                        (adap->flags & USING_MSI) ? " MSI" : "");
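                /*
                 * adap->name points into the name of the first successfully
                 * registered port, so the CM/PMTX/PMRX sizes print only once.
                 */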
2239                 if (adap->name == dev->name && adap->params.vpd.mclk)
2240                         printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2241                                adap->name, t3_mc7_size(&adap->cm) >> 20,
2242                                t3_mc7_size(&adap->pmtx) >> 20,
2243                                t3_mc7_size(&adap->pmrx) >> 20);
2244         }
2245 }
2246
2247 static int __devinit init_one(struct pci_dev *pdev,
2248                               const struct pci_device_id *ent)
2249 {
2250         static int version_printed;
2251
2252         int i, err, pci_using_dac = 0;
2253         unsigned long mmio_start, mmio_len;
2254         const struct adapter_info *ai;
2255         struct adapter *adapter = NULL;
2256         struct port_info *pi;
2257
2258         if (!version_printed) {
2259                 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2260                 ++version_printed;
2261         }
2262
2263         if (!cxgb3_wq) {
2264                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2265                 if (!cxgb3_wq) {
2266                         printk(KERN_ERR DRV_NAME
2267                                ": cannot initialize work queue\n");
2268                         return -ENOMEM;
2269                 }
2270         }
2271
2272         err = pci_request_regions(pdev, DRV_NAME);
2273         if (err) {
2274                 /* Just info, some other driver may have claimed the device. */
2275                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2276                 return err;
2277         }
2278
2279         err = pci_enable_device(pdev);
2280         if (err) {
2281                 dev_err(&pdev->dev, "cannot enable PCI device\n");
2282                 goto out_release_regions;
2283         }
2284
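        /*
         * Prefer 64-bit DMA and remember the outcome; the net devices set
         * up below advertise NETIF_F_HIGHDMA only when it succeeded.
         */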
2285         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2286                 pci_using_dac = 1;
2287                 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2288                 if (err) {
2289                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2290                                "coherent allocations\n");
2291                         goto out_disable_device;
2292                 }
2293         } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2294                 dev_err(&pdev->dev, "no usable DMA configuration\n");
2295                 goto out_disable_device;
2296         }
2297
2298         pci_set_master(pdev);
2299
2300         mmio_start = pci_resource_start(pdev, 0);
2301         mmio_len = pci_resource_len(pdev, 0);
2302         ai = t3_get_adapter_info(ent->driver_data);
2303
2304         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2305         if (!adapter) {
2306                 err = -ENOMEM;
2307                 goto out_disable_device;
2308         }
2309
2310         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2311         if (!adapter->regs) {
2312                 dev_err(&pdev->dev, "cannot map device registers\n");
2313                 err = -ENOMEM;
2314                 goto out_free_adapter;
2315         }
2316
2317         adapter->pdev = pdev;
2318         adapter->name = pci_name(pdev);
2319         adapter->msg_enable = dflt_msg_enable;
2320         adapter->mmio_len = mmio_len;
2321
2322         mutex_init(&adapter->mdio_lock);
2323         spin_lock_init(&adapter->work_lock);
2324         spin_lock_init(&adapter->stats_lock);
2325
2326         INIT_LIST_HEAD(&adapter->adapter_list);
2327         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2328         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2329
2330         for (i = 0; i < ai->nports; ++i) {
2331                 struct net_device *netdev;
2332
2333                 netdev = alloc_etherdev(sizeof(struct port_info));
2334                 if (!netdev) {
2335                         err = -ENOMEM;
2336                         goto out_free_dev;
2337                 }
2338
2339                 SET_MODULE_OWNER(netdev);
2340                 SET_NETDEV_DEV(netdev, &pdev->dev);
2341
2342                 adapter->port[i] = netdev;
2343                 pi = netdev_priv(netdev);
2344                 pi->rx_csum_offload = 1;
2345                 pi->nqsets = 1;
2346                 pi->first_qset = i;
2347                 pi->activity = 0;
2348                 pi->port_id = i;
2349                 netif_carrier_off(netdev);
2350                 netdev->irq = pdev->irq;
2351                 netdev->mem_start = mmio_start;
2352                 netdev->mem_end = mmio_start + mmio_len - 1;
2353                 netdev->priv = adapter;
2354                 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2355                 netdev->features |= NETIF_F_LLTX;
2356                 if (pci_using_dac)
2357                         netdev->features |= NETIF_F_HIGHDMA;
2358
2359                 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2360                 netdev->vlan_rx_register = vlan_rx_register;
2361                 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
2362
2363                 netdev->open = cxgb_open;
2364                 netdev->stop = cxgb_close;
2365                 netdev->hard_start_xmit = t3_eth_xmit;
2366                 netdev->get_stats = cxgb_get_stats;
2367                 netdev->set_multicast_list = cxgb_set_rxmode;
2368                 netdev->do_ioctl = cxgb_ioctl;
2369                 netdev->change_mtu = cxgb_change_mtu;
2370                 netdev->set_mac_address = cxgb_set_mac_addr;
2371 #ifdef CONFIG_NET_POLL_CONTROLLER
2372                 netdev->poll_controller = cxgb_netpoll;
2373 #endif
2374                 netdev->weight = 64;
2375
2376                 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2377         }
2378
2379         pci_set_drvdata(pdev, adapter->port[0]);
2380         if (t3_prep_adapter(adapter, ai, 1) < 0) {
2381                 err = -ENODEV;
2382                 goto out_free_dev;
2383         }
2384
2385         /*
2386          * The card is now ready to go.  If any errors occur during device
2387          * registration we do not fail the whole card but rather proceed only
2388          * with the ports we manage to register successfully.  However we must
2389          * register at least one net device.
2390          */
2391         for_each_port(adapter, i) {
2392                 err = register_netdev(adapter->port[i]);
2393                 if (err)
2394                         dev_warn(&pdev->dev,
2395                                  "cannot register net device %s, skipping\n",
2396                                  adapter->port[i]->name);
2397                 else {
2398                         /*
2399                          * Change the name we use for messages to the name of
2400                          * the first successfully registered interface.
2401                          */
2402                         if (!adapter->registered_device_map)
2403                                 adapter->name = adapter->port[i]->name;
2404
2405                         __set_bit(i, &adapter->registered_device_map);
2406                 }
2407         }
2408         if (!adapter->registered_device_map) {
2409                 dev_err(&pdev->dev, "could not register any net devices\n");
2410                 goto out_free_dev;
2411         }
2412
2413         /* Driver's ready. Reflect it on LEDs */
2414         t3_led_ready(adapter);
2415
2416         if (is_offload(adapter)) {
2417                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2418                 cxgb3_adapter_ofld(adapter);
2419         }
2420
2421         /* See what interrupts we'll be using */
2422         if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2423                 adapter->flags |= USING_MSIX;
2424         else if (msi > 0 && pci_enable_msi(pdev) == 0)
2425                 adapter->flags |= USING_MSI;
2426
2427         err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2428                                  &cxgb3_attr_group);
2429
2430         print_port_info(adapter, ai);
2431         return 0;
2432
2433 out_free_dev:
2434         iounmap(adapter->regs);
2435         for (i = ai->nports - 1; i >= 0; --i)
2436                 if (adapter->port[i])
2437                         free_netdev(adapter->port[i]);
2438
2439 out_free_adapter:
2440         kfree(adapter);
2441
2442 out_disable_device:
2443         pci_disable_device(pdev);
2444 out_release_regions:
2445         pci_release_regions(pdev);
2446         pci_set_drvdata(pdev, NULL);
2447         return err;
2448 }
2449
2450 static void __devexit remove_one(struct pci_dev *pdev)
2451 {
2452         struct net_device *dev = pci_get_drvdata(pdev);
2453
2454         if (dev) {
2455                 int i;
2456                 struct adapter *adapter = dev->priv;
2457
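                /*
                 * Stop the SGE first so no queues are running while the
                 * ports are unregistered and resources are torn down.
                 */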
2458                 t3_sge_stop(adapter);
2459                 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2460                                    &cxgb3_attr_group);
2461
2462                 for_each_port(adapter, i)
                        if (test_bit(i, &adapter->registered_device_map))
                                unregister_netdev(adapter->port[i]);
2465
2466                 if (is_offload(adapter)) {
2467                         cxgb3_adapter_unofld(adapter);
2468                         if (test_bit(OFFLOAD_DEVMAP_BIT,
2469                                      &adapter->open_device_map))
2470                                 offload_close(&adapter->tdev);
2471                 }
2472
2473                 t3_free_sge_resources(adapter);
2474                 cxgb_disable_msi(adapter);
2475
2476                 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2477                         if (adapter->dummy_netdev[i]) {
2478                                 free_netdev(adapter->dummy_netdev[i]);
2479                                 adapter->dummy_netdev[i] = NULL;
2480                         }
2481
2482                 for_each_port(adapter, i)
2483                         if (adapter->port[i])
2484                                 free_netdev(adapter->port[i]);
2485
2486                 iounmap(adapter->regs);
2487                 kfree(adapter);
2488                 pci_release_regions(pdev);
2489                 pci_disable_device(pdev);
2490                 pci_set_drvdata(pdev, NULL);
2491         }
2492 }
2493
2494 static struct pci_driver driver = {
2495         .name = DRV_NAME,
2496         .id_table = cxgb3_pci_tbl,
2497         .probe = init_one,
2498         .remove = __devexit_p(remove_one),
2499 };
2500
static int __init cxgb3_init_module(void)
{
        cxgb3_offload_init();

        return pci_register_driver(&driver);
}
2510
2511 static void __exit cxgb3_cleanup_module(void)
2512 {
2513         pci_unregister_driver(&driver);
2514         if (cxgb3_wq)
2515                 destroy_workqueue(cxgb3_wq);
2516 }
2517
2518 module_init(cxgb3_init_module);
2519 module_exit(cxgb3_cleanup_module);