2 * olympic.c (c) 1999 Peter De Schrijver All Rights Reserved
3 * 1999/2000 Mike Phillips (mikep@linuxtr.net)
5 * Linux driver for IBM PCI tokenring cards based on the Pit/Pit-Phy/Olympic
8 * Base Driver Skeleton:
9 * Written 1993-94 by Donald Becker.
11 * Copyright 1993 United States Government as represented by the
12 * Director, National Security Agency.
14 * Thanks to Erik De Cock, Adrian Bridgett and Frank Fiene for their
15 * assistance and perseverance with the testing of this driver.
17 * This software may be used and distributed according to the terms
18 * of the GNU General Public License, incorporated herein by reference.
20 * 4/27/99 - Alpha Release 0.1.0
21 * First release to the public
23 * 6/8/99 - Official Release 0.2.0
24 * Merged into the kernel code
25 * 8/18/99 - Updated driver for 2.3.13 kernel to use new pci
26 * resource. Driver also reports the card name returned by
28 * 1/11/00 - Added spinlocks for smp
29 * 2/23/00 - Updated to dev_kfree_irq
30 * 3/10/00 - Fixed FDX enable which triggered other bugs also
32 * 5/20/00 - Changes to handle Olympic on LinuxPPC. Endian changes.
33 * The odd thing about the changes is that the fix for
34 * endian issues with the big-endian data in the arb, asb...
35 * was to always swab() the bytes, no matter what CPU.
36 * That's because the read[wl]() functions always swap the
37 * bytes on the way in on PPC.
38 * Fixing the hardware descriptors was another matter,
39 * because they weren't going through read[wl](), there all
40 * the results had to be in memory in le32 values. kdaaker
42 * 12/23/00 - Added minimal Cardbus support (Thanks Donald).
44 * 03/09/01 - Add new pci api, dev_base_lock, general clean up.
46 * 03/27/01 - Add new dma pci (Thanks to Kyle Lucke) and alloc_trdev
47 * Change proc_fs behaviour, now one entry per adapter.
49 * 04/09/01 - Couple of bug fixes to the dma unmaps and ejecting the
50 * adapter when live does not take the system down with it.
52 * 06/02/01 - Clean up, copy skb for small packets
54 * 06/22/01 - Add EISR error handling routines
56 * 07/19/01 - Improve bad LAA reporting, strip out freemem
57 * into a separate function, its called from 3
58 * different places now.
59 * 02/09/02 - Replaced sleep_on.
60 * 03/01/02 - Replace access to several registers from 32 bit to
61 * 16 bit. Fixes alignment errors on PPC 64 bit machines.
62 * Thanks to Al Trautman for this one.
63 * 03/10/02 - Fix BUG in arb_cmd. Bug was there all along but was
64 * silently ignored until the error checking code
65 * went into version 1.0.0
66 * 06/04/02 - Add correct start up sequence for the cardbus adapters.
67 * Required for strict compliance with pci power mgmt specs.
72 * If Problems do Occur
73 * Most problems can be rectified by either closing and opening the interface
74 * (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
75 * if compiled into the kernel).
78 /* Change OLYMPIC_DEBUG to 1 to get verbose, and I mean really verbose, messages */
80 #define OLYMPIC_DEBUG 0
83 #include <linux/module.h>
84 #include <linux/kernel.h>
85 #include <linux/errno.h>
86 #include <linux/timer.h>
88 #include <linux/ioport.h>
89 #include <linux/string.h>
90 #include <linux/proc_fs.h>
91 #include <linux/ptrace.h>
92 #include <linux/skbuff.h>
93 #include <linux/interrupt.h>
94 #include <linux/delay.h>
95 #include <linux/netdevice.h>
96 #include <linux/trdevice.h>
97 #include <linux/stddef.h>
98 #include <linux/init.h>
99 #include <linux/pci.h>
100 #include <linux/spinlock.h>
101 #include <linux/bitops.h>
102 #include <linux/jiffies.h>
104 #include <net/checksum.h>
105 #include <net/net_namespace.h>
108 #include <asm/system.h>
112 /* I've got to put some intelligence into the version number so that Peter and I know
113 * which version of the code somebody has got.
114 * Version Number = a.b.c.d where a.b.c is the level of code and d is the latest author.
115 * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
117 * Official releases will only have an a.b.c version number format.
120 static char version[] =
121 "Olympic.c v1.0.5 6/04/02 - Peter De Schrijver & Mike Phillips" ;
123 static char *open_maj_error[] = {"No error", "Lobe Media Test", "Physical Insertion",
124 "Address Verification", "Neighbor Notification (Ring Poll)",
125 "Request Parameters","FDX Registration Request",
126 "FDX Duplicate Address Check", "Station registration Query Wait",
/* Minor open-error reason strings, indexed by the low nibble of the SRB
 * open response error byte (err & 0x0f in olympic_open).
 * Fix: "failer" -> "failure" in the RPL monitor-contention message, which
 * is printed verbatim to the kernel log on a failed adapter open. */
static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost", "Wire Fault",
		"Ring Speed Mismatch", "Timeout","Ring Failure","Ring Beaconing",
		"Duplicate Node Address","Request Parameters","Remove Received",
		"Reserved", "Reserved", "No Monitor Detected for RPL",
		"Monitor Contention failure for RPL", "FDX Protocol Error"};
135 /* Module paramters */
137 MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
138 MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ;
140 /* Ring Speed 0,4,16,100
142 * 4,16 = Selected speed only, no autosense
143 * This allows the card to be the first on the ring
144 * and become the active monitor.
145 * 100 = Nothing at present, 100mbps is autodetected
146 * if FDX is turned on. May be implemented in the future to
147 * fail if 100mpbs is not detected.
149 * WARNING: Some hubs will allow you to insert
153 static int ringspeed[OLYMPIC_MAX_ADAPTERS] = {0,} ;
154 module_param_array(ringspeed, int, NULL, 0);
156 /* Packet buffer size */
158 static int pkt_buf_sz[OLYMPIC_MAX_ADAPTERS] = {0,} ;
159 module_param_array(pkt_buf_sz, int, NULL, 0) ;
163 static int message_level[OLYMPIC_MAX_ADAPTERS] = {0,} ;
164 module_param_array(message_level, int, NULL, 0) ;
166 /* Change network_monitor to receive mac frames through the arb channel.
167 * Will also create a /proc/net/olympic_tr%d entry, where %d is the tr
168 * device, i.e. tr0, tr1 etc.
169 * Intended to be used to create a ring-error reporting network module
170 * i.e. it will give you the source address of beaconers on the ring
172 static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
173 module_param_array(network_monitor, int, NULL, 0);
175 static struct pci_device_id olympic_pci_tbl[] = {
176 {PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
177 { } /* Terminating Entry */
179 MODULE_DEVICE_TABLE(pci,olympic_pci_tbl) ;
182 static int olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
183 static int olympic_init(struct net_device *dev);
184 static int olympic_open(struct net_device *dev);
185 static int olympic_xmit(struct sk_buff *skb, struct net_device *dev);
186 static int olympic_close(struct net_device *dev);
187 static void olympic_set_rx_mode(struct net_device *dev);
188 static void olympic_freemem(struct net_device *dev) ;
189 static irqreturn_t olympic_interrupt(int irq, void *dev_id);
190 static int olympic_set_mac_address(struct net_device *dev, void *addr) ;
191 static void olympic_arb_cmd(struct net_device *dev);
192 static int olympic_change_mtu(struct net_device *dev, int mtu);
193 static void olympic_srb_bh(struct net_device *dev) ;
194 static void olympic_asb_bh(struct net_device *dev) ;
195 static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) ;
197 static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
199 struct net_device *dev ;
200 struct olympic_private *olympic_priv;
201 static int card_no = -1 ;
206 if ((i = pci_enable_device(pdev))) {
210 pci_set_master(pdev);
212 if ((i = pci_request_regions(pdev,"olympic"))) {
216 dev = alloc_trdev(sizeof(struct olympic_private)) ;
222 olympic_priv = netdev_priv(dev) ;
224 spin_lock_init(&olympic_priv->olympic_lock) ;
226 init_waitqueue_head(&olympic_priv->srb_wait);
227 init_waitqueue_head(&olympic_priv->trb_wait);
229 printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, netdev_priv(dev));
232 dev->base_addr=pci_resource_start(pdev, 0);
233 olympic_priv->olympic_card_name = pci_name(pdev);
234 olympic_priv->pdev = pdev;
235 olympic_priv->olympic_mmio = ioremap(pci_resource_start(pdev,1),256);
236 olympic_priv->olympic_lap = ioremap(pci_resource_start(pdev,2),2048);
237 if (!olympic_priv->olympic_mmio || !olympic_priv->olympic_lap) {
241 if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) )
242 olympic_priv->pkt_buf_sz = PKT_BUF_SZ ;
244 olympic_priv->pkt_buf_sz = pkt_buf_sz[card_no] ;
246 dev->mtu = olympic_priv->pkt_buf_sz - TR_HLEN ;
247 olympic_priv->olympic_ring_speed = ringspeed[card_no] ;
248 olympic_priv->olympic_message_level = message_level[card_no] ;
249 olympic_priv->olympic_network_monitor = network_monitor[card_no];
251 if ((i = olympic_init(dev))) {
255 dev->open=&olympic_open;
256 dev->hard_start_xmit=&olympic_xmit;
257 dev->change_mtu=&olympic_change_mtu;
258 dev->stop=&olympic_close;
260 dev->set_multicast_list=&olympic_set_rx_mode;
261 dev->set_mac_address=&olympic_set_mac_address ;
262 SET_NETDEV_DEV(dev, &pdev->dev);
264 pci_set_drvdata(pdev,dev) ;
265 register_netdev(dev) ;
266 printk("Olympic: %s registered as: %s\n",olympic_priv->olympic_card_name,dev->name);
267 if (olympic_priv->olympic_network_monitor) { /* Must go after register_netdev as we need the device name */
269 strcpy(proc_name,"olympic_") ;
270 strcat(proc_name,dev->name) ;
271 create_proc_read_entry(proc_name,0,init_net.proc_net,olympic_proc_info,(void *)dev) ;
272 printk("Olympic: Network Monitor information: /proc/%s\n",proc_name);
277 if (olympic_priv->olympic_mmio)
278 iounmap(olympic_priv->olympic_mmio);
279 if (olympic_priv->olympic_lap)
280 iounmap(olympic_priv->olympic_lap);
284 pci_release_regions(pdev);
287 pci_disable_device(pdev);
291 static int olympic_init(struct net_device *dev)
293 struct olympic_private *olympic_priv;
294 u8 __iomem *olympic_mmio, *init_srb,*adapter_addr;
296 unsigned int uaa_addr;
298 olympic_priv=netdev_priv(dev);
299 olympic_mmio=olympic_priv->olympic_mmio;
301 printk("%s \n", version);
302 printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);
304 writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
306 while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) {
308 if(time_after(jiffies, t + 40*HZ)) {
309 printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
315 /* Needed for cardbus */
316 if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
317 writel(readl(olympic_priv->olympic_mmio+FERMASK)|FERMASK_INT_BIT, olympic_mmio+FERMASK);
321 printk("BCTL: %x\n",readl(olympic_mmio+BCTL));
322 printk("GPR: %x\n",readw(olympic_mmio+GPR));
323 printk("SISRMASK: %x\n",readl(olympic_mmio+SISR_MASK));
325 /* Aaaahhh, You have got to be real careful setting GPR, the card
326 holds the previous values from flash memory, including autosense
329 writel(readl(olympic_mmio+BCTL)|BCTL_MIMREB,olympic_mmio+BCTL);
331 if (olympic_priv->olympic_ring_speed == 0) { /* Autosense */
332 writew(readw(olympic_mmio+GPR)|GPR_AUTOSENSE,olympic_mmio+GPR);
333 if (olympic_priv->olympic_message_level)
334 printk(KERN_INFO "%s: Ringspeed autosense mode on\n",olympic_priv->olympic_card_name);
335 } else if (olympic_priv->olympic_ring_speed == 16) {
336 if (olympic_priv->olympic_message_level)
337 printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n", olympic_priv->olympic_card_name);
338 writew(GPR_16MBPS, olympic_mmio+GPR);
339 } else if (olympic_priv->olympic_ring_speed == 4) {
340 if (olympic_priv->olympic_message_level)
341 printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n", olympic_priv->olympic_card_name) ;
342 writew(0, olympic_mmio+GPR);
345 writew(readw(olympic_mmio+GPR)|GPR_NEPTUNE_BF,olympic_mmio+GPR);
348 printk("GPR = %x\n",readw(olympic_mmio + GPR) ) ;
350 /* Solo has been paused to meet the Cardbus power
351 * specs if the adapter is cardbus. Check to
352 * see its been paused and then restart solo. The
353 * adapter should set the pause bit within 1 second.
356 if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
358 while (!(readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE)) {
360 if(time_after(jiffies, t + 2*HZ)) {
361 printk(KERN_ERR "IBM Cardbus tokenring adapter not responsing.\n") ;
365 writel(readl(olympic_mmio+CLKCTL) & ~CLKCTL_PAUSE, olympic_mmio+CLKCTL) ;
368 /* start solo init */
369 writel((1<<15),olympic_mmio+SISR_MASK_SUM);
372 while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) {
374 if(time_after(jiffies, t + 15*HZ)) {
375 printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
380 writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
383 printk("LAPWWO: %x, LAPA: %x\n",readl(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
386 init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
391 printk("init_srb(%p): ",init_srb);
393 printk("%x ",readb(init_srb+i));
397 if(readw(init_srb+6)) {
398 printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n",readw(init_srb+6));
402 if (olympic_priv->olympic_message_level) {
403 if ( readb(init_srb +2) & 0x40) {
404 printk(KERN_INFO "Olympic: Adapter is FDX capable.\n") ;
406 printk(KERN_INFO "Olympic: Adapter cannot do FDX.\n");
410 uaa_addr=swab16(readw(init_srb+8));
413 printk("UAA resides at %x\n",uaa_addr);
416 writel(uaa_addr,olympic_mmio+LAPA);
417 adapter_addr=olympic_priv->olympic_lap + (uaa_addr & (~0xf800));
419 memcpy_fromio(&dev->dev_addr[0], adapter_addr,6);
422 printk("adapter address: %pM\n", dev->dev_addr);
425 olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12));
426 olympic_priv->olympic_parms_addr = swab16(readw(init_srb + 14));
432 static int olympic_open(struct net_device *dev)
434 struct olympic_private *olympic_priv=netdev_priv(dev);
435 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;
436 unsigned long flags, t;
437 int i, open_finished = 1 ;
440 DECLARE_WAITQUEUE(wait,current) ;
444 if(request_irq(dev->irq, &olympic_interrupt, IRQF_SHARED , "olympic", dev)) {
449 printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
450 printk("pending ints: %x\n",readl(olympic_mmio+SISR_RR));
453 writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);
455 writel(SISR_MI | SISR_SRB_REPLY, olympic_mmio+SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */
457 writel(LISR_LIE,olympic_mmio+LISR); /* more ints later */
459 /* adapter is closed, so SRB is pointed to by LAPWWO */
461 writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
462 init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
465 printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
466 printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
467 printk("Before the open command \n");
470 memset_io(init_srb,0,SRB_COMMAND_SIZE);
472 writeb(SRB_OPEN_ADAPTER,init_srb) ; /* open */
473 writeb(OLYMPIC_CLEAR_RET_CODE,init_srb+2);
475 /* If Network Monitor, instruct card to copy MAC frames through the ARB */
476 if (olympic_priv->olympic_network_monitor)
477 writew(swab16(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), init_srb+8);
479 writew(swab16(OPEN_ADAPTER_ENABLE_FDX), init_srb+8);
481 /* Test OR of first 3 bytes as its totally possible for
482 * someone to set the first 2 bytes to be zero, although this
483 * is an error, the first byte must have bit 6 set to 1 */
485 if (olympic_priv->olympic_laa[0] | olympic_priv->olympic_laa[1] | olympic_priv->olympic_laa[2]) {
486 writeb(olympic_priv->olympic_laa[0],init_srb+12);
487 writeb(olympic_priv->olympic_laa[1],init_srb+13);
488 writeb(olympic_priv->olympic_laa[2],init_srb+14);
489 writeb(olympic_priv->olympic_laa[3],init_srb+15);
490 writeb(olympic_priv->olympic_laa[4],init_srb+16);
491 writeb(olympic_priv->olympic_laa[5],init_srb+17);
492 memcpy(dev->dev_addr,olympic_priv->olympic_laa,dev->addr_len) ;
494 writeb(1,init_srb+30);
496 spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
497 olympic_priv->srb_queued=1;
499 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
500 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
504 add_wait_queue(&olympic_priv->srb_wait,&wait) ;
505 set_current_state(TASK_INTERRUPTIBLE) ;
507 while(olympic_priv->srb_queued) {
509 if(signal_pending(current)) {
510 printk(KERN_WARNING "%s: Signal received in open.\n",
512 printk(KERN_WARNING "SISR=%x LISR=%x\n",
513 readl(olympic_mmio+SISR),
514 readl(olympic_mmio+LISR));
515 olympic_priv->srb_queued=0;
518 if (time_after(jiffies, t + 10*HZ)) {
519 printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ;
520 olympic_priv->srb_queued=0;
523 set_current_state(TASK_INTERRUPTIBLE) ;
525 remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
526 set_current_state(TASK_RUNNING) ;
527 olympic_priv->srb_queued = 0 ;
529 printk("init_srb(%p): ",init_srb);
531 printk("%02x ",readb(init_srb+i));
535 /* If we get the same return response as we set, the interrupt wasn't raised and the open
539 switch (resp = readb(init_srb+2)) {
540 case OLYMPIC_CLEAR_RET_CODE:
541 printk(KERN_WARNING "%s: Adapter Open time out or error.\n", dev->name) ;
547 if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */
548 printk(KERN_WARNING "%s: Retrying at different ring speed \n", dev->name);
553 err = readb(init_srb+7);
555 if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) {
556 printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
557 printk(KERN_WARNING "%s: Please try again with a specified ring speed \n",dev->name);
559 printk(KERN_WARNING "%s: %s - %s\n", dev->name,
560 open_maj_error[(err & 0xf0) >> 4],
561 open_min_error[(err & 0x0f)]);
566 printk(KERN_WARNING "%s: Invalid LAA: %pM\n",
567 dev->name, olympic_priv->olympic_laa);
571 printk(KERN_WARNING "%s: Bad OPEN response: %x\n", dev->name, resp);
575 } while (!(open_finished)) ; /* Will only loop if ring speed mismatch re-open attempted && autosense is on */
577 if (readb(init_srb+18) & (1<<3))
578 if (olympic_priv->olympic_message_level)
579 printk(KERN_INFO "%s: Opened in FDX Mode\n",dev->name);
581 if (readb(init_srb+18) & (1<<1))
582 olympic_priv->olympic_ring_speed = 100 ;
583 else if (readb(init_srb+18) & 1)
584 olympic_priv->olympic_ring_speed = 16 ;
586 olympic_priv->olympic_ring_speed = 4 ;
588 if (olympic_priv->olympic_message_level)
589 printk(KERN_INFO "%s: Opened in %d Mbps mode\n",dev->name, olympic_priv->olympic_ring_speed);
591 olympic_priv->asb = swab16(readw(init_srb+8));
592 olympic_priv->srb = swab16(readw(init_srb+10));
593 olympic_priv->arb = swab16(readw(init_srb+12));
594 olympic_priv->trb = swab16(readw(init_srb+16));
596 olympic_priv->olympic_receive_options = 0x01 ;
597 olympic_priv->olympic_copy_all_options = 0 ;
601 writel((3<<16),olympic_mmio+BMCTL_RWM); /* Ensure end of frame generated interrupts */
603 writel(BMCTL_RX_DIS|3,olympic_mmio+BMCTL_RWM); /* Yes, this the enables RX channel */
605 for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
609 skb=dev_alloc_skb(olympic_priv->pkt_buf_sz);
615 olympic_priv->olympic_rx_ring[i].buffer = cpu_to_le32(pci_map_single(olympic_priv->pdev,
616 skb->data,olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)) ;
617 olympic_priv->olympic_rx_ring[i].res_length = cpu_to_le32(olympic_priv->pkt_buf_sz);
618 olympic_priv->rx_ring_skb[i]=skb;
622 printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
626 olympic_priv->rx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_rx_ring,
627 sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
628 writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXDESCQ);
629 writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXCDA);
630 writew(i, olympic_mmio+RXDESCQCNT);
632 olympic_priv->rx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_rx_status_ring,
633 sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
634 writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXSTATQ);
635 writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXCSA);
637 olympic_priv->rx_ring_last_received = OLYMPIC_RX_RING_SIZE - 1; /* last processed rx status */
638 olympic_priv->rx_status_last_received = OLYMPIC_RX_RING_SIZE - 1;
640 writew(i, olympic_mmio+RXSTATQCNT);
643 printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
644 printk("RXCSA: %x, rx_status_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
645 printk(" stat_ring[1]: %p, stat_ring[2]: %p, stat_ring[3]: %p\n", &(olympic_priv->olympic_rx_status_ring[1]), &(olympic_priv->olympic_rx_status_ring[2]), &(olympic_priv->olympic_rx_status_ring[3]) );
646 printk(" stat_ring[4]: %p, stat_ring[5]: %p, stat_ring[6]: %p\n", &(olympic_priv->olympic_rx_status_ring[4]), &(olympic_priv->olympic_rx_status_ring[5]), &(olympic_priv->olympic_rx_status_ring[6]) );
647 printk(" stat_ring[7]: %p\n", &(olympic_priv->olympic_rx_status_ring[7]) );
649 printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
650 printk("Rx_ring_dma_addr = %08x, rx_status_dma_addr = %08x\n",
651 olympic_priv->rx_ring_dma_addr,olympic_priv->rx_status_ring_dma_addr) ;
654 writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | i,olympic_mmio+RXENQ);
657 printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
658 printk("RXCSA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
659 printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
662 writel(SISR_RX_STATUS | SISR_RX_NOBUF,olympic_mmio+SISR_MASK_SUM);
666 writel(BMCTL_TX1_DIS,olympic_mmio+BMCTL_RWM); /* Yes, this enables TX channel 1 */
667 for(i=0;i<OLYMPIC_TX_RING_SIZE;i++)
668 olympic_priv->olympic_tx_ring[i].buffer=cpu_to_le32(0xdeadbeef);
670 olympic_priv->free_tx_ring_entries=OLYMPIC_TX_RING_SIZE;
671 olympic_priv->tx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_tx_ring,
672 sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE,PCI_DMA_TODEVICE) ;
673 writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXDESCQ_1);
674 writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXCDA_1);
675 writew(OLYMPIC_TX_RING_SIZE, olympic_mmio+TXDESCQCNT_1);
677 olympic_priv->tx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_tx_status_ring,
678 sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
679 writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXSTATQ_1);
680 writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXCSA_1);
681 writew(OLYMPIC_TX_RING_SIZE,olympic_mmio+TXSTATQCNT_1);
683 olympic_priv->tx_ring_free=0; /* next entry in tx ring to use */
684 olympic_priv->tx_ring_last_status=OLYMPIC_TX_RING_SIZE-1; /* last processed tx status */
686 writel(0xffffffff, olympic_mmio+EISR_RWM) ; /* clean the eisr */
687 writel(0,olympic_mmio+EISR) ;
688 writel(EISR_MASK_OPTIONS,olympic_mmio+EISR_MASK) ; /* enables most of the TX error interrupts */
689 writel(SISR_TX1_EOF | SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE | SISR_ERR,olympic_mmio+SISR_MASK_SUM);
692 printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
693 printk("SISR MASK: %x\n",readl(olympic_mmio+SISR_MASK));
696 if (olympic_priv->olympic_network_monitor) {
701 oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr);
702 opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr);
704 for (i = 0; i < 6; i++)
705 addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+i);
706 printk("%s: Node Address: %pM\n", dev->name, addr);
707 printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name,
708 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
709 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
710 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
711 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
713 for (i = 0; i < 6; i++)
714 addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+i);
715 printk("%s: NAUN Address: %pM\n", dev->name, addr);
718 netif_start_queue(dev);
722 free_irq(dev->irq, dev);
727 * When we enter the rx routine we do not know how many frames have been
728 * queued on the rx channel. Therefore we start at the next rx status
729 * position and travel around the receive ring until we have completed
732 * This means that we may process the frame before we receive the end
733 * of frame interrupt. This is why we always test the status instead
734 * of blindly processing the next frame.
736 * We also remove the last 4 bytes from the packet as well, these are
737 * just token ring trailer info and upset protocols that don't check
738 * their own length, i.e. SNA.
741 static void olympic_rx(struct net_device *dev)
743 struct olympic_private *olympic_priv=netdev_priv(dev);
744 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
745 struct olympic_rx_status *rx_status;
746 struct olympic_rx_desc *rx_desc ;
747 int rx_ring_last_received,length, buffer_cnt, cpy_length, frag_len;
748 struct sk_buff *skb, *skb2;
751 rx_status=&(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]) ;
753 while (rx_status->status_buffercnt) {
754 u32 l_status_buffercnt;
756 olympic_priv->rx_status_last_received++ ;
757 olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
759 printk("rx status: %x rx len: %x \n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
761 length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
762 buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
763 i = buffer_cnt ; /* Need buffer_cnt later for rxenq update */
764 frag_len = le32_to_cpu(rx_status->fragmentcnt_framelen) >> 16;
767 printk("length: %x, frag_len: %x, buffer_cnt: %x\n", length, frag_len, buffer_cnt);
769 l_status_buffercnt = le32_to_cpu(rx_status->status_buffercnt);
770 if(l_status_buffercnt & 0xC0000000) {
771 if (l_status_buffercnt & 0x3B000000) {
772 if (olympic_priv->olympic_message_level) {
773 if (l_status_buffercnt & (1<<29)) /* Rx Frame Truncated */
774 printk(KERN_WARNING "%s: Rx Frame Truncated \n",dev->name);
775 if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */
776 printk(KERN_WARNING "%s: Rx Frame Receive overrun \n",dev->name);
777 if (l_status_buffercnt & (1<<27)) /* No receive buffers */
778 printk(KERN_WARNING "%s: No receive buffers \n",dev->name);
779 if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
780 printk(KERN_WARNING "%s: Receive frame error detect \n",dev->name);
781 if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
782 printk(KERN_WARNING "%s: Received Error Detect \n",dev->name);
784 olympic_priv->rx_ring_last_received += i ;
785 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
786 dev->stats.rx_errors++;
789 if (buffer_cnt == 1) {
790 skb = dev_alloc_skb(max_t(int, olympic_priv->pkt_buf_sz,length)) ;
792 skb = dev_alloc_skb(length) ;
796 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ;
797 dev->stats.rx_dropped++;
798 /* Update counters even though we don't transfer the frame */
799 olympic_priv->rx_ring_last_received += i ;
800 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
802 /* Optimise based upon number of buffers used.
803 If only one buffer is used we can simply swap the buffers around.
804 If more than one then we must use the new buffer and copy the information
805 first. Ideally all frames would be in a single buffer, this can be tuned by
806 altering the buffer size. If the length of the packet is less than
807 1500 bytes we're going to copy it over anyway to stop packets getting
808 dropped from sockets with buffers smaller than our pkt_buf_sz. */
811 olympic_priv->rx_ring_last_received++ ;
812 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
813 rx_ring_last_received = olympic_priv->rx_ring_last_received ;
815 skb2=olympic_priv->rx_ring_skb[rx_ring_last_received] ;
817 pci_unmap_single(olympic_priv->pdev,
818 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
819 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
820 skb_put(skb2,length-4);
821 skb2->protocol = tr_type_trans(skb2,dev);
822 olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer =
823 cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data,
824 olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
825 olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length =
826 cpu_to_le32(olympic_priv->pkt_buf_sz);
827 olympic_priv->rx_ring_skb[rx_ring_last_received] = skb ;
830 pci_dma_sync_single_for_cpu(olympic_priv->pdev,
831 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
832 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
833 skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
834 skb_put(skb,length - 4),
836 pci_dma_sync_single_for_device(olympic_priv->pdev,
837 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
838 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
839 skb->protocol = tr_type_trans(skb,dev) ;
843 do { /* Walk the buffers */
844 olympic_priv->rx_ring_last_received++ ;
845 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
846 rx_ring_last_received = olympic_priv->rx_ring_last_received ;
847 pci_dma_sync_single_for_cpu(olympic_priv->pdev,
848 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
849 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
850 rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
851 cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length));
852 skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
853 skb_put(skb, cpy_length),
855 pci_dma_sync_single_for_device(olympic_priv->pdev,
856 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
857 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
859 skb_trim(skb,skb->len-4) ;
860 skb->protocol = tr_type_trans(skb,dev);
863 dev->stats.rx_packets++ ;
864 dev->stats.rx_bytes += length ;
865 } /* if skb == null */
866 } /* If status & 0x3b */
868 } else { /*if buffercnt & 0xC */
869 olympic_priv->rx_ring_last_received += i ;
870 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1) ;
873 rx_status->fragmentcnt_framelen = 0 ;
874 rx_status->status_buffercnt = 0 ;
875 rx_status = &(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received+1) & (OLYMPIC_RX_RING_SIZE -1) ]);
877 writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | buffer_cnt , olympic_mmio+RXENQ);
/*
 * olympic_freemem - free all receive skbs still held in the rx ring and
 * unmap every DMA mapping backing the rx/tx descriptor and status rings.
 *
 * Uses dev_kfree_skb_irq(), so it is safe from non-sleeping context.
 * NOTE(review): the rx walk starts from rx_status_last_received rather
 * than slot 0 -- presumably the caller (olympic_close) has positioned it
 * so the full ring is covered; confirm against the caller.
 */
882 static void olympic_freemem(struct net_device *dev)
884 struct olympic_private *olympic_priv=netdev_priv(dev);
/* Walk the whole rx ring, releasing any skb still attached to a slot. */
887 for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
888 if (olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] != NULL) {
889 dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
890 olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] = NULL;
/* A buffer field of 0xdeadbeef marks a descriptor with no live DMA
 * mapping (see olympic_interrupt tx completion), so skip the unmap. */
892 if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != cpu_to_le32(0xdeadbeef)) {
893 pci_unmap_single(olympic_priv->pdev,
894 le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
895 olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
/* Advance the index with the power-of-two ring-size mask. */
897 olympic_priv->rx_status_last_received++;
898 olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
/* Unmap the ring structures themselves: status rings were mapped
 * FROMDEVICE, descriptor rings TODEVICE. */
901 pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr,
902 sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
903 pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
904 sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
906 pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr,
907 sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
908 pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr,
909 sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);
/*
 * olympic_interrupt - main interrupt handler.
 *
 * Reads the SISR (summary interrupt status register), bails out early if
 * the interrupt is not ours (shared IRQ line), then dispatches on the
 * individual cause bits: SRB/TRB/ASB/ARB command completion, tx end-of-frame,
 * rx status, adapter-check, and error conditions.  Runs under
 * olympic_priv->olympic_lock for everything after the ownership test.
 */
914 static irqreturn_t olympic_interrupt(int irq, void *dev_id)
916 struct net_device *dev= (struct net_device *)dev_id;
917 struct olympic_private *olympic_priv=netdev_priv(dev);
918 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
920 u8 __iomem *adapter_check_area ;
923 * Read sisr but don't reset it yet.
924 * The indication bit may have been set but the interrupt latch
925 * bit may not be set, so we'd lose the interrupt later.
927 sisr=readl(olympic_mmio+SISR) ;
928 if (!(sisr & SISR_MI)) /* Interrupt isn't for us */
930 sisr=readl(olympic_mmio+SISR_RR) ; /* Read & Reset sisr */
932 spin_lock(&olympic_priv->olympic_lock);
934 /* Hotswap gives us this on removal */
935 if (sisr == 0xffffffff) {
936 printk(KERN_WARNING "%s: Hotswap adapter removal.\n",dev->name) ;
937 spin_unlock(&olympic_priv->olympic_lock) ;
941 if (sisr & (SISR_SRB_REPLY | SISR_TX1_EOF | SISR_RX_STATUS | SISR_ADAPTER_CHECK |
942 SISR_ASB_FREE | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_RX_NOBUF | SISR_ERR)) {
944 /* If we ever get this the adapter is seriously dead. Only a reset is going to
945 * bring it back to life. We're talking pci bus errors and such like :( */
946 if((sisr & SISR_ERR) && (readl(olympic_mmio+EISR) & EISR_MASK_OPTIONS)) {
947 printk(KERN_ERR "Olympic: EISR Error, EISR=%08x\n",readl(olympic_mmio+EISR)) ;
948 printk(KERN_ERR "The adapter must be reset to clear this condition.\n") ;
949 printk(KERN_ERR "Please report this error to the driver maintainer and/\n") ;
950 printk(KERN_ERR "or the linux-tr mailing list.\n") ;
951 wake_up_interruptible(&olympic_priv->srb_wait);
952 spin_unlock(&olympic_priv->olympic_lock) ;
/* SRB reply: srb_queued==1 means a task is sleeping on srb_wait,
 * srb_queued==2 means process in the bottom-half helper. */
956 if(sisr & SISR_SRB_REPLY) {
957 if(olympic_priv->srb_queued==1) {
958 wake_up_interruptible(&olympic_priv->srb_wait);
959 } else if (olympic_priv->srb_queued==2) {
960 olympic_srb_bh(dev) ;
962 olympic_priv->srb_queued=0;
963 } /* SISR_SRB_REPLY */
965 /* We shouldn't ever miss the Tx interrupt, but the you never know, hence the loop to ensure
966 we get all tx completions. */
967 if (sisr & SISR_TX1_EOF) {
968 while(olympic_priv->olympic_tx_status_ring[(olympic_priv->tx_ring_last_status + 1) & (OLYMPIC_TX_RING_SIZE-1)].status) {
969 olympic_priv->tx_ring_last_status++;
970 olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1);
971 olympic_priv->free_tx_ring_entries++;
972 dev->stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
973 dev->stats.tx_packets++ ;
974 pci_unmap_single(olympic_priv->pdev,
975 le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer),
976 olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE);
977 dev_kfree_skb_irq(olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]);
/* Poison the descriptor so olympic_freemem() knows it is unmapped. */
978 olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer=cpu_to_le32(0xdeadbeef);
979 olympic_priv->olympic_tx_status_ring[olympic_priv->tx_ring_last_status].status=0;
981 netif_wake_queue(dev);
984 if (sisr & SISR_RX_STATUS) {
986 } /* SISR_RX_STATUS */
/* Adapter check: the card reports an internal fault; dump the 8-byte
 * check area from LAP window memory and leave the queue stopped. */
988 if (sisr & SISR_ADAPTER_CHECK) {
989 netif_stop_queue(dev);
990 printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
991 writel(readl(olympic_mmio+LAPWWC),olympic_mmio+LAPA);
992 adapter_check_area = olympic_priv->olympic_lap + ((readl(olympic_mmio+LAPWWC)) & (~0xf800)) ;
993 printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, readb(adapter_check_area+0), readb(adapter_check_area+1), readb(adapter_check_area+2), readb(adapter_check_area+3), readb(adapter_check_area+4), readb(adapter_check_area+5), readb(adapter_check_area+6), readb(adapter_check_area+7)) ;
994 spin_unlock(&olympic_priv->olympic_lock) ;
996 } /* SISR_ADAPTER_CHECK */
998 if (sisr & SISR_ASB_FREE) {
999 /* Wake up anything that is waiting for the asb response */
1000 if (olympic_priv->asb_queued) {
1001 olympic_asb_bh(dev) ;
1003 } /* SISR_ASB_FREE */
1005 if (sisr & SISR_ARB_CMD) {
1006 olympic_arb_cmd(dev) ;
1007 } /* SISR_ARB_CMD */
1009 if (sisr & SISR_TRB_REPLY) {
1010 /* Wake up anything that is waiting for the trb response */
1011 if (olympic_priv->trb_queued) {
1012 wake_up_interruptible(&olympic_priv->trb_wait);
1014 olympic_priv->trb_queued = 0 ;
1015 } /* SISR_TRB_REPLY */
1017 if (sisr & SISR_RX_NOBUF) {
1018 /* According to the documentation, we don't have to do anything, but trapping it keeps it out of
1019 /var/log/messages. */
1020 } /* SISR_RX_NOBUF */
1022 printk(KERN_WARNING "%s: Unexpected interrupt: %x\n",dev->name, sisr);
1023 printk(KERN_WARNING "%s: SISR_MASK: %x\n",dev->name, readl(olympic_mmio+SISR_MASK)) ;
1024 } /* One if the interrupts we want */
/* Re-enable the master interrupt before returning. */
1025 writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);
1027 spin_unlock(&olympic_priv->olympic_lock) ;
/*
 * olympic_xmit - hard_start_xmit handler.
 *
 * Under the adapter spinlock (irqsave), maps the skb for DMA, fills the
 * next free tx descriptor (0x80000000 in status_length presumably being
 * the hardware "own"/valid bit -- confirm against the chip docs), and
 * kicks the adapter via the TXENQ_1 register, toggling its high bit.
 * Descriptors are little-endian in memory, hence the cpu_to_le32().
 */
1030 static int olympic_xmit(struct sk_buff *skb, struct net_device *dev)
1033 struct olympic_private *olympic_priv=netdev_priv(dev);
1034 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
1035 unsigned long flags ;
1037 spin_lock_irqsave(&olympic_priv->olympic_lock, flags);
1039 netif_stop_queue(dev);
1041 if(olympic_priv->free_tx_ring_entries) {
1042 olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].buffer =
1043 cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, skb->len,PCI_DMA_TODEVICE));
1044 olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].status_length = cpu_to_le32(skb->len | (0x80000000));
1045 olympic_priv->tx_ring_skb[olympic_priv->tx_ring_free]=skb;
1046 olympic_priv->free_tx_ring_entries--;
1048 olympic_priv->tx_ring_free++;
1049 olympic_priv->tx_ring_free &= (OLYMPIC_TX_RING_SIZE-1);
/* Toggle the enqueue bit and post one frame to the adapter. */
1050 writew((((readw(olympic_mmio+TXENQ_1)) & 0x8000) ^ 0x8000) | 1,olympic_mmio+TXENQ_1);
1051 netif_wake_queue(dev);
1052 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
1055 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
/*
 * olympic_close - ndo_stop handler.
 *
 * Issues a SRB_CLOSE_ADAPTER command to the card, then sleeps (up to 60s,
 * interruptibly) on srb_wait for the interrupt handler to signal SRB
 * completion.  A timeout or pending signal is logged but treated as
 * non-fatal.  Afterwards it frees all ring memory, resets the tx/rx
 * fifos and busmaster logic via BCTL bits 13-14, and releases the IRQ.
 */
1062 static int olympic_close(struct net_device *dev)
1064 struct olympic_private *olympic_priv=netdev_priv(dev);
1065 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*srb;
1066 unsigned long t,flags;
1068 DECLARE_WAITQUEUE(wait,current) ;
1070 netif_stop_queue(dev);
/* Point the LAP window at the SRB and build the close command in it. */
1072 writel(olympic_priv->srb,olympic_mmio+LAPA);
1073 srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
1075 writeb(SRB_CLOSE_ADAPTER,srb+0);
1077 writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
/* Queue ourselves before kicking the command so the wakeup can't race. */
1079 add_wait_queue(&olympic_priv->srb_wait,&wait) ;
1080 set_current_state(TASK_INTERRUPTIBLE) ;
1082 spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
1083 olympic_priv->srb_queued=1;
1085 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1086 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
1088 while(olympic_priv->srb_queued) {
1090 t = schedule_timeout_interruptible(60*HZ);
1092 if(signal_pending(current)) {
1093 printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
1094 printk(KERN_WARNING "SISR=%x MISR=%x\n",readl(olympic_mmio+SISR),readl(olympic_mmio+LISR));
1095 olympic_priv->srb_queued=0;
1100 printk(KERN_WARNING "%s: SRB timed out. May not be fatal. \n",dev->name) ;
1102 olympic_priv->srb_queued=0;
1104 remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
/* Position rx_status_last_received so olympic_freemem covers the ring. */
1106 olympic_priv->rx_status_last_received++;
1107 olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
1109 olympic_freemem(dev) ;
1111 /* reset tx/rx fifo's and busmaster logic */
1113 writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
1115 writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
/* Debug dump of the SRB reply bytes. */
1120 printk("srb(%p): ",srb);
1122 printk("%x ",readb(srb+i));
1126 free_irq(dev->irq,dev);
/*
 * olympic_set_rx_mode - ndo_set_multicast_list handler.
 *
 * Runs in atomic context, so SRB commands are queued with srb_queued=2
 * and completed later by olympic_srb_bh() rather than slept on.  First
 * updates the copy-all options (promiscuous etc.) if they changed, then
 * computes the 4-byte token-ring functional address by OR-ing bytes 2-5
 * of every multicast address and issues SRB_SET_FUNC_ADDRESS.
 */
1132 static void olympic_set_rx_mode(struct net_device *dev)
1134 struct olympic_private *olympic_priv = netdev_priv(dev);
1135 u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
1138 struct dev_mc_list *dmi ;
1139 unsigned char dev_mc_address[4] ;
1142 writel(olympic_priv->srb,olympic_mmio+LAPA);
1143 srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
1144 options = olympic_priv->olympic_copy_all_options;
1146 if (dev->flags&IFF_PROMISC)
1151 /* Only issue the srb if there is a change in options */
1153 if ((options ^ olympic_priv->olympic_copy_all_options)) {
1155 /* Now to issue the srb command to alter the copy.all.options */
1157 writeb(SRB_MODIFY_RECEIVE_OPTIONS,srb);
1159 writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1161 writeb(olympic_priv->olympic_receive_options,srb+4);
1162 writeb(options,srb+5);
1164 olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
1166 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1168 olympic_priv->olympic_copy_all_options = options ;
1173 /* Set the functional addresses we need for multicast */
1175 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
/* OR together the low four bytes of each multicast MAC address. */
1177 for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) {
1178 dev_mc_address[0] |= dmi->dmi_addr[2] ;
1179 dev_mc_address[1] |= dmi->dmi_addr[3] ;
1180 dev_mc_address[2] |= dmi->dmi_addr[4] ;
1181 dev_mc_address[3] |= dmi->dmi_addr[5] ;
1184 writeb(SRB_SET_FUNC_ADDRESS,srb+0);
1186 writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1190 writeb(dev_mc_address[0],srb+6);
1191 writeb(dev_mc_address[1],srb+7);
1192 writeb(dev_mc_address[2],srb+8);
1193 writeb(dev_mc_address[3],srb+9);
1195 olympic_priv->srb_queued = 2 ;
1196 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
/*
 * olympic_srb_bh - SRB-reply bottom half, called from olympic_interrupt()
 * when srb_queued==2 (command issued from atomic context).
 *
 * Dispatches on the SRB command byte (srb[0]) and logs the outcome held
 * in the return-code byte (srb[2]).  Purely diagnostic: no state other
 * than log output is changed here.
 */
1200 static void olympic_srb_bh(struct net_device *dev)
1202 struct olympic_private *olympic_priv = netdev_priv(dev);
1203 u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
1206 writel(olympic_priv->srb,olympic_mmio+LAPA);
1207 srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
1209 switch (readb(srb)) {
1211 /* SRB_MODIFY_RECEIVE_OPTIONS i.e. set_multicast_list options (promiscuous)
1212 * At some point we should do something if we get an error, such as
1213 * resetting the IFF_PROMISC flag in dev
1216 case SRB_MODIFY_RECEIVE_OPTIONS:
1217 switch (readb(srb+2)) {
1219 printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name) ;
1222 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
1225 if (olympic_priv->olympic_message_level)
1226 printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",dev->name,olympic_priv->olympic_copy_all_options, olympic_priv->olympic_receive_options) ;
1228 } /* switch srb[2] */
1231 /* SRB_SET_GROUP_ADDRESS - Multicast group setting
1234 case SRB_SET_GROUP_ADDRESS:
1235 switch (readb(srb+2)) {
1239 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1242 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
1245 printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n",dev->name) ;
1247 case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */
1248 printk(KERN_WARNING "%s: Group address registers full\n",dev->name) ;
1251 printk(KERN_INFO "%s: Group Address already set.\n",dev->name) ;
1255 } /* switch srb[2] */
1258 /* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
1261 case SRB_RESET_GROUP_ADDRESS:
1262 switch (readb(srb+2)) {
1266 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1269 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1271 case 0x39: /* Must deal with this if individual multicast addresses used */
1272 printk(KERN_INFO "%s: Group address not found \n",dev->name);
1276 } /* switch srb[2] */
1280 /* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode
1283 case SRB_SET_FUNC_ADDRESS:
1284 switch (readb(srb+2)) {
1286 if (olympic_priv->olympic_message_level)
1287 printk(KERN_INFO "%s: Functional Address Mask Set \n",dev->name) ;
1290 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1293 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1297 } /* switch srb[2] */
1300 /* SRB_READ_LOG - Read and reset the adapter error counters
1304 switch (readb(srb+2)) {
1306 if (olympic_priv->olympic_message_level)
1307 printk(KERN_INFO "%s: Read Log issued\n",dev->name) ;
1310 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1313 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1316 } /* switch srb[2] */
1319 /* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */
1321 case SRB_READ_SR_COUNTERS:
1322 switch (readb(srb+2)) {
1324 if (olympic_priv->olympic_message_level)
1325 printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ;
1328 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1331 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1335 } /* switch srb[2] */
1339 printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n",dev->name);
1341 } /* switch srb[0] */
/*
 * olympic_set_mac_address - ndo_set_mac_address handler.
 *
 * Stores the requested locally administered address (LAA) in
 * olympic_priv->olympic_laa; rejects the request while the interface is
 * up.  NOTE(review): only the private copy is updated here -- the new
 * address is presumably applied to the adapter on the next open; confirm
 * against olympic_open().
 */
1345 static int olympic_set_mac_address (struct net_device *dev, void *addr)
1347 struct sockaddr *saddr = addr ;
1348 struct olympic_private *olympic_priv = netdev_priv(dev);
1350 if (netif_running(dev)) {
1351 printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ;
1355 memcpy(olympic_priv->olympic_laa, saddr->sa_data,dev->addr_len) ;
1357 if (olympic_priv->olympic_message_level) {
1358 printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",dev->name, olympic_priv->olympic_laa[0],
1359 olympic_priv->olympic_laa[1], olympic_priv->olympic_laa[2],
1360 olympic_priv->olympic_laa[3], olympic_priv->olympic_laa[4],
1361 olympic_priv->olympic_laa[5]);
/*
 * olympic_arb_cmd - handle an adapter-initiated ARB command, called from
 * olympic_interrupt() on SISR_ARB_CMD.
 *
 * Two commands are handled:
 *  - ARB_RECEIVE_DATA: a received MAC frame.  The frame is copied out of
 *    adapter LAP memory (walking the chained mac_receive_buffer list),
 *    passed up via netif_rx(), and acknowledged through the ASB.  If the
 *    ASB is busy, asb_queued=1 defers the acknowledge to olympic_asb_bh().
 *  - ARB_LAN_CHANGE_STATUS: ring status change; serious conditions close
 *    the adapter, counter overflows trigger READ.LOG / READ.SR.COUNTERS
 *    SRBs (queued with srb_queued=2 since we cannot sleep here).
 * Multi-byte fields in the ARB are big-endian, hence the swab16() on
 * readw() values (see the endian note in the file header).
 */
1367 static void olympic_arb_cmd(struct net_device *dev)
1369 struct olympic_private *olympic_priv = netdev_priv(dev);
1370 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
1371 u8 __iomem *arb_block, *asb_block, *srb ;
1373 u16 frame_len, buffer_len ;
1374 struct sk_buff *mac_frame ;
1375 u8 __iomem *buf_ptr ;
1376 u8 __iomem *frame_data ;
1378 u16 lan_status = 0, lan_status_diff ; /* Initialize to stop compiler warning */
1382 arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
1383 asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;
1384 srb = (olympic_priv->olympic_lap + olympic_priv->srb) ;
1386 if (readb(arb_block+0) == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */
1388 header_len = readb(arb_block+8) ; /* 802.5 Token-Ring Header Length */
1389 frame_len = swab16(readw(arb_block + 10)) ;
1391 buff_off = swab16(readw(arb_block + 6)) ;
1393 buf_ptr = olympic_priv->olympic_lap + buff_off ;
/* Debug dump of the first buffer's header bytes. */
1398 frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
1400 for (i=0 ; i < 14 ; i++) {
1401 printk("Loc %d = %02x\n",i,readb(frame_data + i));
1404 printk("next %04x, fs %02x, len %04x \n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
1407 mac_frame = dev_alloc_skb(frame_len) ;
1409 printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n", dev->name);
1413 /* Walk the buffer chain, creating the frame */
1416 frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
1417 buffer_len = swab16(readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
1418 memcpy_fromio(skb_put(mac_frame, buffer_len), frame_data , buffer_len ) ;
1419 next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next));
1420 } while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + swab16(next_ptr)));
1422 mac_frame->protocol = tr_type_trans(mac_frame, dev);
1424 if (olympic_priv->olympic_network_monitor) {
1425 struct trh_hdr *mac_hdr;
1426 printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name);
1427 mac_hdr = tr_hdr(mac_frame);
1428 printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %pM\n",
1429 dev->name, mac_hdr->daddr);
1430 printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %pM\n",
1431 dev->name, mac_hdr->saddr);
1433 netif_rx(mac_frame);
1436 /* Now tell the card we have dealt with the received frame */
1438 /* Set LISR Bit 1 */
1439 writel(LISR_ARB_FREE,olympic_priv->olympic_mmio + LISR_SUM);
1441 /* Is the ASB free ? */
1443 if (readb(asb_block + 2) != 0xff) {
1444 olympic_priv->asb_queued = 1 ;
1445 writel(LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
1447 /* Drop out and wait for the bottom half to be run */
1450 writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
1451 writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
1452 writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
1453 writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */
1455 writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
1457 olympic_priv->asb_queued = 2 ;
1461 } else if (readb(arb_block) == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */
1462 lan_status = swab16(readw(arb_block+6));
1463 fdx_prot_error = readb(arb_block+8) ;
1465 /* Issue ARB Free */
1466 writel(LISR_ARB_FREE,olympic_priv->olympic_mmio+LISR_SUM);
/* XOR against the previous status so we only report changed bits. */
1468 lan_status_diff = olympic_priv->olympic_lan_status ^ lan_status ;
1470 if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) {
1471 if (lan_status_diff & LSC_LWF)
1472 printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name);
1473 if (lan_status_diff & LSC_ARW)
1474 printk(KERN_WARNING "%s: Auto removal error\n",dev->name);
1475 if (lan_status_diff & LSC_FPE)
1476 printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name);
1477 if (lan_status_diff & LSC_RR)
1478 printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name);
1480 /* Adapter has been closed by the hardware */
1482 /* reset tx/rx fifo's and busmaster logic */
1484 writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
1486 writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
1487 netif_stop_queue(dev);
1488 olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ;
1489 printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ;
1490 } /* If serious error */
1492 if (olympic_priv->olympic_message_level) {
1493 if (lan_status_diff & LSC_SIG_LOSS)
1494 printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ;
1495 if (lan_status_diff & LSC_HARD_ERR)
1496 printk(KERN_INFO "%s: Beaconing \n",dev->name);
1497 if (lan_status_diff & LSC_SOFT_ERR)
1498 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name);
1499 if (lan_status_diff & LSC_TRAN_BCN)
1500 printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
1501 if (lan_status_diff & LSC_SS)
1502 printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
1503 if (lan_status_diff & LSC_RING_REC)
1504 printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
1505 if (lan_status_diff & LSC_FDX_MODE)
1506 printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name);
1509 if (lan_status_diff & LSC_CO) {
1511 if (olympic_priv->olympic_message_level)
1512 printk(KERN_INFO "%s: Counter Overflow \n", dev->name);
1514 /* Issue READ.LOG command */
1516 writeb(SRB_READ_LOG, srb);
1518 writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1523 olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
1525 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1529 if (lan_status_diff & LSC_SR_CO) {
1531 if (olympic_priv->olympic_message_level)
1532 printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);
1534 /* Issue a READ.SR.COUNTERS */
1536 writeb(SRB_READ_SR_COUNTERS,srb);
1538 writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1541 olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
1543 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1547 olympic_priv->olympic_lan_status = lan_status ;
1549 } /* Lan.change.status */
1551 printk(KERN_WARNING "%s: Unknown arb command \n", dev->name);
/*
 * olympic_asb_bh - ASB bottom half, called from olympic_interrupt() on
 * SISR_ASB_FREE when asb_queued is set.
 *
 * asb_queued==1: the ASB was busy when olympic_arb_cmd() wanted to
 * acknowledge a received MAC frame; send that acknowledge now and move
 * to state 2.  asb_queued==2: the adapter has replied; log any error
 * return code and clear the queued state.
 */
1554 static void olympic_asb_bh(struct net_device *dev)
1556 struct olympic_private *olympic_priv = netdev_priv(dev);
1557 u8 __iomem *arb_block, *asb_block ;
1559 arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
1560 asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;
1562 if (olympic_priv->asb_queued == 1) { /* Dropped through the first time */
1564 writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
1565 writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
1566 writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
1567 writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */
1569 writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
1570 olympic_priv->asb_queued = 2 ;
1575 if (olympic_priv->asb_queued == 2) {
1576 switch (readb(asb_block+2)) {
1578 printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name);
1581 printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name);
1584 /* Valid response, everything should be ok again */
1587 printk(KERN_WARNING "%s: Invalid return code in asb\n",dev->name);
1591 olympic_priv->asb_queued = 0 ;
/*
 * olympic_change_mtu - ndo_change_mtu handler.
 *
 * Validates the requested MTU against the ring speed (4 Mbps rings have
 * a smaller maximum than 16 Mbps) and records the new packet buffer size
 * (MTU plus token-ring header) used when rx buffers are allocated.
 */
1594 static int olympic_change_mtu(struct net_device *dev, int mtu)
1596 struct olympic_private *olympic_priv = netdev_priv(dev);
1599 if (olympic_priv->olympic_ring_speed == 4)
1610 olympic_priv->pkt_buf_sz = mtu + TR_HLEN ;
/*
 * olympic_proc_info - legacy procfs read handler (one entry per adapter,
 * see the 03/27/01 changelog note).
 *
 * Formats the adapter address table and token-ring parameters table,
 * both read from adapter LAP memory.  16-bit fields from the adapter are
 * big-endian and go through swab16() (see the endian note in the file
 * header).  The *start/offset/length bookkeeping at the end is the
 * standard old-style proc_read windowing protocol.
 */
1615 static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
1617 struct net_device *dev = (struct net_device *)data ;
1618 struct olympic_private *olympic_priv=netdev_priv(dev);
1619 u8 __iomem *oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
1620 u8 __iomem *opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
1629 size = sprintf(buffer,
1630 "IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name);
1631 size += sprintf(buffer+size, "\n%6s: Adapter Address : Node Address : Functional Addr\n",
/* Copy the node address out of IO memory byte-by-byte for %pM. */
1634 for (i = 0 ; i < 6 ; i++)
1635 addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr) + i);
1637 size += sprintf(buffer+size, "%6s: %pM : %pM : %02x:%02x:%02x:%02x\n",
1639 dev->dev_addr, addr,
1640 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
1641 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
1642 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
1643 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
1645 size += sprintf(buffer+size, "\n%6s: Token Ring Parameters Table:\n", dev->name);
1647 size += sprintf(buffer+size, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n",
1650 for (i = 0 ; i < 6 ; i++)
1651 addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr) + i);
1652 for (i = 0 ; i < 6 ; i++)
1653 addr2[i] = readb(opt+offsetof(struct olympic_parameters_table, poll_addr) + i);
1655 size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x : %pM : %pM : %04x : %04x : %04x :\n",
1657 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)),
1658 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1),
1659 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2),
1660 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3),
1662 swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))),
1663 swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))),
1664 swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code))));
1666 size += sprintf(buffer+size, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n",
1669 for (i = 0 ; i < 6 ; i++)
1670 addr[i] = readb(opt+offsetof(struct olympic_parameters_table, source_addr) + i);
1671 size += sprintf(buffer+size, "%6s: %pM : %04x : %04x : %04x : %04x : %04x : %04x : \n",
1673 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))),
1674 swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))),
1675 swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))),
1676 swab16(readw(opt+offsetof(struct olympic_parameters_table, local_ring))),
1677 swab16(readw(opt+offsetof(struct olympic_parameters_table, mon_error))),
1678 swab16(readw(opt+offsetof(struct olympic_parameters_table, frame_correl))));
1680 size += sprintf(buffer+size, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n",
1683 for (i = 0 ; i < 6 ; i++)
1684 addr[i] = readb(opt+offsetof(struct olympic_parameters_table, beacon_naun) + i);
1685 size += sprintf(buffer+size, "%6s: : %02x : %02x : %pM : %02x:%02x:%02x:%02x : \n",
1687 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))),
1688 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))),
1690 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)),
1691 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1),
1692 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2),
1693 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+3));
/* Old-style proc_read windowing: report the slice the caller asked for. */
1701 *start=buffer+(offset-begin); /* Start of wanted data */
1702 len-=(offset-begin); /* Start slop */
1704 len=length; /* Ending slop */
/*
 * olympic_remove_one - PCI remove callback.
 *
 * Tears down in reverse order of probe: removes the per-adapter proc
 * entry (if network-monitor mode created one), unregisters the netdev,
 * unmaps both MMIO regions, and releases the PCI regions and drvdata.
 */
1708 static void __devexit olympic_remove_one(struct pci_dev *pdev)
1710 struct net_device *dev = pci_get_drvdata(pdev) ;
1711 struct olympic_private *olympic_priv=netdev_priv(dev);
1713 if (olympic_priv->olympic_network_monitor) {
/* Proc entry name is "olympic_<ifname>", matching the probe path. */
1714 char proc_name[20] ;
1715 strcpy(proc_name,"olympic_") ;
1716 strcat(proc_name,dev->name) ;
1717 remove_proc_entry(proc_name,init_net.proc_net);
1719 unregister_netdev(dev) ;
1720 iounmap(olympic_priv->olympic_mmio) ;
1721 iounmap(olympic_priv->olympic_lap) ;
1722 pci_release_regions(pdev) ;
1723 pci_set_drvdata(pdev,NULL) ;
/* PCI driver glue: probe/remove entry points and the supported ID table. */
1727 static struct pci_driver olympic_driver = {
1729 .id_table = olympic_pci_tbl,
1730 .probe = olympic_probe,
1731 .remove = __devexit_p(olympic_remove_one),
/* Module init: register the PCI driver with the core. */
1734 static int __init olympic_pci_init(void)
1736 return pci_register_driver(&olympic_driver) ;
/* Module exit: unregister the PCI driver (remove is called per device). */
1739 static void __exit olympic_pci_cleanup(void)
1741 pci_unregister_driver(&olympic_driver) ;
1745 module_init(olympic_pci_init) ;
1746 module_exit(olympic_pci_cleanup) ;
1748 MODULE_LICENSE("GPL");