1/*
2 *   olympic.c (c) 1999 Peter De Schrijver All Rights Reserved
3 *		   1999/2000 Mike Phillips (mikep@linuxtr.net)
4 *
5 *  Linux driver for IBM PCI tokenring cards based on the Pit/Pit-Phy/Olympic
6 *  chipset.
7 *
8 *  Base Driver Skeleton:
9 *      Written 1993-94 by Donald Becker.
10 *
11 *      Copyright 1993 United States Government as represented by the
12 *      Director, National Security Agency.
13 *
14 *  Thanks to Erik De Cock, Adrian Bridgett and Frank Fiene for their
15 *  assistance and perseverance with the testing of this driver.
16 *
17 *  This software may be used and distributed according to the terms
18 *  of the GNU General Public License, incorporated herein by reference.
19 *
20 *  4/27/99 - Alpha Release 0.1.0
21 *            First release to the public
22 *
23 *  6/8/99  - Official Release 0.2.0
24 *            Merged into the kernel code
25 *  8/18/99 - Updated driver for 2.3.13 kernel to use new pci
26 *	      resource. Driver also reports the card name returned by
27 *            the pci resource.
28 *  1/11/00 - Added spinlocks for smp
29 * 2/23/00 - Updated to dev_kfree_skb_irq
30 * 3/10/00 - Fixed FDX enable, which triggered other bugs that were
31 *            also squashed.
32 *  5/20/00 - Changes to handle Olympic on LinuxPPC. Endian changes.
33 *            The odd thing about the changes is that the fix for
34 *            endian issues with the big-endian data in the arb, asb...
35 *            was to always swab() the bytes, no matter what CPU.
36 *            That's because the read[wl]() functions always swap the
37 *            bytes on the way in on PPC.
38 *            Fixing the hardware descriptors was another matter,
39 *            because they weren't going through read[wl](); there all
40 *            the results had to be in memory as le32 values. kdaaker
41 *
42 * 12/23/00 - Added minimal Cardbus support (Thanks Donald).
43 *
44 * 03/09/01 - Add new pci api, dev_base_lock, general clean up.
45 *
46 * 03/27/01 - Add new dma pci (Thanks to Kyle Lucke) and alloc_trdev
47 *	      Change proc_fs behaviour, now one entry per adapter.
48 *
49 * 04/09/01 - Couple of bug fixes to the dma unmaps; ejecting the
50 *	      adapter while live no longer takes the system down with it.
51 *
52 * 06/02/01 - Clean up, copy skb for small packets
53 *
54 * 06/22/01 - Add EISR error handling routines
55 *
56 * 07/19/01 - Improve bad LAA reporting, strip out freemem
57 *	      into a separate function, it's called from 3
58 *	      different places now.
59 * 02/09/02 - Replaced sleep_on.
60 * 03/01/02 - Replace access to several registers from 32 bit to
61 * 	      16 bit. Fixes alignment errors on PPC 64 bit machines.
62 * 	      Thanks to Al Trautman for this one.
63 * 03/10/02 - Fix BUG in arb_cmd. Bug was there all along but was
64 * 	      silently ignored until the error checking code
65 * 	      went into version 1.0.0
66 * 06/04/02 - Add correct start up sequence for the cardbus adapters.
67 * 	      Required for strict compliance with pci power mgmt specs.
68 *  To Do:
69 *
70 *	     Wake on lan
71 *
72 *  If Problems do Occur
73 *  Most problems can be rectified by either closing and opening the interface
74 *  (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
75 *  if compiled into the kernel). Example commands are shown below.
76 */
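/* For example, assuming the adapter registered as tr0 and the driver was
 * built as a module (interface and module names here are illustrative only):
 *
 *	ifconfig tr0 down ; ifconfig tr0 up
 * or
 *	rmmod olympic ; insmod olympic
 */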
77
78/* Change OLYMPIC_DEBUG to 1 to get verbose, and I mean really verbose, messages */
79
80#define OLYMPIC_DEBUG 0
81
82
83#include <linux/config.h>
84#include <linux/module.h>
85
86#include <linux/kernel.h>
87#include <linux/sched.h>
88#include <linux/errno.h>
89#include <linux/timer.h>
90#include <linux/in.h>
91#include <linux/ioport.h>
92#include <linux/string.h>
93#include <linux/proc_fs.h>
94#include <linux/ptrace.h>
95#include <linux/skbuff.h>
96#include <linux/interrupt.h>
97#include <linux/delay.h>
98#include <linux/netdevice.h>
99#include <linux/trdevice.h>
100#include <linux/stddef.h>
101#include <linux/init.h>
102#include <linux/pci.h>
103#include <linux/spinlock.h>
104#include <net/checksum.h>
105
106#include <asm/io.h>
107#include <asm/system.h>
108#include <asm/bitops.h>
109
110#include "olympic.h"
111
112/* I've got to put some intelligence into the version number so that Peter and I know
113 * which version of the code somebody has got.
114 * Version Number = a.b.c.d  where a.b.c is the level of code and d is the latest author.
115 * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
116 *
117 * Official releases will only have an a.b.c version number format.
118 */
119
120static char version[] __devinitdata =
121"Olympic.c v1.0.5 6/04/02 - Peter De Schrijver & Mike Phillips" ;
122
123static char *open_maj_error[]  = {"No error", "Lobe Media Test", "Physical Insertion",
124				   "Address Verification", "Neighbor Notification (Ring Poll)",
125				   "Request Parameters","FDX Registration Request",
126				   "FDX Duplicate Address Check", "Station registration Query Wait",
127				   "Unknown stage"};
128
129static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost", "Wire Fault",
130				   "Ring Speed Mismatch", "Timeout","Ring Failure","Ring Beaconing",
131				   "Duplicate Node Address","Request Parameters","Remove Received",
132				   "Reserved", "Reserved", "No Monitor Detected for RPL",
133				   "Monitor Contention failure for RPL", "FDX Protocol Error"};
134
135/* Module parameters */
136
137MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
138MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ;
139
140/* Ring Speed 0,4,16,100
141 * 0 = Autosense
142 * 4,16 = Selected speed only, no autosense
143 * This allows the card to be the first on the ring
144 * and become the active monitor.
145 * 100 = Nothing at present, 100mbps is autodetected
146 * if FDX is turned on. May be implemented in the future to
147 * fail if 100mbps is not detected.
148 *
149 * WARNING: Some hubs will allow you to insert
150 * at the wrong speed
151 */
152
153static int ringspeed[OLYMPIC_MAX_ADAPTERS] = {0,} ;
154MODULE_PARM(ringspeed, "1-" __MODULE_STRING(OLYMPIC_MAX_ADAPTERS) "i");
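/* A usage sketch for the ringspeed option (values and module name are
 * illustrative only; one comma-separated value per adapter):
 *
 *	insmod olympic ringspeed=16,4	(first adapter fixed at 16 Mbps,
 *					 second fixed at 4 Mbps)
 *	insmod olympic ringspeed=0	(autosense, the default)
 */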
155
156/* Packet buffer size */
157
158static int pkt_buf_sz[OLYMPIC_MAX_ADAPTERS] = {0,} ;
159MODULE_PARM(pkt_buf_sz, "1-" __MODULE_STRING(OLYMPIC_MAX_ADAPTERS) "i") ;
160
161/* Message Level */
162
163static int message_level[OLYMPIC_MAX_ADAPTERS] = {0,} ;
164MODULE_PARM(message_level, "1-" __MODULE_STRING(OLYMPIC_MAX_ADAPTERS) "i") ;
165
166/* Change network_monitor to receive mac frames through the arb channel.
167 * Will also create a /proc/net/olympic_tr%d entry, where %d is the tr
168 * device, i.e. tr0, tr1 etc.
169 * Intended to be used to create a ring-error reporting network module
170 * i.e. it will give you the source address of beaconers on the ring.
171 */
172static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
173MODULE_PARM(network_monitor, "1-" __MODULE_STRING(OLYMPIC_MAX_ADAPTERS) "i");
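/* A usage sketch for the monitoring options (the interface name is
 * illustrative only and assumes the first adapter registered as tr0):
 *
 *	insmod olympic network_monitor=1 message_level=1
 *	cat /proc/net/olympic_tr0
 */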
174
175static struct pci_device_id olympic_pci_tbl[] __devinitdata = {
176	{PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
177	{ } 	/* Terminating Entry */
178};
179MODULE_DEVICE_TABLE(pci,olympic_pci_tbl) ;
180
181
182static int olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
183static int olympic_init(struct net_device *dev);
184static int olympic_open(struct net_device *dev);
185static int olympic_xmit(struct sk_buff *skb, struct net_device *dev);
186static int olympic_close(struct net_device *dev);
187static void olympic_set_rx_mode(struct net_device *dev);
188static void olympic_freemem(struct net_device *dev) ;
189static void olympic_interrupt(int irq, void *dev_id, struct pt_regs *regs);
190static struct net_device_stats * olympic_get_stats(struct net_device *dev);
191static int olympic_set_mac_address(struct net_device *dev, void *addr) ;
192static void olympic_arb_cmd(struct net_device *dev);
193static int olympic_change_mtu(struct net_device *dev, int mtu);
194static void olympic_srb_bh(struct net_device *dev) ;
195static void olympic_asb_bh(struct net_device *dev) ;
196static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) ;
197
198static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
199{
200	struct net_device *dev ;
201	struct olympic_private *olympic_priv;
202	static int card_no = -1 ;
203	int i ;
204
205	card_no++ ;
206
207	if ((i = pci_enable_device(pdev))) {
208		return i ;
209	}
210
211	pci_set_master(pdev);
212
213	if ((i = pci_request_regions(pdev,"olympic"))) {
214		return i ;
215	} ;
216
217	dev = alloc_trdev(sizeof(struct olympic_private)) ;
218
219	if (!dev) {
220		pci_release_regions(pdev) ;
221		return -ENOMEM ;
222	}
223
224	olympic_priv = dev->priv ;
225
226	init_waitqueue_head(&olympic_priv->srb_wait);
227	init_waitqueue_head(&olympic_priv->trb_wait);
228#if OLYMPIC_DEBUG
229	printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, dev->priv);
230#endif
231	dev->irq=pdev->irq;
232	dev->base_addr=pci_resource_start(pdev, 0);
233	dev->init=NULL; /* Must be NULL otherwise we get called twice */
234	olympic_priv->olympic_card_name = (char *)pdev->name ;
235	olympic_priv->olympic_mmio = ioremap(pci_resource_start(pdev,1),256);
236	olympic_priv->olympic_lap = ioremap(pci_resource_start(pdev,2),2048);
237	olympic_priv->pdev = pdev ;
238
239	if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) )
240		olympic_priv->pkt_buf_sz = PKT_BUF_SZ ;
241	else
242		olympic_priv->pkt_buf_sz = pkt_buf_sz[card_no] ;
243
244	dev->mtu = olympic_priv->pkt_buf_sz - TR_HLEN ;
245	olympic_priv->olympic_ring_speed = ringspeed[card_no] ;
246	olympic_priv->olympic_message_level = message_level[card_no] ;
247	olympic_priv->olympic_network_monitor = network_monitor[card_no];
248
249	if((i = olympic_init(dev))) {
250		iounmap(olympic_priv->olympic_mmio) ;
251		iounmap(olympic_priv->olympic_lap) ;
252		kfree(dev) ;
253		pci_release_regions(pdev) ;
254		return i ;
255	}
256
257	dev->open=&olympic_open;
258	dev->hard_start_xmit=&olympic_xmit;
259	dev->change_mtu=&olympic_change_mtu;
260	dev->stop=&olympic_close;
261	dev->do_ioctl=NULL;
262	dev->set_multicast_list=&olympic_set_rx_mode;
263	dev->get_stats=&olympic_get_stats ;
264	dev->set_mac_address=&olympic_set_mac_address ;
265	SET_MODULE_OWNER(dev) ;
266
267	pci_set_drvdata(pdev,dev) ;
268	register_netdev(dev) ;
269	printk("Olympic: %s registered as: %s\n",olympic_priv->olympic_card_name,dev->name);
270	if (olympic_priv->olympic_network_monitor) { /* Must go after register_netdev as we need the device name */
271		char proc_name[20] ;
272		strcpy(proc_name,"net/olympic_") ;
273		strcat(proc_name,dev->name) ;
274		create_proc_read_entry(proc_name,0,0,olympic_proc_info,(void *)dev) ;
275		printk("Olympic: Network Monitor information: /proc/%s\n",proc_name);
276	}
277	return  0 ;
278}
279
280static int __devinit olympic_init(struct net_device *dev)
281{
282    	struct olympic_private *olympic_priv;
283	u8 *olympic_mmio, *init_srb,*adapter_addr;
284	unsigned long t;
285	unsigned int uaa_addr;
286
287    	olympic_priv=(struct olympic_private *)dev->priv;
288	olympic_mmio=olympic_priv->olympic_mmio;
289
290	printk("%s \n", version);
291	printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);
292
293	writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
294	t=jiffies;
295	while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) {
296		schedule();
297		if(jiffies-t > 40*HZ) {
298			printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
299			return -ENODEV;
300		}
301	}
302
303	spin_lock_init(&olympic_priv->olympic_lock) ;
304
305	/* Needed for cardbus */
306	if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
307		writel(readl(olympic_priv->olympic_mmio+FERMASK)|FERMASK_INT_BIT, olympic_mmio+FERMASK);
308	}
309
310#if OLYMPIC_DEBUG
311	printk("BCTL: %x\n",readl(olympic_mmio+BCTL));
312	printk("GPR: %x\n",readw(olympic_mmio+GPR));
313	printk("SISRMASK: %x\n",readl(olympic_mmio+SISR_MASK));
314#endif
315	/* Aaaahhh, you have got to be real careful setting GPR; the card
316	   holds the previous values from flash memory, including autosense
317	   and ring speed */
318
319	writel(readl(olympic_mmio+BCTL)|BCTL_MIMREB,olympic_mmio+BCTL);
320
321	if (olympic_priv->olympic_ring_speed  == 0) { /* Autosense */
322		writew(readw(olympic_mmio+GPR)|GPR_AUTOSENSE,olympic_mmio+GPR);
323		if (olympic_priv->olympic_message_level)
324			printk(KERN_INFO "%s: Ringspeed autosense mode on\n",olympic_priv->olympic_card_name);
325	} else if (olympic_priv->olympic_ring_speed == 16) {
326		if (olympic_priv->olympic_message_level)
327			printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n", olympic_priv->olympic_card_name);
328		writew(GPR_16MBPS, olympic_mmio+GPR);
329	} else if (olympic_priv->olympic_ring_speed == 4) {
330		if (olympic_priv->olympic_message_level)
331			printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n", olympic_priv->olympic_card_name) ;
332		writew(0, olympic_mmio+GPR);
333	}
334
335	writew(readw(olympic_mmio+GPR)|GPR_NEPTUNE_BF,olympic_mmio+GPR);
336
337#if OLYMPIC_DEBUG
338	printk("GPR = %x\n",readw(olympic_mmio + GPR) ) ;
339#endif
340	/* Solo has been paused to meet the Cardbus power
341	 * specs if the adapter is cardbus. Check to
342	 * see it's been paused and then restart solo. The
343	 * adapter should set the pause bit within 1 second.
344	 */
345
346	if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
347		t=jiffies;
348		while (!(readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE)) {
349			schedule() ;
350			if(jiffies-t > 2*HZ) {
351				printk(KERN_ERR "IBM Cardbus tokenring adapter not responding.\n") ;
352				return -ENODEV;
353			}
354		}
355		writel(readl(olympic_mmio+CLKCTL) & ~CLKCTL_PAUSE, olympic_mmio+CLKCTL) ;
356	}
357
358	/* start solo init */
359	writel((1<<15),olympic_mmio+SISR_MASK_SUM);
360
361	t=jiffies;
362	while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) {
363		schedule();
364		if(jiffies-t > 15*HZ) {
365			printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
366			return -ENODEV;
367		}
368	}
369
370	writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
371
372#if OLYMPIC_DEBUG
373	printk("LAPWWO: %x, LAPA: %x\n",readl(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
374#endif
375
376	init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
377
378#if OLYMPIC_DEBUG
379{
380	int i;
381	printk("init_srb(%p): ",init_srb);
382	for(i=0;i<20;i++)
383		printk("%x ",readb(init_srb+i));
384	printk("\n");
385}
386#endif
387	if(readw(init_srb+6)) {
388		printk(KERN_INFO "tokenring card initialization failed. error code: %x\n",readw(init_srb+6));
389		return -ENODEV;
390	}
391
392	if (olympic_priv->olympic_message_level) {
393		if ( readb(init_srb +2) & 0x40) {
394			printk(KERN_INFO "Olympic: Adapter is FDX capable.\n") ;
395		} else {
396			printk(KERN_INFO "Olympic: Adapter cannot do FDX.\n");
397		}
398	}
399
400	uaa_addr=swab16(readw(init_srb+8));
401
402#if OLYMPIC_DEBUG
403	printk("UAA resides at %x\n",uaa_addr);
404#endif
405
406	writel(uaa_addr,olympic_mmio+LAPA);
407	adapter_addr=olympic_priv->olympic_lap + (uaa_addr & (~0xf800));
408
409#if OLYMPIC_DEBUG
410	printk("adapter address: %02x:%02x:%02x:%02x:%02x:%02x\n",
411			readb(adapter_addr), readb(adapter_addr+1),readb(adapter_addr+2),
412			readb(adapter_addr+3),readb(adapter_addr+4),readb(adapter_addr+5));
413#endif
414
415	memcpy_fromio(&dev->dev_addr[0], adapter_addr,6);
416
417	olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12));
418	olympic_priv->olympic_parms_addr = swab16(readw(init_srb + 14));
419
420	return 0;
421
422}
423
424static int olympic_open(struct net_device *dev)
425{
426	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
427	u8 *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;
428	unsigned long flags, t;
429	char open_error[255] ;
430	int i, open_finished = 1 ;
431
432	DECLARE_WAITQUEUE(wait,current) ;
433
434	if(request_irq(dev->irq, &olympic_interrupt, SA_SHIRQ , "olympic", dev)) {
435		return -EAGAIN;
436	}
437
438#if OLYMPIC_DEBUG
439	printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
440	printk("pending ints: %x\n",readl(olympic_mmio+SISR_RR));
441#endif
442
443	writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);
444
445	writel(SISR_MI | SISR_SRB_REPLY, olympic_mmio+SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */
446
447	writel(LISR_LIE,olympic_mmio+LISR); /* more ints later */
448
449	/* adapter is closed, so SRB is pointed to by LAPWWO */
450
451	writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
452	init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
453
454#if OLYMPIC_DEBUG
455	printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
456	printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
457	printk("Before the open command \n");
458#endif
459	do {
460		int i;
461
462		for(i=0;i<SRB_COMMAND_SIZE;i+=4)
463			writel(0,init_srb+i);
464		if(SRB_COMMAND_SIZE & 2)
465			writew(0,init_srb+(SRB_COMMAND_SIZE & ~3));
466		if(SRB_COMMAND_SIZE & 1)
467			writeb(0,init_srb+(SRB_COMMAND_SIZE & ~1));
468
469		writeb(SRB_OPEN_ADAPTER,init_srb) ; 	/* open */
470		writeb(OLYMPIC_CLEAR_RET_CODE,init_srb+2);
471
472		/* If Network Monitor, instruct card to copy MAC frames through the ARB */
473		if (olympic_priv->olympic_network_monitor)
474			writew(swab16(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), init_srb+8);
475		else
476			writew(swab16(OPEN_ADAPTER_ENABLE_FDX), init_srb+8);
477
478		/* Test the OR of the first 3 bytes as it's entirely possible for
479		 * someone to set the first 2 bytes to zero; although this
480		 * is an error, the first byte must have bit 6 set to 1  */
481
482		if (olympic_priv->olympic_laa[0] | olympic_priv->olympic_laa[1] | olympic_priv->olympic_laa[2]) {
483			writeb(olympic_priv->olympic_laa[0],init_srb+12);
484			writeb(olympic_priv->olympic_laa[1],init_srb+13);
485			writeb(olympic_priv->olympic_laa[2],init_srb+14);
486			writeb(olympic_priv->olympic_laa[3],init_srb+15);
487			writeb(olympic_priv->olympic_laa[4],init_srb+16);
488			writeb(olympic_priv->olympic_laa[5],init_srb+17);
489			memcpy(dev->dev_addr,olympic_priv->olympic_laa,dev->addr_len) ;
490		}
491		writeb(1,init_srb+30);
492
493		spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
494		olympic_priv->srb_queued=1;
495
496		writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
497		spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
498
499		t = jiffies ;
500
501		add_wait_queue(&olympic_priv->srb_wait,&wait) ;
502		set_current_state(TASK_INTERRUPTIBLE) ;
503
504 		while(olympic_priv->srb_queued) {
505			schedule() ;
506        		if(signal_pending(current))	{
507				printk(KERN_WARNING "%s: Signal received in open.\n",
508                			dev->name);
509            			printk(KERN_WARNING "SISR=%x LISR=%x\n",
510                			readl(olympic_mmio+SISR),
511                			readl(olympic_mmio+LISR));
512            			olympic_priv->srb_queued=0;
513            			break;
514        		}
515			if ((jiffies-t) > 10*HZ) {
516				printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ;
517				olympic_priv->srb_queued=0;
518				break ;
519			}
520			set_current_state(TASK_INTERRUPTIBLE) ;
521    		}
522		remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
523		set_current_state(TASK_RUNNING) ;
524		olympic_priv->srb_queued = 0 ;
525#if OLYMPIC_DEBUG
526		printk("init_srb(%p): ",init_srb);
527		for(i=0;i<20;i++)
528			printk("%02x ",readb(init_srb+i));
529		printk("\n");
530#endif
531
532		/* If we get the same return response as we set, the interrupt wasn't raised and the open
533                 * timed out.
534		 */
535
536		if(readb(init_srb+2)== OLYMPIC_CLEAR_RET_CODE) {
537			printk(KERN_WARNING "%s: Adapter Open time out or error.\n", dev->name) ;
538			return -EIO ;
539		}
540
541		if(readb(init_srb+2)!=0) {
542			if (readb(init_srb+2) == 0x07) {
543				if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */
544					printk(KERN_WARNING "%s: Retrying at different ring speed \n", dev->name);
545					open_finished = 0 ;
546				} else {
547
548					strcpy(open_error, open_maj_error[(readb(init_srb+7) & 0xf0) >> 4]) ;
549					strcat(open_error," - ") ;
550					strcat(open_error, open_min_error[(readb(init_srb+7) & 0x0f)]) ;
551
552					if (!olympic_priv->olympic_ring_speed && ((readb(init_srb+7) & 0x0f) == 0x0d)) {
553						printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
554						printk(KERN_WARNING "%s: Please try again with a specified ring speed \n",dev->name);
555						free_irq(dev->irq, dev);
556						return -EIO ;
557					}
558
559					printk(KERN_WARNING "%s: %s\n",dev->name,open_error);
560					free_irq(dev->irq,dev) ;
561					return -EIO ;
562
563				}	/* if autosense && open_finished */
564			} else if (readb(init_srb+2) == 0x32) {
565				printk(KERN_WARNING "%s: Invalid LAA: %02x:%02x:%02x:%02x:%02x:%02x\n",
566					dev->name,
567					olympic_priv->olympic_laa[0],
568					olympic_priv->olympic_laa[1],
569					olympic_priv->olympic_laa[2],
570					olympic_priv->olympic_laa[3],
571					olympic_priv->olympic_laa[4],
572					olympic_priv->olympic_laa[5]) ;
573				free_irq(dev->irq,dev) ;
574				return -EIO ;
575			} else {
576				printk(KERN_WARNING "%s: Bad OPEN response: %x\n", dev->name, readb(init_srb+2));
577				free_irq(dev->irq, dev);
578				return -EIO;
579			}
580		} else
581			open_finished = 1 ;
582	} while (!(open_finished)) ; /* Will only loop if ring speed mismatch re-open attempted && autosense is on */
583
584	if (readb(init_srb+18) & (1<<3))
585		if (olympic_priv->olympic_message_level)
586			printk(KERN_INFO "%s: Opened in FDX Mode\n",dev->name);
587
588	if (readb(init_srb+18) & (1<<1))
589		olympic_priv->olympic_ring_speed = 100 ;
590	else if (readb(init_srb+18) & 1)
591		olympic_priv->olympic_ring_speed = 16 ;
592	else
593		olympic_priv->olympic_ring_speed = 4 ;
594
595	if (olympic_priv->olympic_message_level)
596		printk(KERN_INFO "%s: Opened in %d Mbps mode\n",dev->name, olympic_priv->olympic_ring_speed);
597
598	olympic_priv->asb = swab16(readw(init_srb+8));
599	olympic_priv->srb = swab16(readw(init_srb+10));
600	olympic_priv->arb = swab16(readw(init_srb+12));
601	olympic_priv->trb = swab16(readw(init_srb+16));
602
603	olympic_priv->olympic_receive_options = 0x01 ;
604	olympic_priv->olympic_copy_all_options = 0 ;
605
606	/* setup rx ring */
607
608	writel((3<<16),olympic_mmio+BMCTL_RWM); /* Ensure end of frame generated interrupts */
609
610	writel(BMCTL_RX_DIS|3,olympic_mmio+BMCTL_RWM); /* Yes, this enables the RX channel */
611
612	for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
613
614		struct sk_buff *skb;
615
616		skb=dev_alloc_skb(olympic_priv->pkt_buf_sz);
617		if(skb == NULL)
618			break;
619
620		skb->dev = dev;
621
622		olympic_priv->olympic_rx_ring[i].buffer = cpu_to_le32(pci_map_single(olympic_priv->pdev,
623							  skb->data,olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)) ;
624		olympic_priv->olympic_rx_ring[i].res_length = cpu_to_le32(olympic_priv->pkt_buf_sz);
625		olympic_priv->rx_ring_skb[i]=skb;
626	}
627
628	if (i==0) {
629		printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
630		free_irq(dev->irq, dev);
631		return -EIO;
632	}
633
634	olympic_priv->rx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_rx_ring,
635					 sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
636	writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXDESCQ);
637	writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXCDA);
638	writew(i, olympic_mmio+RXDESCQCNT);
639
640	olympic_priv->rx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_rx_status_ring,
641						sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
642	writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXSTATQ);
643	writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXCSA);
644
645 	olympic_priv->rx_ring_last_received = OLYMPIC_RX_RING_SIZE - 1;	/* last processed rx status */
646	olympic_priv->rx_status_last_received = OLYMPIC_RX_RING_SIZE - 1;
647
648	writew(i, olympic_mmio+RXSTATQCNT);
649
650#if OLYMPIC_DEBUG
651	printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
652	printk("RXCSA: %x, rx_status_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
653	printk(" stat_ring[1]: %p, stat_ring[2]: %p, stat_ring[3]: %p\n", &(olympic_priv->olympic_rx_status_ring[1]), &(olympic_priv->olympic_rx_status_ring[2]), &(olympic_priv->olympic_rx_status_ring[3]) );
654	printk(" stat_ring[4]: %p, stat_ring[5]: %p, stat_ring[6]: %p\n", &(olympic_priv->olympic_rx_status_ring[4]), &(olympic_priv->olympic_rx_status_ring[5]), &(olympic_priv->olympic_rx_status_ring[6]) );
655	printk(" stat_ring[7]: %p\n", &(olympic_priv->olympic_rx_status_ring[7])  );
656
657	printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
658	printk("Rx_ring_dma_addr = %08x, rx_status_dma_addr = %08x\n",
659		olympic_priv->rx_ring_dma_addr,olympic_priv->rx_status_ring_dma_addr) ;
660#endif
661
662	writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | i,olympic_mmio+RXENQ);
663
664#if OLYMPIC_DEBUG
665	printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
666	printk("RXCSA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
667	printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
668#endif
669
670	writel(SISR_RX_STATUS | SISR_RX_NOBUF,olympic_mmio+SISR_MASK_SUM);
671
672	/* setup tx ring */
673
674	writel(BMCTL_TX1_DIS,olympic_mmio+BMCTL_RWM); /* Yes, this enables TX channel 1 */
675	for(i=0;i<OLYMPIC_TX_RING_SIZE;i++)
676		olympic_priv->olympic_tx_ring[i].buffer=0xdeadbeef;
677
678	olympic_priv->free_tx_ring_entries=OLYMPIC_TX_RING_SIZE;
679	olympic_priv->tx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_tx_ring,
680					 sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE,PCI_DMA_TODEVICE) ;
681	writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXDESCQ_1);
682	writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXCDA_1);
683	writew(OLYMPIC_TX_RING_SIZE, olympic_mmio+TXDESCQCNT_1);
684
685	olympic_priv->tx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_tx_status_ring,
686						sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
687	writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXSTATQ_1);
688	writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXCSA_1);
689	writew(OLYMPIC_TX_RING_SIZE,olympic_mmio+TXSTATQCNT_1);
690
691	olympic_priv->tx_ring_free=0; /* next entry in tx ring to use */
692	olympic_priv->tx_ring_last_status=OLYMPIC_TX_RING_SIZE-1; /* last processed tx status */
693
694	writel(0xffffffff, olympic_mmio+EISR_RWM) ; /* clean the eisr */
695	writel(0,olympic_mmio+EISR) ;
696	writel(EISR_MASK_OPTIONS,olympic_mmio+EISR_MASK) ; /* enables most of the TX error interrupts */
697	writel(SISR_TX1_EOF | SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE | SISR_ERR,olympic_mmio+SISR_MASK_SUM);
698
699#if OLYMPIC_DEBUG
700	printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
701	printk("SISR MASK: %x\n",readl(olympic_mmio+SISR_MASK));
702#endif
703
704	if (olympic_priv->olympic_network_monitor) {
705		u8 *oat ;
706		u8 *opt ;
707		oat = (u8 *)(olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
708		opt = (u8 *)(olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
709
710		printk("%s: Node Address: %02x:%02x:%02x:%02x:%02x:%02x\n",dev->name,
711			readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)),
712			readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+1),
713			readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+2),
714			readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+3),
715			readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+4),
716			readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+5));
717		printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name,
718			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
719			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
720			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
721			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
722		printk("%s: NAUN Address: %02x:%02x:%02x:%02x:%02x:%02x\n",dev->name,
723			readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)),
724			readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+1),
725			readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+2),
726			readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+3),
727			readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+4),
728			readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+5));
729	}
730
731	netif_start_queue(dev);
732	return 0;
733
734}
735
736/*
737 *	When we enter the rx routine we do not know how many frames have been
738 *	queued on the rx channel.  Therefore we start at the next rx status
739 *	position and travel around the receive ring until we have completed
740 *	all the frames.
741 *
742 *	This means that we may process the frame before we receive the end
743 *	of frame interrupt. This is why we always test the status instead
744 *	of blindly processing the next frame.
745 *
746 *	We also remove the last 4 bytes from the packet; these are
747 *	just token ring trailer info and upset protocols that don't check
748 *	their own length, e.g. SNA.
749 *
750 */
751static void olympic_rx(struct net_device *dev)
752{
753	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
754	u8 *olympic_mmio=olympic_priv->olympic_mmio;
755	struct olympic_rx_status *rx_status;
756	struct olympic_rx_desc *rx_desc ;
757	int rx_ring_last_received,length, buffer_cnt, cpy_length, frag_len;
758	struct sk_buff *skb, *skb2;
759	int i;
760
761	rx_status=&(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]) ;
762
763	while (rx_status->status_buffercnt) {
764                u32 l_status_buffercnt;
765
766		olympic_priv->rx_status_last_received++ ;
767		olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
768#if OLYMPIC_DEBUG
769		printk("rx status: %x rx len: %x \n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
770#endif
771		length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
772		buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
773		i = buffer_cnt ; /* Need buffer_cnt later for rxenq update */
774		frag_len = le32_to_cpu(rx_status->fragmentcnt_framelen) >> 16;
775
776#if OLYMPIC_DEBUG
777		printk("length: %x, frag_len: %x, buffer_cnt: %x\n", length, frag_len, buffer_cnt);
778#endif
779                l_status_buffercnt = le32_to_cpu(rx_status->status_buffercnt);
780		if(l_status_buffercnt & 0xC0000000) {
781			if (l_status_buffercnt & 0x3B000000) {
782				if (olympic_priv->olympic_message_level) {
783					if (l_status_buffercnt & (1<<29))  /* Rx Frame Truncated */
784						printk(KERN_WARNING "%s: Rx Frame Truncated \n",dev->name);
785					if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */
786						printk(KERN_WARNING "%s: Rx Frame Receive overrun \n",dev->name);
787					if (l_status_buffercnt & (1<<27)) /* No receive buffers */
788						printk(KERN_WARNING "%s: No receive buffers \n",dev->name);
789					if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
790						printk(KERN_WARNING "%s: Receive frame error detect \n",dev->name);
791					if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
792						printk(KERN_WARNING "%s: Received Error Detect \n",dev->name);
793				}
794				olympic_priv->rx_ring_last_received += i ;
795				olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
796				olympic_priv->olympic_stats.rx_errors++;
797			} else {
798
799				if (buffer_cnt == 1) {
800					skb = dev_alloc_skb(max_t(int, olympic_priv->pkt_buf_sz,length)) ;
801				} else {
802					skb = dev_alloc_skb(length) ;
803				}
804
805				if (skb == NULL) {
806					printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ;
807					olympic_priv->olympic_stats.rx_dropped++ ;
808					/* Update counters even though we don't transfer the frame */
809					olympic_priv->rx_ring_last_received += i ;
810					olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
811				} else  {
812					skb->dev = dev ;
813
814					/* Optimise based upon number of buffers used.
815					   If only one buffer is used we can simply swap the buffers around.
816					   If more than one then we must use the new buffer and copy the information
817					   first. Ideally all frames would be in a single buffer; this can be tuned by
818					   altering the buffer size. If the length of the packet is less than
819					   1500 bytes we're going to copy it over anyway to stop packets getting
820					   dropped from sockets with buffers smaller than our pkt_buf_sz. */
821
822 					if (buffer_cnt==1) {
823						olympic_priv->rx_ring_last_received++ ;
824						olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
825						rx_ring_last_received = olympic_priv->rx_ring_last_received ;
826						if (length > 1500) {
827							skb2=olympic_priv->rx_ring_skb[rx_ring_last_received] ;
828							/* unmap buffer */
829							pci_unmap_single(olympic_priv->pdev,
830								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
831								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
832							skb_put(skb2,length-4);
833							skb2->protocol = tr_type_trans(skb2,dev);
834							olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer =
835								cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data,
836								olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
837							olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length =
838								cpu_to_le32(olympic_priv->pkt_buf_sz);
839							olympic_priv->rx_ring_skb[rx_ring_last_received] = skb ;
840							netif_rx(skb2) ;
841						} else {
842							pci_dma_sync_single(olympic_priv->pdev,
843								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
844								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
845							memcpy(skb_put(skb,length-4),olympic_priv->rx_ring_skb[rx_ring_last_received]->data,length-4) ;
846							skb->protocol = tr_type_trans(skb,dev) ;
847							netif_rx(skb) ;
848						}
849					} else {
850						do { /* Walk the buffers */
851							olympic_priv->rx_ring_last_received++ ;
852							olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
853							rx_ring_last_received = olympic_priv->rx_ring_last_received ;
854							pci_dma_sync_single(olympic_priv->pdev,
855								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
856								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
857							rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
858							cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length));
859							memcpy(skb_put(skb, cpy_length), olympic_priv->rx_ring_skb[rx_ring_last_received]->data, cpy_length) ;
860						} while (--i) ;
861						skb_trim(skb,skb->len-4) ;
862						skb->protocol = tr_type_trans(skb,dev);
863						netif_rx(skb) ;
864					}
865					dev->last_rx = jiffies ;
866					olympic_priv->olympic_stats.rx_packets++ ;
867					olympic_priv->olympic_stats.rx_bytes += length ;
868				} /* if skb == null */
869			} /* If status & 0x3b */
870
871		} else { /*if buffercnt & 0xC */
872			olympic_priv->rx_ring_last_received += i ;
873			olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1) ;
874		}
875
876		rx_status->fragmentcnt_framelen = 0 ;
877		rx_status->status_buffercnt = 0 ;
878		rx_status = &(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received+1) & (OLYMPIC_RX_RING_SIZE -1) ]);
879
880		writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) |  buffer_cnt , olympic_mmio+RXENQ);
881	} /* while */
882
883}
884
885static void olympic_freemem(struct net_device *dev)
886{
887	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
888	int i;
889
890	for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
891		dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
892		if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != 0xdeadbeef) {
893			pci_unmap_single(olympic_priv->pdev,
894			le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
895			olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
896		}
897		olympic_priv->rx_status_last_received++;
898		olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
899	}
900	/* unmap rings */
901	pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr,
902		sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
903	pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
904		sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
905
906	pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr,
907		sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
908	pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr,
909		sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);
910
911	return ;
912}
913
914static void olympic_interrupt(int irq, void *dev_id, struct pt_regs *regs)
915{
916	struct net_device *dev= (struct net_device *)dev_id;
917	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
918	u8 *olympic_mmio=olympic_priv->olympic_mmio;
919	u32 sisr;
920	u8 *adapter_check_area ;
921
922	/*
923	 *  Read sisr but don't reset it yet.
924	 *  The indication bit may have been set but the interrupt latch
925	 *  bit may not be set, so we'd lose the interrupt later.
926	 */
927	sisr=readl(olympic_mmio+SISR) ;
928	if (!(sisr & SISR_MI)) /* Interrupt isn't for us */
929		return ;
930	sisr=readl(olympic_mmio+SISR_RR) ;  /* Read & Reset sisr */
931
932	spin_lock(&olympic_priv->olympic_lock);
933
934	/* Hotswap gives us this on removal */
935	if (sisr == 0xffffffff) {
936		printk(KERN_WARNING "%s: Hotswap adapter removal.\n",dev->name) ;
937		olympic_freemem(dev) ;
938		free_irq(dev->irq, dev) ;
939		dev->stop = NULL ;
940		spin_unlock(&olympic_priv->olympic_lock) ;
941		return ;
942	}
943
944	if (sisr & (SISR_SRB_REPLY | SISR_TX1_EOF | SISR_RX_STATUS | SISR_ADAPTER_CHECK |
945			SISR_ASB_FREE | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_RX_NOBUF | SISR_ERR)) {
946
947		/* If we ever get this the adapter is seriously dead. Only a reset is going to
948		 * bring it back to life. We're talking pci bus errors and such like :( */
949		if((sisr & SISR_ERR) && (readl(olympic_mmio+EISR) & EISR_MASK_OPTIONS)) {
950			printk(KERN_ERR "Olympic: EISR Error, EISR=%08x\n",readl(olympic_mmio+EISR)) ;
951			printk(KERN_ERR "The adapter must be reset to clear this condition.\n") ;
952			printk(KERN_ERR "Please report this error to the driver maintainer and/\n") ;
953			printk(KERN_ERR "or the linux-tr mailing list.\n") ;
954			olympic_freemem(dev) ;
955			free_irq(dev->irq, dev) ;
956			dev->stop = NULL ;
957			spin_unlock(&olympic_priv->olympic_lock) ;
958			return ;
959		} /* SISR_ERR */
960
961		if(sisr & SISR_SRB_REPLY) {
962			if(olympic_priv->srb_queued==1) {
963				wake_up_interruptible(&olympic_priv->srb_wait);
964			} else if (olympic_priv->srb_queued==2) {
965				olympic_srb_bh(dev) ;
966			}
967			olympic_priv->srb_queued=0;
968		} /* SISR_SRB_REPLY */
969
970		/* We shouldn't ever miss the Tx interrupt, but you never know, hence the loop to ensure
971		   we get all tx completions. */
972		if (sisr & SISR_TX1_EOF) {
973			while(olympic_priv->olympic_tx_status_ring[(olympic_priv->tx_ring_last_status + 1) & (OLYMPIC_TX_RING_SIZE-1)].status) {
974				olympic_priv->tx_ring_last_status++;
975				olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1);
976				olympic_priv->free_tx_ring_entries++;
977				olympic_priv->olympic_stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
978				olympic_priv->olympic_stats.tx_packets++ ;
979				pci_unmap_single(olympic_priv->pdev,
980					le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer),
981					olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE);
982				dev_kfree_skb_irq(olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]);
983				olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer=0xdeadbeef;
984				olympic_priv->olympic_tx_status_ring[olympic_priv->tx_ring_last_status].status=0;
985			}
986			netif_wake_queue(dev);
987		} /* SISR_TX1_EOF */
988
989		if (sisr & SISR_RX_STATUS) {
990			olympic_rx(dev);
991		} /* SISR_RX_STATUS */
992
993		if (sisr & SISR_ADAPTER_CHECK) {
994			netif_stop_queue(dev);
995			printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
996			writel(readl(olympic_mmio+LAPWWC),olympic_mmio+LAPA);
997			adapter_check_area = olympic_priv->olympic_lap + ((readl(olympic_mmio+LAPWWC)) & (~0xf800)) ;
998			printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, readb(adapter_check_area+0), readb(adapter_check_area+1), readb(adapter_check_area+2), readb(adapter_check_area+3), readb(adapter_check_area+4), readb(adapter_check_area+5), readb(adapter_check_area+6), readb(adapter_check_area+7)) ;
999			olympic_freemem(dev) ;
1000			free_irq(dev->irq, dev) ;
1001			dev->stop = NULL ;
1002			spin_unlock(&olympic_priv->olympic_lock) ;
1003			return ;
1004		} /* SISR_ADAPTER_CHECK */
1005
1006		if (sisr & SISR_ASB_FREE) {
1007			/* Wake up anything that is waiting for the asb response */
1008			if (olympic_priv->asb_queued) {
1009				olympic_asb_bh(dev) ;
1010			}
1011		} /* SISR_ASB_FREE */
1012
1013		if (sisr & SISR_ARB_CMD) {
1014			olympic_arb_cmd(dev) ;
1015		} /* SISR_ARB_CMD */
1016
1017		if (sisr & SISR_TRB_REPLY) {
1018			/* Wake up anything that is waiting for the trb response */
1019			if (olympic_priv->trb_queued) {
1020				wake_up_interruptible(&olympic_priv->trb_wait);
1021			}
1022			olympic_priv->trb_queued = 0 ;
1023		} /* SISR_TRB_REPLY */
1024
1025		if (sisr & SISR_RX_NOBUF) {
1026			/* According to the documentation, we don't have to do anything, but trapping it
1027			   keeps it out of /var/log/messages.  */
1028		} /* SISR_RX_NOBUF */
1029	} else {
1030		printk(KERN_WARNING "%s: Unexpected interrupt: %x\n",dev->name, sisr);
1031		printk(KERN_WARNING "%s: SISR_MASK: %x\n",dev->name, readl(olympic_mmio+SISR_MASK)) ;
1032	} /* One of the interrupts we want */
1033	writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);
1034
1035	spin_unlock(&olympic_priv->olympic_lock) ;
1036}
1037
1038static int olympic_xmit(struct sk_buff *skb, struct net_device *dev)
1039{
1040	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
1041	u8 *olympic_mmio=olympic_priv->olympic_mmio;
1042	unsigned long flags ;
1043
1044	spin_lock_irqsave(&olympic_priv->olympic_lock, flags);
1045
1046	netif_stop_queue(dev);
1047
1048	if(olympic_priv->free_tx_ring_entries) {
1049		olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].buffer =
1050			cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, skb->len,PCI_DMA_TODEVICE));
1051		olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].status_length = cpu_to_le32(skb->len | (0x80000000));
1052		olympic_priv->tx_ring_skb[olympic_priv->tx_ring_free]=skb;
1053		olympic_priv->free_tx_ring_entries--;
1054
1055        	olympic_priv->tx_ring_free++;
1056        	olympic_priv->tx_ring_free &= (OLYMPIC_TX_RING_SIZE-1);
1057		writew((((readw(olympic_mmio+TXENQ_1)) & 0x8000) ^ 0x8000) | 1,olympic_mmio+TXENQ_1);
1058		netif_wake_queue(dev);
1059		spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
1060		return 0;
1061	} else {
1062		spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
1063		return 1;
1064	}
1065
1066}
1067
1068
1069static int olympic_close(struct net_device *dev)
1070{
1071	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
1072    	u8 *olympic_mmio=olympic_priv->olympic_mmio,*srb;
1073	unsigned long t,flags;
1074
1075	DECLARE_WAITQUEUE(wait,current) ;
1076
1077	netif_stop_queue(dev);
1078
1079	writel(olympic_priv->srb,olympic_mmio+LAPA);
1080	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
1081
1082    	writeb(SRB_CLOSE_ADAPTER,srb+0);
1083	writeb(0,srb+1);
1084	writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1085
1086	spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
1087	olympic_priv->srb_queued=1;
1088
1089	writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1090	spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
1091
1092	t = jiffies ;
1093
1094	add_wait_queue(&olympic_priv->srb_wait,&wait) ;
1095	set_current_state(TASK_INTERRUPTIBLE) ;
1096
1097	while(olympic_priv->srb_queued) {
1098		schedule() ;
1099        	if(signal_pending(current))	{
1100			printk(KERN_WARNING "%s: Signal received in close.\n",dev->name);
1101            		printk(KERN_WARNING "SISR=%x LISR=%x\n",readl(olympic_mmio+SISR),readl(olympic_mmio+LISR));
1102            		olympic_priv->srb_queued=0;
1103            		break;
1104        	}
1105		if ((jiffies-t) > 60*HZ) {
1106			printk(KERN_WARNING "%s: SRB timed out. May not be fatal. \n",dev->name) ;
1107			olympic_priv->srb_queued=0;
1108			break ;
1109		}
1110		set_current_state(TASK_INTERRUPTIBLE) ;
1111    	}
1112	remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
1113	set_current_state(TASK_RUNNING) ;
1114
1115	olympic_priv->rx_status_last_received++;
1116	olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
1117
1118	olympic_freemem(dev) ;
1119
1120	/* reset tx/rx fifo's and busmaster logic */
1121
1122	writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
1123	udelay(1);
1124	writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
1125
1126#if OLYMPIC_DEBUG
1127	{
1128	int i ;
1129	printk("srb(%p): ",srb);
1130	for(i=0;i<4;i++)
1131		printk("%x ",readb(srb+i));
1132	printk("\n");
1133	}
1134#endif
1135	free_irq(dev->irq,dev);
1136
1137	return 0;
1138
1139}
1140
1141static void olympic_set_rx_mode(struct net_device *dev)
1142{
1143	struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
1144   	u8 *olympic_mmio = olympic_priv->olympic_mmio ;
1145	u8 options = 0;
1146	u8 *srb;
1147	struct dev_mc_list *dmi ;
1148	unsigned char dev_mc_address[4] ;
1149	int i ;
1150
1151	writel(olympic_priv->srb,olympic_mmio+LAPA);
1152	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
1153	options = olympic_priv->olympic_copy_all_options;
1154
1155	if (dev->flags&IFF_PROMISC)
1156		options |= 0x61 ;
1157	else
1158		options &= ~0x61 ;
1159
1160	/* Only issue the srb if there is a change in options */
1161
1162	if ((options ^ olympic_priv->olympic_copy_all_options)) {
1163
1164		/* Now to issue the srb command to alter the copy.all.options */
1165
1166		writeb(SRB_MODIFY_RECEIVE_OPTIONS,srb);
1167		writeb(0,srb+1);
1168		writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1169		writeb(0,srb+3);
1170		writeb(olympic_priv->olympic_receive_options,srb+4);
1171		writeb(options,srb+5);
1172
1173		olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
1174
1175		writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1176
1177		olympic_priv->olympic_copy_all_options = options ;
1178
1179		return ;
1180	}
1181
1182	/* Set the functional addresses we need for multicast */
1183
1184	dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
1185
1186	for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) {
1187		dev_mc_address[0] |= dmi->dmi_addr[2] ;
1188		dev_mc_address[1] |= dmi->dmi_addr[3] ;
1189		dev_mc_address[2] |= dmi->dmi_addr[4] ;
1190		dev_mc_address[3] |= dmi->dmi_addr[5] ;
1191	}
1192
1193	writeb(SRB_SET_FUNC_ADDRESS,srb+0);
1194	writeb(0,srb+1);
1195	writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1196	writeb(0,srb+3);
1197	writeb(0,srb+4);
1198	writeb(0,srb+5);
1199	writeb(dev_mc_address[0],srb+6);
1200	writeb(dev_mc_address[1],srb+7);
1201	writeb(dev_mc_address[2],srb+8);
1202	writeb(dev_mc_address[3],srb+9);
1203
1204	olympic_priv->srb_queued = 2 ;
1205	writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1206
1207}
1208
1209static void olympic_srb_bh(struct net_device *dev)
1210{
1211	struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
1212   	u8 *olympic_mmio = olympic_priv->olympic_mmio ;
1213	u8 *srb;
1214
1215	writel(olympic_priv->srb,olympic_mmio+LAPA);
1216	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
1217
1218	switch (readb(srb)) {
1219
1220		/* SRB_MODIFY_RECEIVE_OPTIONS i.e. set_multicast_list options (promiscuous)
1221                 * At some point we should do something if we get an error, such as
1222                 * resetting the IFF_PROMISC flag in dev
1223		 */
1224
1225		case SRB_MODIFY_RECEIVE_OPTIONS:
1226			switch (readb(srb+2)) {
1227				case 0x01:
1228					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name) ;
1229					break ;
1230				case 0x04:
1231					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
1232					break ;
1233				default:
1234					if (olympic_priv->olympic_message_level)
1235						printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",dev->name,olympic_priv->olympic_copy_all_options, olympic_priv->olympic_receive_options) ;
1236					break ;
1237			} /* switch srb[2] */
1238			break ;
1239
1240		/* SRB_SET_GROUP_ADDRESS - Multicast group setting
1241                 */
1242
1243		case SRB_SET_GROUP_ADDRESS:
1244			switch (readb(srb+2)) {
1245				case 0x00:
1246					break ;
1247				case 0x01:
1248					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1249					break ;
1250				case 0x04:
1251					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
1252					break ;
1253				case 0x3c:
1254					printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n",dev->name) ;
1255					break ;
1256				case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */
1257					printk(KERN_WARNING "%s: Group address registers full\n",dev->name) ;
1258					break ;
1259				case 0x55:
1260					printk(KERN_INFO "%s: Group Address already set.\n",dev->name) ;
1261					break ;
1262				default:
1263					break ;
1264			} /* switch srb[2] */
1265			break ;
1266
1267		/* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
1268 		 */
1269
1270		case SRB_RESET_GROUP_ADDRESS:
1271			switch (readb(srb+2)) {
1272				case 0x00:
1273					break ;
1274				case 0x01:
1275					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1276					break ;
1277				case 0x04:
1278					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1279					break ;
1280				case 0x39: /* Must deal with this if individual multicast addresses used */
1281					printk(KERN_INFO "%s: Group address not found \n",dev->name);
1282					break ;
1283				default:
1284					break ;
1285			} /* switch srb[2] */
1286			break ;
1287
1288
1289		/* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode
1290		 */
1291
1292		case SRB_SET_FUNC_ADDRESS:
1293			switch (readb(srb+2)) {
1294				case 0x00:
1295					if (olympic_priv->olympic_message_level)
1296						printk(KERN_INFO "%s: Functional Address Mask Set \n",dev->name) ;
1297					break ;
1298				case 0x01:
1299					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1300					break ;
1301				case 0x04:
1302					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1303					break ;
1304				default:
1305					break ;
1306			} /* switch srb[2] */
1307			break ;
1308
1309		/* SRB_READ_LOG - Read and reset the adapter error counters
1310 		 */
1311
1312		case SRB_READ_LOG:
1313			switch (readb(srb+2)) {
1314				case 0x00:
1315					if (olympic_priv->olympic_message_level)
1316						printk(KERN_INFO "%s: Read Log issued\n",dev->name) ;
1317					break ;
1318				case 0x01:
1319					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1320					break ;
1321				case 0x04:
1322					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1323					break ;
1324
1325			} /* switch srb[2] */
1326			break ;
1327
1328		/* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */
1329
1330		case SRB_READ_SR_COUNTERS:
1331			switch (readb(srb+2)) {
1332				case 0x00:
1333					if (olympic_priv->olympic_message_level)
1334						printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ;
1335					break ;
1336				case 0x01:
1337					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1338					break ;
1339				case 0x04:
1340					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1341					break ;
1342				default:
1343					break ;
1344			} /* switch srb[2] */
1345			break ;
1346
1347		default:
1348			printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n",dev->name);
1349			break ;
1350	} /* switch srb[0] */
1351
1352}
1353
1354static struct net_device_stats * olympic_get_stats(struct net_device *dev)
1355{
1356	struct olympic_private *olympic_priv ;
1357	olympic_priv=(struct olympic_private *) dev->priv;
1358	return (struct net_device_stats *) &olympic_priv->olympic_stats;
1359}
1360
1361static int olympic_set_mac_address (struct net_device *dev, void *addr)
1362{
1363	struct sockaddr *saddr = addr ;
1364	struct olympic_private *olympic_priv = (struct olympic_private *)dev->priv ;
1365
1366	if (netif_running(dev)) {
1367		printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ;
1368		return -EIO ;
1369	}
1370
1371	memcpy(olympic_priv->olympic_laa, saddr->sa_data,dev->addr_len) ;
1372
1373	if (olympic_priv->olympic_message_level) {
1374 		printk(KERN_INFO "%s: MAC/LAA set to %x.%x.%x.%x.%x.%x\n",dev->name, olympic_priv->olympic_laa[0],
1375		olympic_priv->olympic_laa[1], olympic_priv->olympic_laa[2],
1376		olympic_priv->olympic_laa[3], olympic_priv->olympic_laa[4],
1377		olympic_priv->olympic_laa[5]);
1378	}
1379
1380	return 0 ;
1381}
1382
static void olympic_arb_cmd(struct net_device *dev)
{
	struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv;
	u8 *olympic_mmio=olympic_priv->olympic_mmio;
	u8 *arb_block, *asb_block, *srb  ;
	u8 header_len ;
	u16 frame_len, buffer_len ;
	struct sk_buff *mac_frame ;
	u8 *buf_ptr ;
	u8 *frame_data ;
	u16 buff_off ;
	u16 lan_status = 0, lan_status_diff  ; /* Initialize to stop compiler warning */
	u8 fdx_prot_error ;
	u16 next_ptr;
	int i ;

	arb_block = (u8 *)(olympic_priv->olympic_lap + olympic_priv->arb) ;
	asb_block = (u8 *)(olympic_priv->olympic_lap + olympic_priv->asb) ;
	srb = (u8 *)(olympic_priv->olympic_lap + olympic_priv->srb) ;

	if (readb(arb_block+0) == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */

		header_len = readb(arb_block+8) ; /* 802.5 Token-Ring Header Length */
		frame_len = swab16(readw(arb_block + 10)) ;

		buff_off = swab16(readw(arb_block + 6)) ;

		buf_ptr = olympic_priv->olympic_lap + buff_off ;

#if OLYMPIC_DEBUG
{
		int i;
		frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;

		for (i=0 ;  i < 14 ; i++) {
			printk("Loc %d = %02x\n",i,readb(frame_data + i));
		}

		printk("next %04x, fs %02x, len %04x \n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
}
#endif
		mac_frame = dev_alloc_skb(frame_len) ;
		if (!mac_frame) {
			printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n", dev->name);
			goto drop_frame;
		}

		/* Walk the buffer chain, creating the frame */

		do {
			frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
			buffer_len = swab16(readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
			memcpy_fromio(skb_put(mac_frame, buffer_len), frame_data , buffer_len ) ;
			next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next));
		} while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + swab16(next_ptr))); /* big endian field, swab like buffer_len above */

		if (olympic_priv->olympic_network_monitor) {
			struct trh_hdr *mac_hdr ;
			printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name) ;
			mac_hdr = (struct trh_hdr *)mac_frame->data ;
			printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->daddr[0], mac_hdr->daddr[1], mac_hdr->daddr[2], mac_hdr->daddr[3], mac_hdr->daddr[4], mac_hdr->daddr[5]) ;
			printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->saddr[0], mac_hdr->saddr[1], mac_hdr->saddr[2], mac_hdr->saddr[3], mac_hdr->saddr[4], mac_hdr->saddr[5]) ;
		}
		mac_frame->dev = dev ;
		mac_frame->protocol = tr_type_trans(mac_frame,dev);
		netif_rx(mac_frame) ;
		dev->last_rx = jiffies;

drop_frame:
		/* Now tell the card we have dealt with the received frame */

		/* Set LISR Bit 1 */
		writel(LISR_ARB_FREE,olympic_priv->olympic_mmio + LISR_SUM);

		/* Is the ASB free ? */

		if (readb(asb_block + 2) != 0xff) {
			olympic_priv->asb_queued = 1 ;
			writel(LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
			return ;
			/* Drop out and wait for the bottom half to be run */
		}

		writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
		writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
		writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
		writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */

		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);

		olympic_priv->asb_queued = 2 ;

		return ;

	} else if (readb(arb_block) == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */
		lan_status = swab16(readw(arb_block+6));
		fdx_prot_error = readb(arb_block+8) ;

		/* Issue ARB Free */
		writel(LISR_ARB_FREE,olympic_priv->olympic_mmio+LISR_SUM);

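		/* XOR against the previously recorded status so that only the
		 * bits which have actually changed are reported below. */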
		lan_status_diff = olympic_priv->olympic_lan_status ^ lan_status ;

		if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) {
			if (lan_status_diff & LSC_LWF)
					printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name);
			if (lan_status_diff & LSC_ARW)
					printk(KERN_WARNING "%s: Auto removal error\n",dev->name);
			if (lan_status_diff & LSC_FPE)
					printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name);
			if (lan_status_diff & LSC_RR)
					printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name);

			/* Adapter has been closed by the hardware */

			/* reset tx/rx fifo's and busmaster logic */

			writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
			udelay(1);
			writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
			netif_stop_queue(dev);
			olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ;
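			/* Free every rx skb still on the ring; skip the DMA unmap for
			 * slots whose buffer pointer carries the 0xdeadbeef sentinel. */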
			for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
				dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
				if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != 0xdeadbeef) {
					pci_unmap_single(olympic_priv->pdev,
						le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
						olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
				}
				olympic_priv->rx_status_last_received++;
				olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
			}
			/* unmap rings */
			pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr,
				sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
			pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
				sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);

			pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr,
				sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
			pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr,
				sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);

			free_irq(dev->irq,dev);
			dev->stop=NULL;
			printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ;
		} /* If serious error */

		if (olympic_priv->olympic_message_level) {
			if (lan_status_diff & LSC_SIG_LOSS)
					printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ;
			if (lan_status_diff & LSC_HARD_ERR)
					printk(KERN_INFO "%s: Beaconing \n",dev->name);
			if (lan_status_diff & LSC_SOFT_ERR)
					printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name);
			if (lan_status_diff & LSC_TRAN_BCN)
					printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
			if (lan_status_diff & LSC_SS)
					printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
			if (lan_status_diff & LSC_RING_REC)
					printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
			if (lan_status_diff & LSC_FDX_MODE)
					printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name);
		}

		if (lan_status_diff & LSC_CO) {

				if (olympic_priv->olympic_message_level)
					printk(KERN_INFO "%s: Counter Overflow \n", dev->name);

				/* Issue READ.LOG command */

				writeb(SRB_READ_LOG, srb);
				writeb(0,srb+1);
				writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
				writeb(0,srb+3);
				writeb(0,srb+4);
				writeb(0,srb+5);

				olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */

				writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

		}

		if (lan_status_diff & LSC_SR_CO) {

				if (olympic_priv->olympic_message_level)
					printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);

				/* Issue a READ.SR.COUNTERS */

				writeb(SRB_READ_SR_COUNTERS,srb);
				writeb(0,srb+1);
				writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
				writeb(0,srb+3);

				olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */

				writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

		}

		olympic_priv->olympic_lan_status = lan_status ;

	}  /* Lan.change.status */
	else
		printk(KERN_WARNING "%s: Unknown arb command \n", dev->name);
}

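/*
 * ASB bottom half. Runs after the ARB handler found the ASB busy
 * (asb_queued == 1) and issues the deferred receive-data reply; on the
 * second pass (asb_queued == 2) it simply checks the ASB return code.
 */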
static void olympic_asb_bh(struct net_device *dev)
{
	struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
	u8 *arb_block, *asb_block ;

	arb_block = (u8 *)(olympic_priv->olympic_lap + olympic_priv->arb) ;
	asb_block = (u8 *)(olympic_priv->olympic_lap + olympic_priv->asb) ;

	if (olympic_priv->asb_queued == 1) {   /* Dropped through the first time */

		writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
		writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
		writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
		writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */

		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
		olympic_priv->asb_queued = 2 ;

		return ;
	}

	if (olympic_priv->asb_queued == 2) {
		switch (readb(asb_block+2)) {
			case 0x01:
				printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name);
				break ;
			case 0x26:
				printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name);
				break ;
			case 0xFF:
				/* Valid response, everything should be ok again */
				break ;
			default:
				printk(KERN_WARNING "%s: Invalid return code in asb\n",dev->name);
				break ;
		}
	}
	olympic_priv->asb_queued = 0 ;
}

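/*
 * The maximum MTU depends on the ring speed: 4500 bytes on a 4 Mbps ring,
 * 18000 bytes at 16 Mbps. The receive buffer size (pkt_buf_sz) tracks the
 * MTU plus the token ring header.
 */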
static int olympic_change_mtu(struct net_device *dev, int mtu)
{
	struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv;
	u16 max_mtu ;

	if (olympic_priv->olympic_ring_speed == 4)
		max_mtu = 4500 ;
	else
		max_mtu = 18000 ;

	if (mtu > max_mtu)
		return -EINVAL ;
	if (mtu < 100)
		return -EINVAL ;

	dev->mtu = mtu ;
	olympic_priv->pkt_buf_sz = mtu + TR_HLEN ;

	return 0 ;
}

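/*
 * /proc read handler (one entry per adapter, created when network
 * monitoring is enabled). Dumps the adapter address table and the token
 * ring parameters table straight out of adapter memory.
 */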
static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
{
	struct net_device *dev = (struct net_device *)data ;
	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
	u8 *oat = (u8 *)(olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
	u8 *opt = (u8 *)(olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
	int size = 0 ;
	int len=0;
	off_t begin=0;
	off_t pos=0;

	size = sprintf(buffer,
		"IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name);
	size += sprintf(buffer+size, "\n%6s: Adapter Address   : Node Address      : Functional Addr\n",
	   dev->name);

	size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x\n",
	   dev->name,
	   dev->dev_addr[0],
	   dev->dev_addr[1],
	   dev->dev_addr[2],
	   dev->dev_addr[3],
	   dev->dev_addr[4],
	   dev->dev_addr[5],
	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+1),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+2),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+3),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+4),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+5),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));

	size += sprintf(buffer+size, "\n%6s: Token Ring Parameters Table:\n", dev->name);

	size += sprintf(buffer+size, "%6s: Physical Addr : Up Node Address   : Poll Address      : AccPri : Auth Src : Att Code :\n",
	  dev->name) ;

	size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x   : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %04x   : %04x     :  %04x    :\n",
	  dev->name,
	  readb(opt+offsetof(struct olympic_parameters_table, phys_addr)),
	  readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1),
	  readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2),
	  readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3),
	  readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)),
	  readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+1),
	  readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+2),
	  readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+3),
	  readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+4),
	  readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+5),
	  readb(opt+offsetof(struct olympic_parameters_table, poll_addr)),
	  readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+1),
	  readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+2),
	  readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+3),
	  readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+4),
	  readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+5),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code))));

	size += sprintf(buffer+size, "%6s: Source Address    : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n",
	  dev->name) ;

	size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %04x  : %04x   : %04x   : %04x   : %04x    :     %04x     : \n",
	  dev->name,
	  readb(opt+offsetof(struct olympic_parameters_table, source_addr)),
	  readb(opt+offsetof(struct olympic_parameters_table, source_addr)+1),
	  readb(opt+offsetof(struct olympic_parameters_table, source_addr)+2),
	  readb(opt+offsetof(struct olympic_parameters_table, source_addr)+3),
	  readb(opt+offsetof(struct olympic_parameters_table, source_addr)+4),
	  readb(opt+offsetof(struct olympic_parameters_table, source_addr)+5),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, local_ring))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, mon_error))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, frame_correl))));

	size += sprintf(buffer+size, "%6s: Beacon Details :  Tx  :  Rx  : NAUN Node Address : NAUN Node Phys : \n",
	  dev->name) ;

	size += sprintf(buffer+size, "%6s:                :  %02x  :  %02x  : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x    : \n",
	  dev->name,
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))),
	  swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))),
	  readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)),
	  readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+1),
	  readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+2),
	  readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+3),
	  readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+4),
	  readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+5),
	  readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)),
	  readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1),
	  readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2),
	  readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+3));

	len=size;
	pos=begin+size;
	if (pos<offset) {
		len=0;
		begin=pos;
	}
	*start=buffer+(offset-begin);	/* Start of wanted data */
	len-=(offset-begin);		/* Start slop */
	if(len>length)
		len=length;		/* Ending slop */
	return len;
}

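/*
 * PCI removal: drop the per-adapter proc entry (if network monitoring
 * created one), unregister the token ring device and release the MMIO/LAP
 * mappings and PCI regions.
 */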
static void __devexit olympic_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev) ;
	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;

	if (olympic_priv->olympic_network_monitor) {
		char proc_name[20] ;
		snprintf(proc_name, sizeof(proc_name), "net/olympic_%s", dev->name) ;
		remove_proc_entry(proc_name,NULL);
	}
	unregister_trdev(dev) ;
	iounmap(olympic_priv->olympic_mmio) ;
	iounmap(olympic_priv->olympic_lap) ;
	pci_release_regions(pdev) ;
	pci_set_drvdata(pdev,NULL) ;
	kfree(dev) ;
}

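/* PCI driver glue (2.4-style labelled initializers) wiring probe and remove
 * to the entry points above, plus the module init/exit hooks below. */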
static struct pci_driver olympic_driver = {
	name:		"olympic",
	id_table:	olympic_pci_tbl,
	probe:		olympic_probe,
	remove:		__devexit_p(olympic_remove_one),
};

static int __init olympic_pci_init(void)
{
	return pci_module_init (&olympic_driver) ;
}

static void __exit olympic_pci_cleanup(void)
{
	pci_unregister_driver(&olympic_driver) ;
}


module_init(olympic_pci_init) ;
module_exit(olympic_pci_cleanup) ;

MODULE_LICENSE("GPL");
