1/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
2   munged into HPPA boxen .
3
4   This driver is based upon 82596.c, original credits are below...
5   but there were too many hoops which HP wants jumped through to
6   keep this code in there in a sane manner.
7
8   3 primary sources of the mess --
9   1) hppa needs *lots* of cacheline flushing to keep this kind of
10   MMIO running.
11
12   2) The 82596 needs to see all of its pointers as their physical
13   address.  Thus virt_to_bus/bus_to_virt are *everywhere*.
14
15   3) The implementation HP is using seems to be significantly pickier
16   about when and how the command and RX units are started.  some
17   command ordering was changed.
18
19   Examination of the mach driver leads one to believe that there
20   might be a saner way to pull this off...  anyone who feels like a
21   full rewrite can be my guest.
22
23   Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)
24
25   02/01/2000  Initial modifications for parisc by Helge Deller (deller@gmx.de)
26   03/02/2000  changes for better/correct(?) cache-flushing (deller)
27*/
28
29/* 82596.c: A generic 82596 ethernet driver for linux. */
30/*
31   Based on Apricot.c
32   Written 1994 by Mark Evans.
33   This driver is for the Apricot 82596 bus-master interface
34
35   Modularised 12/94 Mark Evans
36
37
38   Modified to support the 82596 ethernet chips on 680x0 VME boards.
39   by Richard Hirst <richard@sleepie.demon.co.uk>
40   Renamed to be 82596.c
41
42   980825:  Changed to receive directly in to sk_buffs which are
43   allocated at open() time.  Eliminates copy on incoming frames
44   (small ones are still copied).  Shared data now held in a
45   non-cached page, so we can run on 68060 in copyback mode.
46
47   TBD:
48   * look at deferring rx frames rather than discarding (as per tulip)
49   * handle tx ring full as per tulip
   * performance test to tune rx_copybreak
51
52   Most of my modifications relate to the braindead big-endian
53   implementation by Intel.  When the i596 is operating in
54   'big-endian' mode, it thinks a 32 bit value of 0x12345678
55   should be stored as 0x56781234.  This is a real pain, when
56   you have linked lists which are shared by the 680x0 and the
57   i596.
58
59   Driver skeleton
60   Written 1993 by Donald Becker.
61   Copyright 1993 United States Government as represented by the Director,
62   National Security Agency. This software may only be used and distributed
63   according to the terms of the GNU General Public License as modified by SRC,
64   incorporated herein by reference.
65
66   The author may be reached as becker@scyld.com, or C/O
67   Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
68
69 */
70
71#include <linux/module.h>
72
73#include <linux/kernel.h>
74#include <linux/sched.h>
75#include <linux/string.h>
76#include <linux/ptrace.h>
77#include <linux/errno.h>
78#include <linux/ioport.h>
79#include <linux/slab.h>
80#include <linux/interrupt.h>
81#include <linux/delay.h>
82#include <linux/netdevice.h>
83#include <linux/etherdevice.h>
84#include <linux/skbuff.h>
85#include <linux/init.h>
86#include <linux/pci.h>
87#include <linux/types.h>
88
89#include <asm/bitops.h>
90#include <asm/io.h>
91#include <asm/pgtable.h>
92#include <asm/pgalloc.h>
93#include <asm/irq.h>
94
95#include <asm/pdc.h>
96#include <asm/gsc.h>
97#include <asm/cache.h>
98
static char version[] __devinitdata =
	"82596.c $Revision: 1.1.1.1 $\n";

/* DEBUG flags
 */

#define DEB_INIT	0x0001
#define DEB_PROBE	0x0002
#define DEB_SERIOUS	0x0004
#define DEB_ERRORS	0x0008
#define DEB_MULTI	0x0010
#define DEB_TDR		0x0020
#define DEB_OPEN	0x0040
#define DEB_RESET	0x0080
#define DEB_ADDCMD	0x0100
#define DEB_STATUS	0x0200
#define DEB_STARTTX	0x0400
#define DEB_RXADDR	0x0800
#define DEB_TXADDR	0x1000
#define DEB_RXFRAME	0x2000
#define DEB_INTS	0x4000
#define DEB_STRUCT	0x8000
#define DEB_ANY		0xffff


/* Execute y only when the corresponding DEB_* bit is set in i596_debug. */
#define DEB(x,y)	if (i596_debug & (x)) { y; }


/* Cache-maintenance wrappers for memory shared with the 82596.  They are
 * no-ops when the shared area came from pci_alloc_consistent()
 * (dma_consistent != 0); otherwise they flush/invalidate the CPU cache
 * so CPU and chip see each other's writes.
 */

/* Write dirty cachelines covering [addr, addr+len) back to memory. */
#define  CHECK_WBACK(addr,len) \
	do { if (!dma_consistent) dma_cache_wback((unsigned long)addr,len); } while (0)

/* Invalidate cachelines so the next CPU read fetches fresh DMA data. */
#define  CHECK_INV(addr,len) \
	do { if (!dma_consistent) dma_cache_inv((unsigned long)addr,len); } while(0)

/* Write back, then invalidate — for regions both sides touch. */
#define  CHECK_WBACK_INV(addr,len) \
	do { if (!dma_consistent) dma_cache_wback_inv((unsigned long)addr,len); } while (0)
136
/* Register offsets relative to the LASI LAN base address (dev->base_addr). */
#define PA_I82596_RESET		0	/* Offsets relative to LASI-LAN-Addr.*/
#define PA_CPU_PORT_L_ACCESS	4
#define PA_CHANNEL_ATTENTION	8


/*
 * Define various macros for Channel Attention, word swapping etc., dependent
 * on architecture.  MVME and BVME are 680x0 based, otherwise it is Intel.
 */

#ifdef __BIG_ENDIAN
/* In big-endian mode the 82596 stores 32-bit values with the two 16-bit
 * halves exchanged, so every address handed to (or read from) the chip
 * must have its halves swapped.  One macro per structure type, purely
 * for readability at the call sites.
 */
#define WSWAPrfd(x)  (((u32)(x)<<16) | ((((u32)(x)))>>16))
#define WSWAPrbd(x)  (((u32)(x)<<16) | ((((u32)(x)))>>16))
#define WSWAPiscp(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
#define WSWAPscb(x)  (((u32)(x)<<16) | ((((u32)(x)))>>16))
#define WSWAPcmd(x)  (((u32)(x)<<16) | ((((u32)(x)))>>16))
#define WSWAPtbd(x)  (((u32)(x)<<16) | ((((u32)(x)))>>16))
#define WSWAPchar(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
#define ISCP_BUSY	0x00010000
#define MACH_IS_APRICOT	0
#else
/* Little-endian: no swapping needed, the macros are plain casts. */
#define WSWAPrfd(x)     ((struct i596_rfd *)(x))
#define WSWAPrbd(x)     ((struct i596_rbd *)(x))
#define WSWAPiscp(x)    ((struct i596_iscp *)(x))
#define WSWAPscb(x)     ((struct i596_scb *)(x))
#define WSWAPcmd(x)     ((struct i596_cmd *)(x))
#define WSWAPtbd(x)     ((struct i596_tbd *)(x))
#define WSWAPchar(x)    ((char *)(x))
#define ISCP_BUSY	0x0001
#define MACH_IS_APRICOT	1
#endif

/*
 * The MPU_PORT command allows direct access to the 82596. With PORT access
 * the following commands are available (p5-18). The 32-bit port command
 * must be word-swapped with the most significant word written first.
 * This only applies to VME boards.
 */
#define PORT_RESET		0x00	/* reset 82596 */
#define PORT_SELFTEST		0x01	/* selftest */
#define PORT_ALTSCP		0x02	/* alternate SCB address */
#define PORT_ALTDUMP		0x03	/* Alternate DUMP address */
179
/* Default debug mask; overridable as a module parameter. */
static int i596_debug = (DEB_SERIOUS|DEB_PROBE);

MODULE_AUTHOR("Richard Hirst");
MODULE_DESCRIPTION("i82596 driver");
MODULE_LICENSE("GPL");
MODULE_PARM(i596_debug, "i");
MODULE_PARM_DESC(i596_debug, "lasi_82596 debug mask");
EXPORT_NO_SYMBOLS;

/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;

#define MAX_DRIVERS	4	/* max count of drivers */

#define PKT_BUF_SZ	1536	/* size of each receive buffer */
#define MAX_MC_CNT	64	/* max multicast addresses accepted */

/* "null pointer" value as seen by the 82596 in its linked lists. */
#define I596_NULL ((u32)0xffffffff)
200
/* Bits in the 'command' field of an i596_cmd block. */
#define CMD_EOL		0x8000	/* The last command of the list, stop. */
#define CMD_SUSP	0x4000	/* Suspend after doing cmd. */
#define CMD_INTR	0x2000	/* Interrupt after doing cmd. */

#define CMD_FLEX	0x0008	/* Enable flexible memory model */

/* Low 3 bits of the command field: the action code. */
enum commands {
	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
};

/* Bits in the 'status' field of an i596_cmd block. */
#define STAT_C		0x8000	/* Set to 0 after execution */
#define STAT_B		0x4000	/* Command being executed */
#define STAT_OK		0x2000	/* Command executed ok */
#define STAT_A		0x1000	/* Command aborted */

/* SCB command-word values: command unit (CUC) and receive unit (RX). */
#define	 CUC_START	0x0100
#define	 CUC_RESUME	0x0200
#define	 CUC_SUSPEND    0x0300
#define	 CUC_ABORT	0x0400
#define	 RX_START	0x0010
#define	 RX_RESUME	0x0020
#define	 RX_SUSPEND	0x0030
#define	 RX_ABORT	0x0040

#define TX_TIMEOUT	5

#define OPT_SWAP_PORT	0x0001	/* Need to wordswp on the MPU port */


/* Layout of the port-access registers (see PA_* offsets above). */
struct i596_reg {
	unsigned short porthi;
	unsigned short portlo;
	u32            ca;
};

/* Bits in a transmit buffer descriptor's size field. */
#define EOF		0x8000
#define SIZE_MASK	0x3fff

/* Transmit buffer descriptor; padded to a full cacheline. */
struct i596_tbd {
	unsigned short size;
	unsigned short pad;
	dma_addr_t     next;
	dma_addr_t     data;
	u32 cache_pad[5];		/* Total 32 bytes... */
};

/* The command structure has two 'next' pointers; v_next is the address of
 * the next command as seen by the CPU, b_next is the address of the next
 * command as seen by the 82596.  The b_next pointer, as used by the 82596
 * always references the status field of the next command, rather than the
 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 * logical to put v_next at the end of the structure, but we cannot do that
 * because the 82596 expects other fields to be there, depending on command
 * type.
 */

struct i596_cmd {
	struct i596_cmd *v_next;	/* Address from CPUs viewpoint */
	unsigned short status;
	unsigned short command;
	dma_addr_t     b_next;	/* Address from i596 viewpoint */
};
264
/* CmdTx command block: generic header plus transmit-specific fields. */
struct tx_cmd {
	struct i596_cmd cmd;
	dma_addr_t     tbd;
	unsigned short size;
	unsigned short pad;
	struct sk_buff *skb;		/* So we can free it after tx */
	dma_addr_t dma_addr;
#ifdef __LP64__
	u32 cache_pad[6];		/* Total 64 bytes... */
#else
	u32 cache_pad[1];		/* Total 32 bytes... */
#endif
};

/* CmdTDR (time-domain reflectometry cable test) command block. */
struct tdr_cmd {
	struct i596_cmd cmd;
	unsigned short status;
	unsigned short pad;
};

/* CmdMulticastList command block: count plus packed 6-byte addresses. */
struct mc_cmd {
	struct i596_cmd cmd;
	short mc_cnt;
	char mc_addrs[MAX_MC_CNT*6];
};

/* CmdSASetup (station address) command block. */
struct sa_cmd {
	struct i596_cmd cmd;
	char eth_addr[8];
};

/* CmdConfigure command block; see init_setup[] for the byte meanings. */
struct cf_cmd {
	struct i596_cmd cmd;
	char i596_config[16];
};

/* Receive frame descriptor.  Hardware fields first, then CPU-side links;
 * padded to a cacheline so CHECK_* macros don't clobber neighbours.
 */
struct i596_rfd {
	unsigned short stat;
	unsigned short cmd;
	dma_addr_t     b_next;	/* Address from i596 viewpoint */
	dma_addr_t     rbd;
	unsigned short count;
	unsigned short size;
	struct i596_rfd *v_next;	/* Address from CPUs viewpoint */
	struct i596_rfd *v_prev;
#ifndef __LP64__
	u32 cache_pad[2];		/* Total 32 bytes... */
#endif
};

/* Receive buffer descriptor: hardware part followed by driver bookkeeping. */
struct i596_rbd {
    /* hardware data */
    unsigned short count;
    unsigned short zero1;
    dma_addr_t     b_next;
    dma_addr_t     b_data;		/* Address from i596 viewpoint */
    unsigned short size;
    unsigned short zero2;
    /* driver data */
    struct sk_buff *skb;
    struct i596_rbd *v_next;
    dma_addr_t     b_addr;		/* This rbd addr from i596 view */
    unsigned char *v_data;		/* Address from CPUs viewpoint */
					/* Total 32 bytes... */
#ifdef __LP64__
    u32 cache_pad[4];
#endif
};
333
/* These values as chosen so struct i596_private fits in one page... */

#define TX_RING_SIZE 32
#define RX_RING_SIZE 16

/* System Control Block: the command/status mailbox shared with the chip,
 * including its cumulative error counters.
 */
struct i596_scb {
	unsigned short status;
	unsigned short command;
	dma_addr_t    cmd;
	dma_addr_t    rfd;
	u32           crc_err;
	u32           align_err;
	u32           resource_err;
	u32           over_err;
	u32           rcvdt_err;
	u32           short_err;
	unsigned short t_on;
	unsigned short t_off;
};

/* Intermediate System Configuration Pointer: chip clears 'stat' (from
 * ISCP_BUSY) once it has read the SCB address.
 */
struct i596_iscp {
	u32           stat;
	dma_addr_t    scb;
};

/* System Configuration Pointer: first structure the chip reads. */
struct i596_scp {
	u32           sysbus;
	u32            pad;
	dma_addr_t    iscp;
};

/* Per-device state.  Everything the chip DMAs is 32-byte aligned so the
 * CHECK_* cache operations cover whole cachelines.
 */
struct i596_private {
	volatile struct i596_scp scp		__attribute__((aligned(32)));
	volatile struct i596_iscp iscp		__attribute__((aligned(32)));
	volatile struct i596_scb scb		__attribute__((aligned(32)));
	struct sa_cmd sa_cmd			__attribute__((aligned(32)));
	struct cf_cmd cf_cmd			__attribute__((aligned(32)));
	struct tdr_cmd tdr_cmd			__attribute__((aligned(32)));
	struct mc_cmd mc_cmd			__attribute__((aligned(32)));
	struct i596_rfd rfds[RX_RING_SIZE]	__attribute__((aligned(32)));
	struct i596_rbd rbds[RX_RING_SIZE]	__attribute__((aligned(32)));
	struct tx_cmd tx_cmds[TX_RING_SIZE]	__attribute__((aligned(32)));
	struct i596_tbd tbds[TX_RING_SIZE]	__attribute__((aligned(32)));
	u32    stat;
	int last_restart;
	struct i596_rfd *rfd_head;	/* next frame descriptor to process */
	struct i596_rbd *rbd_head;	/* next buffer descriptor to fill */
	struct i596_cmd *cmd_tail;	/* pending command queue */
	struct i596_cmd *cmd_head;
	int cmd_backlog;		/* number of queued commands */
	u32    last_cmd;		/* jiffies when last command queued */
	struct net_device_stats stats;
	int next_tx_cmd;		/* next free slot in tx_cmds/tbds */
	int options;			/* OPT_* flags */
	spinlock_t lock;
	dma_addr_t dma_addr;		/* bus address of this structure */
};
391
/* Parameter bytes for the CmdConfigure command (first 14 are used). */
static char init_setup[] =
{
	0x8E,			/* length, prefetch on */
	0xC8,			/* fifo to 8, monitor off */
	0x80,			/* don't save bad frames */
	0x2E,			/* No source address insertion, 8 byte preamble */
	0x00,			/* priority and backoff defaults */
	0x60,			/* interframe spacing */
	0x00,			/* slot time LSB */
	0xf2,			/* slot time and retries */
	0x00,			/* promiscuous mode */
	0x00,			/* collision detect */
	0x40,			/* minimum frame length */
	0xff,
	0x00,
	0x7f /*  *multi IA */ };

static struct pci_dev *fake_pci_dev; /* The fake pci_dev needed for
					pci_* functions under ccio. */
static int dma_consistent = 1;	/* Zero if pci_alloc_consistent() fails */

/* Forward declarations of the net_device entry points. */
static int i596_open(struct net_device *dev);
static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void i596_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static int i596_close(struct net_device *dev);
static struct net_device_stats *i596_get_stats(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void i596_tx_timeout (struct net_device *dev);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);

static int rx_ring_size = RX_RING_SIZE;
static int ticks_limit = 100;		/* command-backlog timeout, jiffies */
static int max_cmd_backlog = TX_RING_SIZE-1;
426
427
/* Assert Channel Attention: any write to this register tells the 82596
 * to go look at the SCB for new work.
 */
static inline void CA(struct net_device *dev)
{
	gsc_writel(0, dev->base_addr + PA_CHANNEL_ATTENTION);
}
432
433
434static inline void MPU_PORT(struct net_device *dev, int c, dma_addr_t x)
435{
436	struct i596_private *lp = (struct i596_private *) dev->priv;
437
438	u32 v = (u32) (c) | (u32) (x);
439	u16 a, b;
440
441	if (lp->options & OPT_SWAP_PORT) {
442		a = v >> 16;
443		b = v & 0xffff;
444	} else {
445		a = v & 0xffff;
446		b = v >> 16;
447	}
448
449	gsc_writel(a, dev->base_addr + PA_CPU_PORT_L_ACCESS);
450	udelay(1);
451	gsc_writel(b, dev->base_addr + PA_CPU_PORT_L_ACCESS);
452}
453
454
455static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
456{
457	CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
458	while (--delcnt && lp->iscp.stat) {
459		udelay(10);
460		CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
461	}
462	if (!delcnt) {
463		printk("%s: %s, iscp.stat %04x, didn't clear\n",
464		     dev->name, str, lp->iscp.stat);
465		return -1;
466	}
467	else
468		return 0;
469}
470
471
472static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
473{
474	CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
475	while (--delcnt && lp->scb.command) {
476		udelay(10);
477		CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
478	}
479	if (!delcnt) {
480		printk("%s: %s, status %4.4x, cmd %4.4x.\n",
481		     dev->name, str, lp->scb.status, lp->scb.command);
482		return -1;
483	}
484	else
485		return 0;
486}
487
488
/* Debug helper: dump the SCP/ISCP/SCB, the pending command queue and
 * both receive rings to the kernel log.
 */
static void i596_display_data(struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *) dev->priv;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	printk("lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
	       &lp->scp, lp->scp.sysbus, lp->scp.iscp);
	printk("iscp at %p, iscp.stat = %08x, .scb = %08x\n",
	       &lp->iscp, lp->iscp.stat, lp->iscp.scb);
	printk("scb at %p, scb.status = %04x, .command = %04x,"
		" .cmd = %08x, .rfd = %08x\n",
	       &lp->scb, lp->scb.status, lp->scb.command,
		lp->scb.cmd, lp->scb.rfd);
	printk("   errors: crc %x, align %x, resource %x,"
               " over %x, rcvdt %x, short %x\n",
		lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
		lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
	/* Walk the pending command list via the CPU-side links. */
	cmd = lp->cmd_head;
	while (cmd != NULL) {
		printk("cmd at %p, .status = %04x, .command = %04x, .b_next = %08x\n",
		  cmd, cmd->status, cmd->command, cmd->b_next);
		cmd = cmd->v_next;
	}
	/* Both rings are circular, so loop until we come back around. */
	rfd = lp->rfd_head;
	printk("rfd_head = %p\n", rfd);
	do {
		printk ("   %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
                        " count %04x\n",
			rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
			rfd->count);
		rfd = rfd->v_next;
	} while (rfd != lp->rfd_head);
	rbd = lp->rbd_head;
	printk("rbd_head = %p\n", rbd);
	do {
		printk("   %p .count %04x, b_next %08x, b_data %08x, size %04x\n",
			rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
		rbd = rbd->v_next;
	} while (rbd != lp->rbd_head);
	/* Drop our cached view so the next reader sees fresh DMA data. */
	CHECK_INV(lp, sizeof(struct i596_private));
}
532
533
#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
/* 680x0 VME board error-interrupt handler: pokes the PCC2 chip at its
 * fixed address (0xfff42000) to acknowledge/re-enable, then dumps state.
 * NOTE(review): the 0x28/0x2b register offsets and values are
 * board-specific magic inherited from 82596.c — verify against MVME16x
 * PCC2 documentation before touching.
 */
static void i596_error(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

	pcc2[0x28] = 1;
	pcc2[0x2b] = 0x1d;
	printk("%s: Error interrupt\n", dev->name);
	i596_display_data(dev);
}
#endif
546
/* Translate a CPU pointer inside struct i596_private to the bus address
 * the chip must use: lp->dma_addr is the bus address of the structure,
 * so add the offset of 'v' within it.
 */
#define virt_to_dma(lp,v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)(lp)))
548
/* Allocate receive skbs and link both receive rings (RBDs then RFDs)
 * into circular lists, then write the whole area back for the chip.
 * NOTE(review): panics on skb allocation failure instead of returning
 * an error — acceptable only at open() time, and even then harsh.
 */
static inline void init_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *)dev->priv;
	int i;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	/* First build the Receive Buffer Descriptor List */

	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
		dma_addr_t dma_addr;
		struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ + 4);

		if (skb == NULL)
			panic("82596: alloc_skb() failed");
		/* +2 so the IP header lands on a 16-byte boundary. */
		skb_reserve(skb, 2);
		dma_addr = pci_map_single(fake_pci_dev, skb->tail,PKT_BUF_SZ,
					PCI_DMA_FROMDEVICE);
		skb->dev = dev;
		rbd->v_next = rbd+1;
		rbd->b_next = WSWAPrbd(virt_to_dma(lp,rbd+1));
		rbd->b_addr = WSWAPrbd(virt_to_dma(lp,rbd));
		rbd->skb = skb;
		rbd->v_data = skb->tail;
		rbd->b_data = WSWAPchar(dma_addr);
		rbd->size = PKT_BUF_SZ;
	}
	/* Close the RBD ring: last entry points back to the first. */
	lp->rbd_head = lp->rbds;
	rbd = lp->rbds + rx_ring_size - 1;
	rbd->v_next = lp->rbds;
	rbd->b_next = WSWAPrbd(virt_to_dma(lp,lp->rbds));

	/* Now build the Receive Frame Descriptor List */

	for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
		rfd->rbd = I596_NULL;
		rfd->v_next = rfd+1;
		rfd->v_prev = rfd-1;
		rfd->b_next = WSWAPrfd(virt_to_dma(lp,rfd+1));
		rfd->cmd = CMD_FLEX;
	}
	/* Close the RFD ring; only the head RFD references an RBD, and
	 * the tail carries CMD_EOL so the chip stops there.
	 */
	lp->rfd_head = lp->rfds;
	lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
	rfd = lp->rfds;
	rfd->rbd = WSWAPrbd(virt_to_dma(lp,lp->rbd_head));
	rfd->v_prev = lp->rfds + rx_ring_size - 1;
	rfd = lp->rfds + rx_ring_size - 1;
	rfd->v_next = lp->rfds;
	rfd->b_next = WSWAPrfd(virt_to_dma(lp,lp->rfds));
	rfd->cmd = CMD_EOL|CMD_FLEX;

	/* Make everything visible to the chip. */
	CHECK_WBACK_INV(lp, sizeof(struct i596_private));
}
602
603static inline void remove_rx_bufs(struct net_device *dev)
604{
605	struct i596_private *lp = (struct i596_private *)dev->priv;
606	struct i596_rbd *rbd;
607	int i;
608
609	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
610		if (rbd->skb == NULL)
611			break;
612		pci_unmap_single(fake_pci_dev,
613				 (dma_addr_t)WSWAPchar(rbd->b_data),
614				 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
615		dev_kfree_skb(rbd->skb);
616	}
617}
618
619
620static void rebuild_rx_bufs(struct net_device *dev)
621{
622	struct i596_private *lp = (struct i596_private *) dev->priv;
623	int i;
624
625	/* Ensure rx frame/buffer descriptors are tidy */
626
627	for (i = 0; i < rx_ring_size; i++) {
628		lp->rfds[i].rbd = I596_NULL;
629		lp->rfds[i].cmd = CMD_FLEX;
630	}
631	lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
632	lp->rfd_head = lp->rfds;
633	lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
634	lp->rbd_head = lp->rbds;
635	lp->rfds[0].rbd = WSWAPrbd(virt_to_dma(lp,lp->rbds));
636
637	CHECK_WBACK_INV(lp, sizeof(struct i596_private));
638}
639
640
/* Full chip bring-up: hard reset, hand the chip its SCP/ISCP/SCB chain,
 * queue the initial Configure/SASetup/TDR commands, then start the
 * receive unit.  Returns 0 on success, -1 after resetting the chip on
 * any timeout.  The statement ordering here is deliberate — the HP
 * implementation is picky about init sequencing (see file header).
 */
static int init_i596_mem(struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *) dev->priv;
	unsigned long flags;

	disable_irq(dev->irq);	/* disable IRQs from LAN */
	DEB(DEB_INIT,
		printk("RESET 82596 port: %08lX (with IRQ%d disabled)\n",
		       dev->base_addr + PA_I82596_RESET,
		       dev->irq));

	gsc_writel(0, (void*)(dev->base_addr + PA_I82596_RESET)); /* Hard Reset */
	udelay(100);			/* Wait 100us - seems to help */

	/* change the scp address */

	lp->last_cmd = jiffies;


	/* Chain SCP -> ISCP -> SCB; the chip clears iscp.stat when it has
	 * read the SCB address.  0x6c selects the sysbus configuration.
	 */
	lp->scp.sysbus = 0x0000006c;
	lp->scp.iscp = WSWAPiscp(virt_to_dma(lp,&(lp->iscp)));
	lp->iscp.scb = WSWAPscb(virt_to_dma(lp,&(lp->scb)));
	lp->iscp.stat = ISCP_BUSY;
	lp->cmd_backlog = 0;

	lp->cmd_head = NULL;
        lp->scb.cmd = I596_NULL;

	DEB(DEB_INIT,printk("%s: starting i82596.\n", dev->name));

	/* Flush before the chip reads these via DMA. */
	CHECK_WBACK(&(lp->scp), sizeof(struct i596_scp));
	CHECK_WBACK(&(lp->iscp), sizeof(struct i596_iscp));

	MPU_PORT(dev, PORT_ALTSCP, virt_to_dma(lp,&lp->scp));

	CA(dev);

	if (wait_istat(dev,lp,1000,"initialization timed out"))
		goto failed;
	DEB(DEB_INIT,printk("%s: i82596 initialization successful\n", dev->name));

	/* Ensure rx frame/buffer descriptors are tidy */
	rebuild_rx_bufs(dev);

	lp->scb.command = 0;
	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));

	enable_irq(dev->irq);	/* enable IRQs from LAN */

	DEB(DEB_INIT,printk("%s: queuing CmdConfigure\n", dev->name));
	memcpy(lp->cf_cmd.i596_config, init_setup, 14);
	lp->cf_cmd.cmd.command = CmdConfigure;
	CHECK_WBACK(&(lp->cf_cmd), sizeof(struct cf_cmd));
	i596_add_cmd(dev, &lp->cf_cmd.cmd);

	DEB(DEB_INIT,printk("%s: queuing CmdSASetup\n", dev->name));
	memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
	lp->sa_cmd.cmd.command = CmdSASetup;
	CHECK_WBACK(&(lp->sa_cmd), sizeof(struct sa_cmd));
	i596_add_cmd(dev, &lp->sa_cmd.cmd);

	DEB(DEB_INIT,printk("%s: queuing CmdTDR\n", dev->name));
	lp->tdr_cmd.cmd.command = CmdTDR;
	CHECK_WBACK(&(lp->tdr_cmd), sizeof(struct tdr_cmd));
	i596_add_cmd(dev, &lp->tdr_cmd.cmd);

	spin_lock_irqsave (&lp->lock, flags);

	/* Start the receive unit only after the command unit is idle. */
	if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) {
		spin_unlock_irqrestore (&lp->lock, flags);
		goto failed;
	}
	DEB(DEB_INIT,printk("%s: Issuing RX_START\n", dev->name));
	lp->scb.command = RX_START;
	lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));

	CA(dev);

	spin_unlock_irqrestore (&lp->lock, flags);

	if (wait_cmd(dev,lp,1000,"RX_START not processed"))
		goto failed;
	DEB(DEB_INIT,printk("%s: Receive unit started OK\n", dev->name));

	return 0;

failed:
	printk("%s: Failed to initialise 82596\n", dev->name);
	MPU_PORT(dev, PORT_RESET, 0);
	return -1;
}
733
734
/* Receive-completion handler: walk completed RFDs from rfd_head, pass
 * good frames up the stack (in place for large frames, copied for
 * frames <= rx_copybreak), account errors, and recycle each descriptor
 * to the tail of the ring.  Always returns 0.
 */
static inline int i596_rx(struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *)dev->priv;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;
	int frames = 0;

	DEB(DEB_RXFRAME,printk ("i596_rx(), rfd_head %p, rbd_head %p\n",
			lp->rfd_head, lp->rbd_head));


	rfd = lp->rfd_head;		/* Ref next frame to check */

	CHECK_INV(rfd, sizeof(struct i596_rfd));
	while ((rfd->stat) & STAT_C) {	/* Loop while complete frames */
		/* Resolve which RBD (if any) holds this frame's data;
		 * it must be the current rbd_head or the chip and driver
		 * have lost sync.
		 */
		if (rfd->rbd == I596_NULL)
			rbd = NULL;
		else if (rfd->rbd == lp->rbd_head->b_addr) {
			rbd = lp->rbd_head;
			CHECK_INV(rbd, sizeof(struct i596_rbd));
		}
		else {
			printk("%s: rbd chain broken!\n", dev->name);
			rbd = NULL;
		}
		DEB(DEB_RXFRAME, printk("  rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
			rfd, rfd->rbd, rfd->stat));

		if (rbd != NULL && ((rfd->stat) & STAT_OK)) {
			/* a good frame */
			int pkt_len = rbd->count & 0x3fff;
			struct sk_buff *skb = rbd->skb;
			int rx_in_place = 0;

			DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
			frames++;

			/* Check if the packet is long enough to just accept
			 * without copying to a properly sized skbuff.
			 */

			if (pkt_len > rx_copybreak) {
				struct sk_buff *newskb;
				dma_addr_t dma_addr;

				pci_unmap_single(fake_pci_dev,(dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				/* Get fresh skbuff to replace filled one. */
				newskb = dev_alloc_skb(PKT_BUF_SZ + 4);
				if (newskb == NULL) {
					/* NOTE(review): the old buffer was
					 * already unmapped above but stays on
					 * the ring with rbd->b_data unchanged,
					 * so the chip may DMA into an unmapped
					 * address — verify/fix.
					 */
					skb = NULL;	/* drop pkt */
					goto memory_squeeze;
				}
				skb_reserve(newskb, 2);

				/* Pass up the skb already on the Rx ring. */
				skb_put(skb, pkt_len);
				rx_in_place = 1;
				rbd->skb = newskb;
				newskb->dev = dev;
				dma_addr = pci_map_single(fake_pci_dev, newskb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				rbd->v_data = newskb->tail;
				rbd->b_data = WSWAPchar(dma_addr);
				CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
			}
			else
				skb = dev_alloc_skb(pkt_len + 2);
memory_squeeze:
			if (skb == NULL) {
				printk ("%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
				lp->stats.rx_dropped++;
			}
			else {
				skb->dev = dev;
				if (!rx_in_place) {
					/* 16 byte align the data fields */
					pci_dma_sync_single(fake_pci_dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
					skb_reserve(skb, 2);
					memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
				}
				skb->len = pkt_len;
				skb->protocol=eth_type_trans(skb,dev);
				netif_rx(skb);
				dev->last_rx = jiffies;
				lp->stats.rx_packets++;
				lp->stats.rx_bytes+=pkt_len;
			}
		}
		else {
			/* Bad frame: translate the RFD status bits into
			 * the matching net_device_stats error counters.
			 */
			DEB(DEB_ERRORS, printk("%s: Error, rfd.stat = 0x%04x\n",
					dev->name, rfd->stat));
			lp->stats.rx_errors++;
			if ((rfd->stat) & 0x0001)
				lp->stats.collisions++;
			if ((rfd->stat) & 0x0080)
				lp->stats.rx_length_errors++;
			if ((rfd->stat) & 0x0100)
				lp->stats.rx_over_errors++;
			if ((rfd->stat) & 0x0200)
				lp->stats.rx_fifo_errors++;
			if ((rfd->stat) & 0x0400)
				lp->stats.rx_frame_errors++;
			if ((rfd->stat) & 0x0800)
				lp->stats.rx_crc_errors++;
			if ((rfd->stat) & 0x1000)
				lp->stats.rx_length_errors++;
		}

		/* Clear the buffer descriptor count and EOF + F flags */

		if (rbd != NULL && (rbd->count & 0x4000)) {
			rbd->count = 0;
			lp->rbd_head = rbd->v_next;
			CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
		}

		/* Tidy the frame descriptor, marking it as end of list */

		rfd->rbd = I596_NULL;
		rfd->stat = 0;
		rfd->cmd = CMD_EOL|CMD_FLEX;
		rfd->count = 0;

		/* Remove end-of-list from old end descriptor */

		rfd->v_prev->cmd = CMD_FLEX;

		/* Update record of next frame descriptor to process */

		lp->scb.rfd = rfd->b_next;
		lp->rfd_head = rfd->v_next;
		CHECK_WBACK_INV(rfd->v_prev, sizeof(struct i596_rfd));
		CHECK_WBACK_INV(rfd, sizeof(struct i596_rfd));
		rfd = lp->rfd_head;
		CHECK_INV(rfd, sizeof(struct i596_rfd));
	}

	DEB(DEB_RXFRAME,printk ("frames %d\n", frames));

	return 0;
}
875
876
877static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
878{
879	struct i596_cmd *ptr;
880
881	while (lp->cmd_head != NULL) {
882		ptr = lp->cmd_head;
883		lp->cmd_head = ptr->v_next;
884		lp->cmd_backlog--;
885
886		switch ((ptr->command) & 0x7) {
887		case CmdTx:
888			{
889				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
890				struct sk_buff *skb = tx_cmd->skb;
891				pci_unmap_single(fake_pci_dev, tx_cmd->dma_addr, skb->len, PCI_DMA_TODEVICE);
892
893				dev_kfree_skb(skb);
894
895				lp->stats.tx_errors++;
896				lp->stats.tx_aborted_errors++;
897
898				ptr->v_next = NULL;
899				ptr->b_next = I596_NULL;
900				tx_cmd->cmd.command = 0;  /* Mark as free */
901				break;
902			}
903		default:
904			ptr->v_next = NULL;
905			ptr->b_next = I596_NULL;
906		}
907		CHECK_WBACK_INV(ptr, sizeof(struct i596_cmd));
908	}
909
910	wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
911	lp->scb.cmd = I596_NULL;
912	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
913}
914
915
/* Soft reset: abort both the command and receive units, flush the
 * pending command queue and any completed frames, then re-run the full
 * chip initialisation.  The queue is stopped for the duration.
 */
static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
{
	unsigned long flags;

	DEB(DEB_RESET,printk("i596_reset\n"));

	spin_lock_irqsave (&lp->lock, flags);

	/* Wait for any in-flight SCB command before issuing the abort. */
	wait_cmd(dev,lp,100,"i596_reset timed out");

	netif_stop_queue(dev);

	lp->scb.command = CUC_ABORT | RX_ABORT;
	CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
	CA(dev);

	/* wait for shutdown */
	wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
	spin_unlock_irqrestore (&lp->lock, flags);

	i596_cleanup_cmd(dev,lp);
	i596_rx(dev);

	netif_start_queue(dev);
	init_i596_mem(dev);
}
942
943
/* Append a command to the chip's command queue.  If the queue was empty
 * the SCB is pointed at it and the command unit started; otherwise the
 * command is linked after the current tail.  If the backlog has grown
 * too large for too long, reset the chip.
 */
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
	struct i596_private *lp = (struct i596_private *) dev->priv;
	unsigned long flags;

	DEB(DEB_ADDCMD,printk("i596_add_cmd cmd_head %p\n", lp->cmd_head));

	/* Every queued command terminates the list and interrupts on
	 * completion; flush it before the chip can see it.
	 */
	cmd->status = 0;
	cmd->command |= (CMD_EOL | CMD_INTR);
	cmd->v_next = NULL;
	cmd->b_next = I596_NULL;
	CHECK_WBACK(cmd, sizeof(struct i596_cmd));

	spin_lock_irqsave (&lp->lock, flags);

	if (lp->cmd_head != NULL) {
		/* Non-empty queue: link after the tail.  The chip follows
		 * b_next, which points at the status field (see the
		 * i596_cmd comment above).
		 */
		lp->cmd_tail->v_next = cmd;
		lp->cmd_tail->b_next = WSWAPcmd(virt_to_dma(lp,&cmd->status));
		CHECK_WBACK(lp->cmd_tail, sizeof(struct i596_cmd));
	} else {
		/* Empty queue: hand the command to the SCB and kick the
		 * command unit.
		 */
		lp->cmd_head = cmd;
		wait_cmd(dev,lp,100,"i596_add_cmd timed out");
		lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&cmd->status));
		lp->scb.command = CUC_START;
		CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
		CA(dev);
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;

	spin_unlock_irqrestore (&lp->lock, flags);

	/* Backlog watchdog: if commands have piled up and none completed
	 * within ticks_limit jiffies, assume the command unit is stuck.
	 */
	if (lp->cmd_backlog > max_cmd_backlog) {
		unsigned long tickssofar = jiffies - lp->last_cmd;

		if (tickssofar < ticks_limit)
			return;

		printk("%s: command unit timed out, status resetting.\n", dev->name);
		i596_reset(dev, lp);
	}
}
986
987
988
989static int i596_open(struct net_device *dev)
990{
991	DEB(DEB_OPEN,printk("%s: i596_open() irq %d.\n", dev->name, dev->irq));
992
993	MOD_INC_USE_COUNT;
994
995	if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
996		printk("%s: IRQ %d not free\n", dev->name, dev->irq);
997		goto out;
998	}
999
1000	init_rx_bufs(dev);
1001
1002	if (init_i596_mem(dev)) {
1003		printk("%s: Failed to init memory\n", dev->name);
1004		goto out_remove_rx_bufs;
1005	}
1006
1007	netif_start_queue(dev);
1008
1009	return 0;
1010
1011out_remove_rx_bufs:
1012	remove_rx_bufs(dev);
1013	free_irq(dev->irq, dev);
1014out:
1015	MOD_DEC_USE_COUNT;
1016
1017	return -EAGAIN;
1018}
1019
/* Transmit watchdog.  If no packet has completed since the previous
 * timeout, do a full reset; otherwise just restart both units with a
 * channel attention.
 */
static void i596_tx_timeout (struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *) dev->priv;

	/* Transmitter timeout, serious problems. */
	DEB(DEB_ERRORS,printk("%s: transmit timed out, status resetting.\n",
			dev->name));

	lp->stats.tx_errors++;

	/* Try to restart the adaptor */
	if (lp->last_restart == lp->stats.tx_packets) {
		DEB(DEB_ERRORS,printk ("Resetting board.\n"));
		/* Shutdown and restart */
		i596_reset (dev, lp);
	} else {
		/* Issue a channel attention signal */
		DEB(DEB_ERRORS,printk ("Kicking board.\n"));
		lp->scb.command = CUC_START | RX_START;
		/* NOTE(review): other SCB writes use CHECK_WBACK; the
		 * _INV variant here looks inconsistent — confirm intent.
		 */
		CHECK_WBACK_INV(&(lp->scb), sizeof(struct i596_scb));
		CA (dev);
		lp->last_restart = lp->stats.tx_packets;
	}

	dev->trans_start = jiffies;
	netif_wake_queue (dev);
}
1047
1048
/* Queue one sk_buff for transmission.  The skb data is DMA-mapped,
 * described by a tx_cmd + tbd pair from the static TX ring, and handed
 * to the command unit via i596_add_cmd().  The skb is freed later by
 * the interrupt handler once the command completes. */
static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *) dev->priv;
	struct tx_cmd *tx_cmd;
	struct i596_tbd *tbd;
	/* Pad short frames up to the Ethernet minimum.
	 * NOTE(review): only skb->len bytes are mapped below while the
	 * tbd advertises "length" bytes, so for a short frame the chip
	 * transmits pad bytes taken from whatever follows the skb data.
	 * Presumably the skb should be padded first -- TODO confirm. */
	short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
	dev->trans_start = jiffies;

	DEB(DEB_STARTTX,printk("%s: i596_start_xmit(%x,%p) called\n", dev->name,
				skb->len, skb->data));

	netif_stop_queue(dev);

	tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
	tbd = lp->tbds + lp->next_tx_cmd;

	/* A non-zero command word means this ring slot is still in use:
	 * the ring is full, so the packet is dropped. */
	if (tx_cmd->cmd.command) {
		DEB(DEB_ERRORS,printk ("%s: xmit ring full, dropping packet.\n",
				dev->name));
		lp->stats.tx_dropped++;

		dev_kfree_skb(skb);
	} else {
		if (++lp->next_tx_cmd == TX_RING_SIZE)
			lp->next_tx_cmd = 0;
		/* The 82596 sees physical (DMA) addresses, byte-swapped as
		 * required -- see the WSWAP* helpers. */
		tx_cmd->tbd = WSWAPtbd(virt_to_dma(lp,tbd));
		tbd->next = I596_NULL;

		tx_cmd->cmd.command = CMD_FLEX | CmdTx;
		tx_cmd->skb = skb;

		tx_cmd->pad = 0;
		tx_cmd->size = 0;
		tbd->pad = 0;
		tbd->size = EOF | length;

		tx_cmd->dma_addr = pci_map_single(fake_pci_dev, skb->data, skb->len,
				PCI_DMA_TODEVICE);
		tbd->data = WSWAPchar(tx_cmd->dma_addr);

		DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
		/* Flush the descriptors to memory before the chip reads them. */
		CHECK_WBACK_INV(tx_cmd, sizeof(struct tx_cmd));
		CHECK_WBACK_INV(tbd, sizeof(struct i596_tbd));
		i596_add_cmd(dev, &tx_cmd->cmd);

		lp->stats.tx_packets++;
		lp->stats.tx_bytes += length;
	}

	netif_start_queue(dev);

	return 0;
}
1102
/* Debug dump of an Ethernet header at "add": source MAC --> destination
 * MAC, the two ethertype bytes, and a caller-supplied tag string. */
static void print_eth(unsigned char *add, char *str)
{
	int i;

	printk("i596 0x%p, ", add);
	for (i = 6; i < 12; i++)
		printk(" %02X", add[i]);
	printk(" -->");
	for (i = 0; i < 6; i++)
		printk(" %02X", add[i]);
	printk(" %02X%02X, %s\n", add[12], add[13], str);
}
1115
1116
1117#define LAN_PROM_ADDR	0xF0810000
1118
1119static int __devinit i82596_probe(struct net_device *dev)
1120{
1121	int i;
1122	struct i596_private *lp;
1123	char eth_addr[6];
1124	dma_addr_t dma_addr;
1125
1126	/* This lot is ensure things have been cache line aligned. */
1127	if (sizeof(struct i596_rfd) != 32) {
1128	    printk("82596: sizeof(struct i596_rfd) = %d\n",
1129			    sizeof(struct i596_rfd));
1130	    return -ENODEV;
1131	}
1132	if ((sizeof(struct i596_rbd) % 32) != 0) {
1133	    printk("82596: sizeof(struct i596_rbd) = %d\n",
1134			    sizeof(struct i596_rbd));
1135	    return -ENODEV;
1136	}
1137	if ((sizeof(struct tx_cmd) % 32) != 0) {
1138	    printk("82596: sizeof(struct tx_cmd) = %d\n",
1139			    sizeof(struct tx_cmd));
1140	    return -ENODEV;
1141	}
1142	if (sizeof(struct i596_tbd) != 32) {
1143	    printk("82596: sizeof(struct i596_tbd) = %d\n",
1144			    sizeof(struct i596_tbd));
1145	    return -ENODEV;
1146	}
1147#ifndef __LP64__
1148	if (sizeof(struct i596_private) > 4096) {
1149	    printk("82596: sizeof(struct i596_private) = %d\n",
1150			    sizeof(struct i596_private));
1151	    return -ENODEV;
1152	}
1153#endif
1154
1155	if (!dev->base_addr || !dev->irq)
1156		return -ENODEV;
1157
1158	if (pdc_lan_station_id(eth_addr, dev->base_addr)) {
1159		for (i=0; i < 6; i++) {
1160			eth_addr[i] = gsc_readb(LAN_PROM_ADDR + i);
1161		}
1162		printk("82596.c: MAC of HP700 LAN read from EEPROM\n");
1163	}
1164
1165	dev->mem_start = (unsigned long) pci_alloc_consistent(fake_pci_dev,
1166		sizeof(struct i596_private), &dma_addr);
1167	if (!dev->mem_start) {
1168		printk("%s: Couldn't get consistent shared memory\n", dev->name);
1169		dma_consistent = 0;
1170		dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0);
1171		if (!dev->mem_start) {
1172			printk("%s: Couldn't get shared memory\n", dev->name);
1173			return -ENOMEM;
1174		}
1175		dma_addr = virt_to_bus(dev->mem_start);
1176	}
1177
1178	ether_setup(dev);
1179	DEB(DEB_PROBE,printk("%s: 82596 at %#3lx,", dev->name, dev->base_addr));
1180
1181	for (i = 0; i < 6; i++)
1182		DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));
1183
1184	DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));
1185
1186	DEB(DEB_PROBE,printk(version));
1187
1188	/* The 82596-specific entries in the device structure. */
1189	dev->open = i596_open;
1190	dev->stop = i596_close;
1191	dev->hard_start_xmit = i596_start_xmit;
1192	dev->get_stats = i596_get_stats;
1193	dev->set_multicast_list = set_multicast_list;
1194	dev->tx_timeout = i596_tx_timeout;
1195	dev->watchdog_timeo = TX_TIMEOUT;
1196
1197	dev->priv = (void *)(dev->mem_start);
1198
1199	lp = (struct i596_private *) dev->priv;
1200	DEB(DEB_INIT,printk ("%s: lp at 0x%08lx (%d bytes), lp->scb at 0x%08lx\n",
1201		dev->name, (unsigned long)lp,
1202		sizeof(struct i596_private), (unsigned long)&lp->scb));
1203	memset(lp, 0, sizeof(struct i596_private));
1204
1205	lp->scb.command = 0;
1206	lp->scb.cmd = I596_NULL;
1207	lp->scb.rfd = I596_NULL;
1208	lp->lock = SPIN_LOCK_UNLOCKED;
1209	lp->dma_addr = dma_addr;
1210
1211	CHECK_WBACK_INV(dev->mem_start, sizeof(struct i596_private));
1212
1213	return 0;
1214}
1215
1216
/* Top-level interrupt handler: read and acknowledge the SCB status,
 * reap completed commands from the queue, service received frames, and
 * restart the command/receive units as needed.  SCB status bits, per
 * the printks below: 0x8000 = command completed, 0x4000 = frame
 * received, 0x2000 = command unit went inactive, 0x1000 = receive unit
 * went not-ready. */
static void i596_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct i596_private *lp;
	unsigned short status, ack_cmd = 0;

	if (dev == NULL) {
		printk("i596_interrupt(): irq %d for unknown device.\n", irq);
		return;
	}

	lp = (struct i596_private *) dev->priv;

	spin_lock (&lp->lock);

	/* Wait for the chip to finish any command in flight before the
	 * SCB status is read. */
	wait_cmd(dev,lp,100,"i596 interrupt, timeout");
	status = lp->scb.status;

	DEB(DEB_INTS,printk("%s: i596 interrupt, IRQ %d, status %4.4x.\n",
			dev->name, irq, status));

	/* The top four status bits are the events to acknowledge. */
	ack_cmd = status & 0xf000;

	if (!ack_cmd) {
		DEB(DEB_ERRORS, printk("%s: interrupt with no events\n", dev->name));
		spin_unlock (&lp->lock);
		return;
	}

	/* Command completed and/or command unit went inactive. */
	if ((status & 0x8000) || (status & 0x2000)) {
		struct i596_cmd *ptr;

		if ((status & 0x8000))
			DEB(DEB_INTS,printk("%s: i596 interrupt completed command.\n", dev->name));
		if ((status & 0x2000))
			DEB(DEB_INTS,printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));

		/* Reap finished commands from the head of the queue. */
		while (lp->cmd_head != NULL) {
			/* Invalidate so the chip's status write is visible. */
			CHECK_INV(lp->cmd_head, sizeof(struct i596_cmd));
			if (!(lp->cmd_head->status & STAT_C))
				break;

			ptr = lp->cmd_head;

			DEB(DEB_STATUS,printk("cmd_head->status = %04x, ->command = %04x\n",
				       lp->cmd_head->status, lp->cmd_head->command));
			lp->cmd_head = ptr->v_next;
			lp->cmd_backlog--;

			switch ((ptr->command) & 0x7) {
			case CmdTx:
			    {
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				if ((ptr->status) & STAT_OK) {
					DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));
				} else {
					/* Decode the TX error bits into the
					 * matching statistics counters. */
					lp->stats.tx_errors++;
					if ((ptr->status) & 0x0020)
						lp->stats.collisions++;
					if (!((ptr->status) & 0x0040))
						lp->stats.tx_heartbeat_errors++;
					if ((ptr->status) & 0x0400)
						lp->stats.tx_carrier_errors++;
					if ((ptr->status) & 0x0800)
						lp->stats.collisions++;
					if ((ptr->status) & 0x1000)
						lp->stats.tx_aborted_errors++;
				}
				/* Undo the mapping done in i596_start_xmit
				 * and release the skb (from irq context). */
				pci_unmap_single(fake_pci_dev, tx_cmd->dma_addr, skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(skb);

				tx_cmd->cmd.command = 0; /* Mark free */
				break;
			    }
			case CmdTDR:
			    {
				unsigned short status = ((struct tdr_cmd *)ptr)->status;

				if (status & 0x8000) {
					DEB(DEB_ANY,printk("%s: link ok.\n", dev->name));
				} else {
					if (status & 0x4000)
						printk("%s: Transceiver problem.\n", dev->name);
					if (status & 0x2000)
						printk("%s: Termination problem.\n", dev->name);
					if (status & 0x1000)
						printk("%s: Short circuit.\n", dev->name);

					DEB(DEB_TDR,printk("%s: Time %d.\n", dev->name, status & 0x07ff));
				}
				break;
			    }
			case CmdConfigure:
				/* Zap command so set_multicast_list() knows it is free */
				ptr->command = 0;
				break;
			}
			ptr->v_next = NULL;
		        ptr->b_next = I596_NULL;
			CHECK_WBACK(ptr, sizeof(struct i596_cmd));
			lp->last_cmd = jiffies;
		}

		/* This mess is arranging that only the last of any outstanding
		 * commands has the interrupt bit set.  Should probably really
		 * only add to the cmd queue when the CU is stopped.
		 */
		ptr = lp->cmd_head;
		while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
			struct i596_cmd *prev = ptr;

			ptr->command &= 0x1fff;
			ptr = ptr->v_next;
			CHECK_WBACK_INV(prev, sizeof(struct i596_cmd));
		}

		/* Restart the CU if commands are still queued. */
		if ((lp->cmd_head != NULL))
			ack_cmd |= CUC_START;
		lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&lp->cmd_head->status));
		CHECK_WBACK_INV(&lp->scb, sizeof(struct i596_scb));
	}
	/* Receive-unit events: frame received and/or RU went not-ready. */
	if ((status & 0x1000) || (status & 0x4000)) {
		if ((status & 0x4000))
			DEB(DEB_INTS,printk("%s: i596 interrupt received a frame.\n", dev->name));
		i596_rx(dev);
		/* Only RX_START if stopped - RGH 07-07-96 */
		if (status & 0x1000) {
			if (netif_running(dev)) {
				DEB(DEB_ERRORS,printk("%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
				ack_cmd |= RX_START;
				lp->stats.rx_errors++;
				lp->stats.rx_fifo_errors++;
				rebuild_rx_bufs(dev);
			}
		}
	}
	/* Acknowledge the handled events back to the chip. */
	wait_cmd(dev,lp,100,"i596 interrupt, timeout");
	lp->scb.command = ack_cmd;
	CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));

	/* DANGER: I suspect that some kind of interrupt
	 acknowledgement aside from acking the 82596 might be needed
	 here...  but it's running acceptably without */

	CA(dev);

	wait_cmd(dev,lp,100,"i596 interrupt, exit timeout");
	DEB(DEB_INTS,printk("%s: exiting interrupt.\n", dev->name));

	spin_unlock (&lp->lock);
	return;
}
1371
/* Stop the interface: abort the command and receive units, optionally
 * dump chip state, flush queued commands, and release the IRQ and the
 * RX buffers. */
static int i596_close(struct net_device *dev)
{
	struct i596_private *lp = (struct i596_private *) dev->priv;
	unsigned long flags;

	netif_stop_queue(dev);

	DEB(DEB_INIT,printk("%s: Shutting down ethercard, status was %4.4x.\n",
		       dev->name, lp->scb.status));

	/* Old-style global interrupt disable while talking to the chip. */
	save_flags(flags);
	cli();

	wait_cmd(dev,lp,100,"close1 timed out");
	lp->scb.command = CUC_ABORT | RX_ABORT;
	/* Flush the SCB to memory before signalling channel attention. */
	CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));

	CA(dev);

	wait_cmd(dev,lp,100,"close2 timed out");
	restore_flags(flags);
	DEB(DEB_STRUCT,i596_display_data(dev));
	/* Release any commands still sitting on the queue. */
	i596_cleanup_cmd(dev,lp);

	disable_irq(dev->irq);

	free_irq(dev->irq, dev);
	remove_rx_bufs(dev);

	MOD_DEC_USE_COUNT;

	return 0;
}
1405
1406static struct net_device_stats *
1407 i596_get_stats(struct net_device *dev)
1408{
1409	struct i596_private *lp = (struct i596_private *) dev->priv;
1410
1411	return &lp->stats;
1412}
1413
1414/*
1415 *    Set or clear the multicast filter for this adaptor.
1416 */
1417
1418static void set_multicast_list(struct net_device *dev)
1419{
1420	struct i596_private *lp = (struct i596_private *) dev->priv;
1421	int config = 0, cnt;
1422
1423	DEB(DEB_MULTI,printk("%s: set multicast list, %d entries, promisc %s, allmulti %s\n", dev->name, dev->mc_count, dev->flags & IFF_PROMISC ? "ON" : "OFF", dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
1424
1425	if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
1426		lp->cf_cmd.i596_config[8] |= 0x01;
1427		config = 1;
1428	}
1429	if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
1430		lp->cf_cmd.i596_config[8] &= ~0x01;
1431		config = 1;
1432	}
1433	if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
1434		lp->cf_cmd.i596_config[11] &= ~0x20;
1435		config = 1;
1436	}
1437	if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
1438		lp->cf_cmd.i596_config[11] |= 0x20;
1439		config = 1;
1440	}
1441	if (config) {
1442		if (lp->cf_cmd.cmd.command)
1443			printk("%s: config change request already queued\n",
1444			       dev->name);
1445		else {
1446			lp->cf_cmd.cmd.command = CmdConfigure;
1447			CHECK_WBACK_INV(&lp->cf_cmd, sizeof(struct cf_cmd));
1448			i596_add_cmd(dev, &lp->cf_cmd.cmd);
1449		}
1450	}
1451
1452	cnt = dev->mc_count;
1453	if (cnt > MAX_MC_CNT)
1454	{
1455		cnt = MAX_MC_CNT;
1456		printk("%s: Only %d multicast addresses supported",
1457			dev->name, cnt);
1458	}
1459
1460	if (dev->mc_count > 0) {
1461		struct dev_mc_list *dmi;
1462		unsigned char *cp;
1463		struct mc_cmd *cmd;
1464
1465		cmd = &lp->mc_cmd;
1466		cmd->cmd.command = CmdMulticastList;
1467		cmd->mc_cnt = dev->mc_count * 6;
1468		cp = cmd->mc_addrs;
1469		for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
1470			memcpy(cp, dmi->dmi_addr, 6);
1471			if (i596_debug > 1)
1472				DEB(DEB_MULTI,printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
1473						dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
1474		}
1475		CHECK_WBACK_INV(&lp->mc_cmd, sizeof(struct mc_cmd));
1476		i596_add_cmd(dev, &cmd->cmd);
1477	}
1478}
1479
/* Module parameter: debug mask copied into i596_debug at init time
 * (-1 keeps the built-in default).  NOTE(review): MODULE_PARM appears
 * before the variable's declaration; the 2.4 MODULE_PARM macro only
 * stringifies the name so this compiles, but declaring "debug" first
 * would be cleaner -- confirm before reordering. */
MODULE_PARM(debug, "i");
MODULE_PARM_DESC(debug, "lasi_82596 debug mask");
static int debug = -1;

/* Book-keeping for every probed board, walked at module unload time. */
static int num_drivers;
static struct net_device *netdevs[MAX_DRIVERS];
1486
1487static int __devinit
1488lan_init_chip(struct parisc_device *dev)
1489{
1490	struct	net_device *netdevice;
1491	int	retval;
1492
1493	if (num_drivers >= MAX_DRIVERS) {
1494		/* max count of possible i82596 drivers reached */
1495		return -ENODEV;
1496	}
1497
1498	fake_pci_dev = ccio_get_fake(dev);
1499
1500	if (!dev->irq) {
1501		printk(KERN_ERR __FILE__ ": IRQ not found for i82596 at 0x%lx\n", dev->hpa);
1502		return -ENODEV;
1503	}
1504
1505	printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", dev->hpa, dev->irq);
1506
1507	netdevice = alloc_etherdev(0);
1508	if (!netdevice)
1509		return -ENOMEM;
1510
1511	netdevice->base_addr = dev->hpa;
1512	netdevice->irq = dev->irq;
1513	netdevice->init = i82596_probe;
1514
1515	retval = register_netdev(netdevice);
1516	if (retval) {
1517		printk(KERN_WARNING __FILE__ ": register_netdevice ret'd %d\n", retval);
1518		kfree(netdevice);
1519		return -ENODEV;
1520	};
1521	if (dev->id.sversion == 0x72) {
1522		((struct i596_private *)netdevice->priv)->options = OPT_SWAP_PORT;
1523	}
1524
1525	netdevs[num_drivers++] = netdevice;
1526
1527	return retval;
1528}
1529
1530
/* PA-RISC device IDs this driver binds to.  The 0x72 variant gets
 * OPT_SWAP_PORT set in lan_init_chip(); 0x8a is the other on-board
 * LAN flavour.  The zeroed entry terminates the table. */
static struct parisc_device_id lan_tbl[] = {
	{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008a },
	{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00072 },
	{ 0, }
};

MODULE_DEVICE_TABLE(parisc, lan_tbl);
1538
1539static struct parisc_driver lan_driver = {
1540	name:		"Apricot",
1541	id_table:	lan_tbl,
1542	probe:		lan_init_chip,
1543};
1544
1545static int __devinit lasi_82596_init(void)
1546{
1547	if (debug >= 0)
1548		i596_debug = debug;
1549	return register_parisc_driver(&lan_driver);
1550}
1551
1552module_init(lasi_82596_init);
1553
1554static void __exit lasi_82596_exit(void)
1555{
1556	int i;
1557
1558	for (i=0; i<MAX_DRIVERS; i++) {
1559		struct i596_private *lp;
1560		struct net_device *netdevice;
1561
1562		netdevice = netdevs[i];
1563		if (!netdevice)
1564			continue;
1565
1566		unregister_netdev(netdevice);
1567
1568		lp = (struct i596_private *) netdevice->priv;
1569		if (dma_consistent)
1570			pci_free_consistent(fake_pci_dev,
1571					    sizeof(struct i596_private),
1572				(void *)netdevice->mem_start, lp->dma_addr);
1573		else
1574			free_page(netdevice->mem_start);
1575
1576		netdevice->priv = NULL;
1577	}
1578
1579	unregister_parisc_driver(&lan_driver);
1580}
1581
1582module_exit(lasi_82596_exit);
1583