1/*  *********************************************************************
2    *  Broadcom Common Firmware Environment (CFE)
3    *
    *  BCM5700/Tigon3 (10/100/1000 Ethernet MAC) driver	File: dev_bcm5700.c
5    *
6    *  Author:  Ed Satterthwaite
7    *
8    *********************************************************************
9    *
10    *  Copyright 2000,2001,2002,2003
11    *  Broadcom Corporation. All rights reserved.
12    *
13    *  This software is furnished under license and may be used and
14    *  copied only in accordance with the following terms and
15    *  conditions.  Subject to these conditions, you may download,
16    *  copy, install, use, modify and distribute modified or unmodified
17    *  copies of this software in source and/or binary form.  No title
18    *  or ownership is transferred hereby.
19    *
20    *  1) Any source code used, modified or distributed must reproduce
21    *     and retain this copyright notice and list of conditions
22    *     as they appear in the source file.
23    *
24    *  2) No right is granted to use any trade name, trademark, or
25    *     logo of Broadcom Corporation.  The "Broadcom Corporation"
26    *     name may not be used to endorse or promote products derived
27    *     from this software without the prior written permission of
28    *     Broadcom Corporation.
29    *
30    *  3) THIS SOFTWARE IS PROVIDED "AS-IS" AND ANY EXPRESS OR
31    *     IMPLIED WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED
32    *     WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
33    *     PURPOSE, OR NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT
34    *     SHALL BROADCOM BE LIABLE FOR ANY DAMAGES WHATSOEVER, AND IN
35    *     PARTICULAR, BROADCOM SHALL NOT BE LIABLE FOR DIRECT, INDIRECT,
36    *     INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
37    *     (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
38    *     GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
39    *     BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
40    *     OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
41    *     TORT (INCLUDING NEGLIGENCE OR OTHERWISE), EVEN IF ADVISED OF
42    *     THE POSSIBILITY OF SUCH DAMAGE.
43    ********************************************************************* */
44
45
46#include "cfe.h"
47#include "lib_physio.h"
48#ifdef CPUCFG_MEMCPY
50extern void *CPUCFG_MEMCPY(void *dest, const void *src, size_t cnt);
51#define blockcopy CPUCFG_MEMCPY
52#else
53#define blockcopy memcpy
54#endif
55#include "cfe_irq.h"
56
57#include "net_enet.h"
58
59#include "pcivar.h"
60#include "pcireg.h"
61
62#include "bcm5700.h"
63#include "mii.h"
64
65
66
67
68static uint32_t l_phys_read32(uint32_t addr );
69static void l_phys_write32(uint32_t addr, uint32_t val);
70
71
72/* This is a driver for the Broadcom 570x ("Tigon 3") 10/100/1000 MAC.
73   Currently, the 5700, 5701, 5703C, 5704C and 5705 have been tested.
74   Only 10/100/1000 BASE-T PHYs are supported; variants with SerDes
75   PHYs are not supported.
76
77   Reference:
78     Host Programmer Interface Specification for the BCM570X Family
79       of Highly-Integrated Media Access Controllers, 570X-PG106-R.
80     Broadcom Corp., 16215 Alton Parkway, Irvine CA, 09/27/02
81
82   This driver takes advantage of DMA coherence in systems that
83   support it (e.g., SB1250).  For systems without coherent DMA (e.g.,
84   BCM47xx SOCs), descriptor and packet buffer memory is explicitly
85   flushed, while status memory is always referenced uncached.
86
87   The driver prefers "preserve bit lanes" mode for big-endian
88   systems that provide the option, but it can use "preserve byte
89   lanes" as well.
90
91   Note that the 5705 does not fully map all address ranges.  Per
92   the manual, reads and writes of the unmapped regions are permitted
93   and do not fault; however, it apparently has some poisoned registers,
94   at least in early revs, that should not be touched.  See the
95   conditionals in the code. */
96
97/* PIOSWAP controls whether word-swapping takes place for transactions
98   in which the 570x is the target device.  In theory, either value
99   should work (with access macros adjusted as below) and it should be
100   set to be consistent with the settings for 570x as initiator.
101   Empirically, however, some combinations only work with no swap.
102   For big-endian systems:
103
104                          SWAP=0    SWAP=1
105   5700     32 PCI          OK        OK
106   5700     64 Sturgeon     OK        OK
107   5701-32  32 PCI          OK        OK
108   5701-32  64 Sturgeon     OK        OK
109   5701-32  64 Golem        OK        OK
110   5701-64  64 Sturgeon     OK        OK
111   5701-64  64 Golem        OK       FAIL
112   5705     32 PCI          OK        OK
113   5705     64 Sturgeon    (OK)*     FAIL
114   5705     64 Golem        OK        OK
115
116   For little-endian systems, only SWAP=1 appears to work.
117
118   * PCI status/interrupt ordering problem under load.  */
119
120#ifndef T3_DEBUG
121#define T3_DEBUG 0
122#endif
123
124#ifndef T3_BRINGUP
125#define T3_BRINGUP 0
126#endif
127
128#if ((ENDIAN_BIG + ENDIAN_LITTLE) != 1)
129#error "dev_bcm5700: system endian not set"
130#endif
131
132#if ENDIAN_LITTLE
133#define PIOSWAP 1
134#else
135#define PIOSWAP 0
136#endif
137
138/* Temporary, until configs supply MATCH_BYTES */
139#if defined(MPC824X)  /* any machine without preserve-bits for PIO */
140#define MATCH_BYTES  1
141#else
142#define MATCH_BYTES  0
143#endif
144
145/* Broadcom recommends using PHY interrupts instead of autopolling,
146   but I haven't made it work yet. */
147#define T3_AUTOPOLL 1
148
149/* Set IPOLL to drive processing through the interrupt dispatcher.
150   Set XPOLL to drive processing by an external polling agent.  One
151   must be set; setting both is ok. */
152
153#ifndef IPOLL
154#define IPOLL 0
155#endif
156#ifndef XPOLL
157#define XPOLL 1
158#endif
159
160#define MIN_ETHER_PACK  (ENET_MIN_PKT+ENET_CRC_SIZE)  /* min packet size */
161#define MAX_ETHER_PACK  (ENET_MAX_PKT+ENET_CRC_SIZE)  /* max packet size */
162#define VLAN_TAG_LEN    4                             /* VLAN type plus tag */
163
164/* Packet buffers.  For the Tigon 3, packet buffer alignment is
165   arbitrary and can be to any byte boundary.  We would like it
166   aligned to a cache line boundary for performance, although there is
167   a trade-off with IP/TCP header alignment.  Jumbo frames are not
168   currently supported.  */
169
170#define ETH_PKTBUF_LEN      (((MAX_ETHER_PACK+31)/32)*32)
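/* For example (assuming the usual ENET_MAX_PKT of 1514 plus the 4-byte CRC),
   MAX_ETHER_PACK is 1518 and the rounding above yields 1536-byte buffers,
   i.e. an integral number of 32-byte cache lines per packet. */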
171
172typedef struct eth_pkt_s {
173    queue_t next;			/*  8 */
174    uint8_t *buffer;			/*  4 */
175    uint32_t flags;			/*  4 */
176    int32_t length;			/*  4 */
177    uint32_t unused[3];			/* 12 */
178    uint8_t data[ETH_PKTBUF_LEN];
179} eth_pkt_t;
180
181#define CACHE_ALIGN       32
182#define ALIGN(n,align)    (((n)+((align)-1)) & ~((align)-1))
183
184#define ETH_PKTBUF_LINES  ((sizeof(eth_pkt_t) + (CACHE_ALIGN-1))/CACHE_ALIGN)
185#define ETH_PKTBUF_SIZE   (ETH_PKTBUF_LINES*CACHE_ALIGN)
186#define ETH_PKTBUF_OFFSET (offsetof(eth_pkt_t, data))
187
188#define ETH_PKT_BASE(data) ((eth_pkt_t *)((data) - ETH_PKTBUF_OFFSET))
189
190static void
191show_packet(char c, eth_pkt_t *pkt)
192{
193    int i;
194    int n = (pkt->length < 32 ? pkt->length : 32);
195
196    xprintf("%c[%4d]:", c, pkt->length);
197    for (i = 0; i < n; i++) {
198	if (i % 4 == 0)
199	    xprintf(" ");
200	xprintf("%02x", pkt->buffer[i]);
201	}
202    xprintf("\n");
203}
204
205
206static void t3_ether_probe(cfe_driver_t *drv,
207			   unsigned long probe_a, unsigned long probe_b,
208			   void *probe_ptr);
209
210
211/* BCM570X Hardware Common Data Structures
212   XXX Should they move to the header file? */
213
214/* Chip documentation numbers the rings with 1-origin.  */
215
216#define RI(n)                 ((n)-1)
217
218/* BCM570x Ring Sizes (no external memory).  Pages 97-98 */
219
220#define TXP_MAX_RINGS         16
221#define TXP_INTERNAL_RINGS    4
222#define TXP_RING_ENTRIES      512
223
224#define RXP_STD_ENTRIES       512
225
226#define RXR_MAX_RINGS         16
227#define RXR_RING_ENTRIES      1024
228
229#define RXR_MAX_RINGS_05      1
230#define RXR_RING_ENTRIES_05   512
231
232#define RXR_MAX_RINGS_BCM571X_FAMILY    1
233#define RXR_RING_ENTRIES_BCM571X_FAMILY 512
234
235
#define BCM571X_FAMILY_DEVICE(dev)  ((dev) == K_PCI_ID_BCM5780)
237
238/* BCM570x Send Buffer Descriptors as a struct.  Pages 100-101 */
239
240typedef struct t3_snd_bd_s {
241    uint32_t  bufptr_hi;
242    uint32_t  bufptr_lo;
243#if ENDIAN_BIG
244    uint16_t  length;
245    uint16_t  flags;
246    uint16_t  pad;
247    uint16_t  vlan_tag;
248#elif ENDIAN_LITTLE
249    uint16_t  flags;
250    uint16_t  length;
251    uint16_t  vlan_tag;
252    uint16_t  pad;
253#else
254#error "bcm5700: endian not set"
255#endif
256} t3_snd_bd_t;
257
258#define SND_BD_SIZE           16
259
260#define TX_FLAG_TCP_CKSUM     0x0001
261#define TX_FLAG_IP_CKSUM      0x0002
262#define TX_FLAG_PACKET_END    0x0004
263#define TX_FLAG_IP_FRAG       0x0008
264#define TX_FLAG_IP_FRAG_END   0x0010
265#define TX_FLAG_VLAN_TAG      0x0040
266#define TX_FLAG_COAL_NOW      0x0080
267#define TX_FLAG_CPU_PRE_DMA   0x0100
268#define TX_FLAG_CPU_POST_DMA  0x0200
269#define TX_FLAG_ADD_SRC       0x1000
270#define TX_FLAG_SRC_ADDR_SEL  0x6000
271#define TX_FLAG_NO_CRC        0x8000
272
273/* BCM570x Receive Buffer Descriptors as a struct.  Pages 105-107 */
274
275typedef struct t3_rcv_bd_s {
276    uint32_t  bufptr_hi;
277    uint32_t  bufptr_lo;
278#if ENDIAN_BIG
279    uint16_t  index;
280    uint16_t  length;
281    uint16_t  type;
282    uint16_t  flags;
283    uint16_t  ip_cksum;
284    uint16_t  tcp_cksum;
285    uint16_t  error_flag;
286    uint16_t  vlan_tag;
287#elif ENDIAN_LITTLE
288    uint16_t  length;
289    uint16_t  index;
290    uint16_t  flags;
291    uint16_t  type;
292    uint16_t  tcp_cksum;
293    uint16_t  ip_cksum;
294    uint16_t  vlan_tag;
295    uint16_t  error_flag;
296#else
297#error "bcm5700: endian not set"
298#endif
299    uint32_t  pad;
300    uint32_t  opaque;
301} t3_rcv_bd_t;
302
303#define RCV_BD_SIZE           32
304
305#define RX_FLAG_PACKET_END    0x0004
306#define RX_FLAG_JUMBO_RING    0x0020
307#define RX_FLAG_VLAN_TAG      0x0040
308#define RX_FLAG_ERROR         0x0400
309#define RX_FLAG_MINI_RING     0x0800
310#define RX_FLAG_IP_CKSUM      0x1000
311#define RX_FLAG_TCP_CKSUM     0x2000
312#define RX_FLAG_IS_TCP        0x4000
313
314#define RX_ERR_BAD_CRC        0x0001
315#define RX_ERR_COLL_DETECT    0x0002
316#define RX_ERR_LINK_LOST      0x0004
317#define RX_ERR_PHY_DECODE     0x0008
318#define RX_ERR_DRIBBLE        0x0010
319#define RX_ERR_MAC_ABORT      0x0020
320#define RX_ERR_SHORT_PKT      0x0040
321#define RX_ERR_TRUNC_NO_RES   0x0080
322#define RX_ERR_GIANT_PKT      0x0100
323
324/* BCM570x Status Block format as a struct (not BCM5705).  Pages 110-111. */
325
326typedef struct t3_status_s {
327    uint32_t status;
328    uint32_t tag;
329#if ENDIAN_BIG
330    uint16_t rxc_std_index;
331    uint16_t rxc_jumbo_index;
332    uint16_t reserved2;
333    uint16_t rxc_mini_index;
334    struct {
335	uint16_t send_c;
336	uint16_t return_p;
337    } index [16];
338#elif ENDIAN_LITTLE
339    uint16_t rxc_jumbo_index;
340    uint16_t rxc_std_index;
341    uint16_t rxc_mini_index;
342    uint16_t reserved2;
343    struct {
344	uint16_t return_p;
345	uint16_t send_c;
346    } index [16];
347#else
348#error "bcm5700: endian not set"
349#endif
350} t3_status_t;
351
352#define M_STATUS_UPDATED        0x00000001
353#define M_STATUS_LINKCHNG       0x00000002
354#define M_STATUS_ERROR          0x00000004
355
356/* BCM570x Statistics Block format as a struct.  Pages 112-120 */
357
358typedef struct t3_stats_s {
359    uint64_t stats[L_MAC_STATS/sizeof(uint64_t)];
360} t3_stats_t;
361
362/* A common memory area for supplying zeros to clear the on-chip stats. */
363static t3_stats_t *zero_stats = NULL;
364
/* Encoded status transfer block size (32, 64 or 80 bytes).  Page 412 */
366
367#define STATUS_BLOCK_SIZE(rings) \
368         ((rings) <= 4  ? K_HCM_SBSIZE_32 : \
369          (rings) <= 12 ? K_HCM_SBSIZE_64 : \
370          K_HCM_SBSIZE_80)
371
372/* End of 570X defined data structures */
373
374/* The maximum supported BD ring index (QOS) for transmit or receive. */
375
376#define MAX_RI                 1
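/* Worked example of the encoding above (illustrative): with MAX_RI == 1 the
   ring count falls in the "<= 4" case, so STATUS_BLOCK_SIZE(MAX_RI) encodes
   K_HCM_SBSIZE_32, the smallest status block transfer. */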
377
378
379typedef enum {
380    eth_state_uninit,
381    eth_state_off,
382    eth_state_on,
383} eth_state_t;
384
385typedef struct t3_ether_s {
386    /* status block */
387    volatile t3_status_t *status;  /* should be cache-aligned */
388
389    /* PCI access information */
390    uint32_t  regbase;
391    uint32_t  membase;
392    uint8_t   irq;
393    pcitag_t  tag;		   /* tag for configuration registers */
394
395    uint8_t   hwaddr[6];
396    uint16_t  device;              /* chip device code */
397    uint8_t   revision;            /* chip revision */
398    uint16_t  asic_revision;       /* mask revision */
399
400    eth_state_t state;             /* current state */
401    uint32_t intmask;              /* interrupt mask */
402
403    int linkspeed;		   /* encodings from cfe_ioctl */
404
405    /* packet lists */
406    queue_t freelist;
407    uint8_t *pktpool;
408    queue_t rxqueue;
409
410    /* rings */
411    /* For now, support only the standard Rx Producer Ring */
412    t3_rcv_bd_t *rxp_std;          /* Standard Rx Producer Ring */
413    uint32_t  rxp_std_index;
414    uint32_t  prev_rxp_std_index;
415
416   /* For now, support only 1 priority */
417    uint32_t  rxr_entries;
418    t3_rcv_bd_t *rxr_1;            /* Rx Return Ring 1 */
419    uint32_t  rxr_1_index;
420    t3_snd_bd_t *txp_1;            /* Send Ring 1 */
421    uint32_t  txp_1_index;
422    uint32_t  txc_1_index;
423
424    cfe_devctx_t *devctx;
425
426    /* PHY access */
427    int      phy_addr;
428    uint16_t phy_status;
429    uint16_t phy_ability;
430    uint16_t phy_xability;
431    uint32_t phy_vendor;
432    uint16_t phy_device;
433
434    /* MII polling control */
435    int      phy_change;
436    int      mii_polling;
437
438    /* statistics block */
439    volatile t3_stats_t *stats;    /* should be cache-aligned */
440
441    /* additional driver statistics */
442    uint32_t rx_interrupts;
443    uint32_t tx_interrupts;
444    uint32_t bogus_interrupts;
445} t3_ether_t;
446
447
448/* Address mapping macros */
449
450#if CFG_L2_RAM   /* Temporarily here for SiByte SOCs running from L2 */
451#define PTR_TO_PHYS(x) (((uintptr_t)(x)) + 0xD0000000)
452#define PHYS_TO_PTR(a) ((uint8_t *)((a) - 0xD0000000))
453#else
454#define PTR_TO_PHYS(x) (PHYSADDR((uintptr_t)(x)))
455#define PHYS_TO_PTR(a) ((uint8_t *)KERNADDR(a))
456#endif
457
458#define PCI_TO_PTR(a)  (PHYS_TO_PTR(PCI_TO_PHYS(a)))
459#define PTR_TO_PCI(x)  (PHYS_TO_PCI(PTR_TO_PHYS(x)))
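/* A minimal sketch (illustrative helpers, not part of the original driver) of
   how these mappings pair up in the descriptor code below: a packet buffer is
   handed to the chip as a 32-bit PCI address, and the bufptr_lo of a completed
   BD is mapped back to the enclosing eth_pkt_t. */
static inline uint32_t
t3_example_pkt_to_pci(eth_pkt_t *pkt)
{
    return PTR_TO_PCI(pkt->buffer);              /* value written to a BD bufptr_lo */
}

static inline eth_pkt_t *
t3_example_pkt_from_pci(uint32_t bufptr_lo)
{
    return ETH_PKT_BASE(PCI_TO_PTR(bufptr_lo));  /* recover the packet header */
}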
460
461/* Chip access macros */
462
463#if (ENDIAN_BIG && MATCH_BYTES)
464/* The host cannot support match-bits mode when operating big-endian.
465   The 5700 swapping control can deal with this, but for now, just
466   use byte-swapped access to the CSRs.  */
467
468#define CSR_MATCH_MODE        PCI_MATCH_BYTES
469
470#define READCSR(sc,csr)       (phys_read32_swapped((sc)->regbase + (csr)))
471#define WRITECSR(sc,csr,val)  (phys_write32_swapped((sc)->regbase + (csr), (val)))
472
473#if PIOSWAP
474#define READMBOX(sc,csr)      (phys_read32_swapped((sc)->regbase+((csr)^4)))
475#define WRITEMBOX(sc,csr,val) (phys_write32_swapped((sc)->regbase+((csr)^4), (val)))
476
477#define READMEM(sc,csr)       (phys_read32_swapped((sc)->membase+(csr)))
478#define WRITEMEM(sc,csr,val)  (phys_write32_swapped((sc)->membase+(csr), (val)))
479
480#else
481#define READMBOX(sc,csr)      (phys_read32_swapped((sc)->regbase+(csr)))
482#define WRITEMBOX(sc,csr,val) (phys_write32_swapped((sc)->regbase+(csr), (val)))
483
484#define READMEM(sc,csr)       (phys_read32_swapped((sc)->membase+((csr)^4)))
485#define WRITEMEM(sc,csr,val)  (phys_write32_swapped((sc)->membase+((csr)^4), (val)))
486
487#endif
488#else  /* !ENDIAN_BIG || !MATCH_BYTES */
489/* These macros attempt to be compatible with match-bits mode,
490   which may put the data and byte masks into the wrong 32-bit word
491   for 64-bit accesses.  See the comment above on PIOSWAP.
492   Externally mastered DMA (control and data) uses match-bits and does
493   specify word-swaps when operating big endian.  */
494
495/* Most registers are 32 bits wide and are accessed by 32-bit
496   transactions.  The mailbox registers and on-chip RAM are 64-bits
497   wide but are generally accessed by 32-bit transactions.
498   Furthermore, the documentation is ambiguous about which 32-bits of
499   the mailbox is significant.  To localize the potential confusions,
500   we define macros for the 3 different cases.  */
501
502#define CSR_MATCH_MODE        PCI_MATCH_BITS
503
504#define READCSR(sc,csr)       (l_phys_read32((sc)->regbase + (csr)))
505#define WRITECSR(sc,csr,val)  (l_phys_write32((sc)->regbase + (csr), (val)))
506
507#if PIOSWAP
508#define READMBOX(sc,csr)      (phys_read32((sc)->regbase+((csr)^4)))
509#define WRITEMBOX(sc,csr,val) (phys_write32((sc)->regbase+((csr)^4), (val)))
510
511#define READMEM(sc,csr)       (phys_read32((sc)->membase+(csr)))
512#define WRITEMEM(sc,csr,val)  (phys_write32((sc)->membase+(csr), (val)))
513
514#else
515#define READMBOX(sc,csr)      (l_phys_read32((sc)->regbase+(csr)))
516#define WRITEMBOX(sc,csr,val) (l_phys_write32((sc)->regbase+(csr), (val)))
517
518#define READMEM(sc,csr)       (l_phys_read32((sc)->membase+((csr)^4)))
519#define WRITEMEM(sc,csr,val)  (l_phys_write32((sc)->membase+((csr)^4), (val)))
520
521#endif
522#endif  /* MATCH_BYTES */
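/* Note on the "(csr)^4" above (illustrative): XOR-ing the offset with 4
   selects the other 32-bit half of a 64-bit mailbox or on-chip RAM word,
   which is how these macros stay consistent with the word-swap settings
   discussed under PIOSWAP. */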
523
524
525/* 64-bit swap macros. */
526
527#if ENDIAN_LITTLE
528/* For little-endian systems, we use PCI swap settings that preserve
529   the offsets of 32-bit fields in control structs (e.g.,
530   descriptors).  As a result, upper and lower halves of 64-bit
531   control fields are swapped.  We deal with this explicitly for
532   addresses but must normalize MIB counters.  This choice could be
533   reconsidered. */
534
535static uint64_t
536ctoh64(uint64_t x)
537{
538    return ((x & 0xFFFFFFFF) << 32) | ((x >> 32) & 0xFFFFFFFF);
539}
540
541#else
542#define ctoh64(x) ((uint64_t)(x))
543
544#endif /* ENDIAN_LITTLE */
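/* Sketch of the intended use (illustrative helper, not in the original
   source): 64-bit MIB counters DMA'd into the statistics block have their
   halves in control-structure order, so reads are normalized with ctoh64(). */
static inline uint64_t
t3_example_read_stat(volatile t3_stats_t *stats, unsigned int index)
{
    return ctoh64(stats->stats[index]);
}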
545
546
547/* Entry to and exit from critical sections (currently relative to
548   interrupts only, not SMP) */
549
550#if CFG_INTERRUPTS
551#define CS_ENTER(sc) cfe_disable_irq(sc->irq)
552#define CS_EXIT(sc)  cfe_enable_irq(sc->irq)
553#else
554#define CS_ENTER(sc) ((void)0)
555#define CS_EXIT(sc)  ((void)0)
556#endif
557
558
559static void
560dumpseq(t3_ether_t *sc, int start, int next)
561{
562    int offset, i, j;
563    int columns = 4;
564    int lines = (((next - start)/4 + 1) + 3)/columns;
565    int step = lines*4;
566
567    offset = start;
568    for (i = 0; i < lines; i++) {
569	xprintf("\nCSR");
570	for (j = 0; j < columns; j++) {
571	    if (offset + j*step < next)
572		xprintf(" %04X: %08X ",
573			offset+j*step, READCSR(sc, offset+j*step));
574	    }
575	offset += 4;
576	}
577    xprintf("\n");
578}
579
580static void
581dumpcsrs(t3_ether_t *sc, const char *legend)
582{
583    xprintf("%s:\n", legend);
584
585    /* Some device-specific PCI configuration registers */
586    xprintf("-----PCI-----");
587    dumpseq(sc, 0x68, 0x78);
588
589    /* Some general control registers */
590    xprintf("---General---");
591    dumpseq(sc, 0x6800, 0x6810);
592
593    xprintf("-------------\n");
594}
595
596
597/* Packet management.  Note that MIN_RXP_STD_BDS must be at least as
598   big as STD_RCV_BD_THRESH */
599
600#define ETH_PKTPOOL_SIZE  16
601#define MIN_RXP_STD_BDS   8
602
603
604static eth_pkt_t *
605eth_alloc_pkt(t3_ether_t *sc)
606{
607    eth_pkt_t *pkt;
608
609    CS_ENTER(sc);
610    pkt = (eth_pkt_t *) q_deqnext(&sc->freelist);
611    CS_EXIT(sc);
612    if (!pkt) return NULL;
613
614    pkt->buffer = pkt->data;
615    pkt->length = ETH_PKTBUF_LEN;
616    pkt->flags = 0;
617
618    return pkt;
619}
620
621
622static void
623eth_free_pkt(t3_ether_t *sc, eth_pkt_t *pkt)
624{
625    CS_ENTER(sc);
626    q_enqueue(&sc->freelist, &pkt->next);
627    CS_EXIT(sc);
628}
629
630static void
631eth_initfreelist(t3_ether_t *sc)
632{
633    int idx;
634    uint8_t *ptr;
635    eth_pkt_t *pkt;
636
637    q_init(&sc->freelist);
638
639    ptr = sc->pktpool;
640    for (idx = 0; idx < ETH_PKTPOOL_SIZE; idx++) {
641	pkt = (eth_pkt_t *) ptr;
642	eth_free_pkt(sc, pkt);
643	ptr += ETH_PKTBUF_SIZE;
644	}
645}
646
647
648/* Utilities */
649
650static const char *
651t3_devname(t3_ether_t *sc)
652{
653    return (sc->devctx != NULL ? cfe_device_name(sc->devctx) : "eth?");
654}
655
656
657/* CRCs */
658
659uint32_t eth_crc32(const uint8_t *databuf, unsigned int datalen);
660/*static*/ uint32_t
661eth_crc32(const uint8_t *databuf, unsigned int datalen)
662{
663    unsigned int idx, bit, data;
664    uint32_t crc;
665
666    crc = 0xFFFFFFFFUL;
667    for (idx = 0; idx < datalen; idx++)
668	for (data = *databuf++, bit = 0; bit < 8; bit++, data >>= 1)
669	    crc = (crc >> 1) ^ (((crc ^ data) & 1) ? ENET_CRC32_POLY : 0);
670    return crc;
671}
672
673
674/* Descriptor ring management */
675
676/* Modular arithmetic is done by masking (assumes powers of 2) */
677#define RXP_STD_MASK  (RXP_STD_ENTRIES-1)
678#define TXP_RING_MASK (TXP_RING_ENTRIES-1)
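/* A small sketch (illustrative only) of the wrap-safe arithmetic this enables:
   with power-of-two ring sizes, the number of BDs the host has posted but the
   chip has not yet consumed is a masked difference of the indices, as used in
   t3_fillrxring() below. */
static inline unsigned int
t3_example_ring_occupancy(unsigned int producer, unsigned int consumer,
			  unsigned int mask)
{
    return (producer - consumer) & mask;
}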
679
680static int
681t3_add_rcvbuf(t3_ether_t *sc, eth_pkt_t *pkt)
682{
683    t3_rcv_bd_t *rxp;
684
685    rxp = &(sc->rxp_std[sc->rxp_std_index]);
686    rxp->bufptr_lo = PTR_TO_PCI(pkt->buffer);
687    rxp->length = ETH_PKTBUF_LEN;
688    CACHE_DMA_SYNC(rxp, sizeof(t3_rcv_bd_t));
689    sc->rxp_std_index = (sc->rxp_std_index + 1) & RXP_STD_MASK;
690    return 0;
691}
692
693static void
694t3_fillrxring(t3_ether_t *sc)
695{
696    eth_pkt_t *pkt;
697    unsigned rxp_ci, rxp_onring;
698
699    rxp_ci = sc->status->rxc_std_index;  /* Get a snapshot */
700    rxp_onring = (sc->rxp_std_index - rxp_ci) & RXP_STD_MASK;
701
702    while (rxp_onring < MIN_RXP_STD_BDS) {
703	pkt = eth_alloc_pkt(sc);
704	if (pkt == NULL) {
705	    /* could not allocate a buffer */
706	    break;
707	    }
708	if (t3_add_rcvbuf(sc, pkt) != 0) {
709	    /* could not add buffer to ring */
710	    eth_free_pkt(sc, pkt);
711	    break;
712	    }
713	rxp_onring++;
714	}
715}
716
717static void
718t3_rx_callback(t3_ether_t *sc, eth_pkt_t *pkt)
719{
720    if (T3_DEBUG) show_packet('>', pkt);   /* debug */
721
722    CS_ENTER(sc);
723    q_enqueue(&sc->rxqueue, &pkt->next);
724    CS_EXIT(sc);
725}
726
727static void
728t3_procrxring(t3_ether_t *sc)
729{
730    eth_pkt_t   *pkt;
731    t3_rcv_bd_t *rxc;
732    uint32_t     rxr_1_index;
733    volatile t3_status_t *status = sc->status;
734
735    rxr_1_index = sc->rxr_1_index;
736    rxc = &(sc->rxr_1[rxr_1_index]);
737
742    do {
743	CACHE_DMA_INVAL(rxc, sizeof(t3_rcv_bd_t));
744	pkt = ETH_PKT_BASE(PCI_TO_PTR(rxc->bufptr_lo));
745	pkt->length = rxc->length - ENET_CRC_SIZE;
746	if ((rxc->flags & RX_FLAG_ERROR) == 0)
747	    t3_rx_callback(sc, pkt);
748	else {
749#if T3_BRINGUP
750	    xprintf("%s: rx error %04X\n", t3_devname(sc), rxc->error_flag);
751#endif
752	    eth_free_pkt(sc, pkt);   /* Could optimize */
753	    }
754	rxr_1_index++;
755	rxc++;
756	if (rxr_1_index == sc->rxr_entries) {
757	    rxr_1_index = 0;
758	    rxc = sc->rxr_1;
759	    }
760	} while (status->index[RI(1)].return_p != rxr_1_index);
761
762    /* Update the return ring */
763    sc->rxr_1_index = rxr_1_index;
764    WRITEMBOX(sc, R_RCV_BD_RTN_CI(1), rxr_1_index);
765
766    /* Refill the producer ring */
767    t3_fillrxring(sc);
768}
769
770
771static int
772t3_transmit(t3_ether_t *sc, eth_pkt_t *pkt)
773{
774    t3_snd_bd_t *txp;
775    uint32_t     txp_1_next;
776
777    if (T3_DEBUG) show_packet('<', pkt);   /* debug */
778
779    txp_1_next = (sc->txp_1_index + 1) & TXP_RING_MASK;
780    if (txp_1_next == sc->txc_1_index)
781	return -1;
782
783    txp = &(sc->txp_1[sc->txp_1_index]);
784    txp->bufptr_hi = 0;
785    txp->bufptr_lo = PTR_TO_PCI(pkt->buffer);
786    txp->length = pkt->length;
787    txp->flags = TX_FLAG_PACKET_END;
788    CACHE_DMA_SYNC(txp, sizeof(t3_snd_bd_t));
789
790    sc->txp_1_index = txp_1_next;
791    WRITEMBOX(sc, R_SND_BD_PI(1), txp_1_next);
792
793    return 0;
794}
795
796
797static void
798t3_proctxring(t3_ether_t *sc)
799{
800    eth_pkt_t   *pkt;
801    t3_snd_bd_t *txc;
802    uint32_t     txc_1_index;
803    volatile t3_status_t *status = sc->status;
804
805    txc_1_index = sc->txc_1_index;
806    txc = &(sc->txp_1[txc_1_index]);
807    do {
808	CACHE_DMA_INVAL(txc, sizeof(t3_snd_bd_t));
809	pkt = ETH_PKT_BASE(PCI_TO_PTR(txc->bufptr_lo));
810	eth_free_pkt(sc, pkt);
811	txc_1_index = (txc_1_index + 1) & TXP_RING_MASK;
812	txc++;
813	if (txc_1_index == 0) txc = sc->txp_1;
814	} while (status->index[RI(1)].send_c != txc_1_index);
815
816    sc->txc_1_index = txc_1_index;
817}
818
819
820static void
821t3_initrings(t3_ether_t *sc)
822{
823    int  i;
824    t3_rcv_bd_t *rxp;
825    volatile t3_status_t *status = sc->status;
826
827    /* Clear all Producer BDs */
828    rxp = &(sc->rxp_std[0]);
829    for (i = 0; i < RXP_STD_ENTRIES; i++) {
830        rxp->bufptr_hi = rxp->bufptr_lo = 0;
831	rxp->length = 0;
832	rxp->index = i;
833	rxp->flags = 0;
834	rxp->type = 0;
835	rxp->ip_cksum = rxp->tcp_cksum = 0;
836	rxp++;
837	}
838    CACHE_DMA_SYNC(sc->rxp_std, sizeof(t3_rcv_bd_t)*RXP_STD_ENTRIES);
839    CACHE_DMA_INVAL(sc->rxp_std, sizeof(t3_rcv_bd_t)*RXP_STD_ENTRIES);
840
841    /* Init the ring pointers */
842
843    sc->rxp_std_index = 0;  status->rxc_std_index = 0;
844    sc->rxr_1_index = 0;    status->index[RI(1)].return_p = 0;
845    sc->txp_1_index = 0;    status->index[RI(1)].send_c = 0;
846
847    /* Allocate some initial buffers for the Producer BD ring */
848    sc->prev_rxp_std_index = 0;
849    t3_fillrxring(sc);
850
851    /* Nothing consumed yet */
852    sc->txc_1_index = 0;
853}
854
855/* Allocate an integral number of cache lines suitable for DMA access. */
856static uint8_t *
857dma_alloc(size_t size, unsigned int align)
858{
859    uint8_t *base;
860    size_t len = ALIGN(size, CACHE_ALIGN);
861
862    base = KMALLOC(len, ALIGN(align, CACHE_ALIGN));
863    if (base != NULL)
864	CACHE_DMA_INVAL(base, len);
865    return base;
866}
867
868static void
869t3_init(t3_ether_t *sc)
870{
871    /* Allocate buffer pool */
872    sc->pktpool = dma_alloc(ETH_PKTPOOL_SIZE*ETH_PKTBUF_SIZE, CACHE_ALIGN);
873
874    if (sc->pktpool != NULL) {
875	eth_initfreelist(sc);
876	q_init(&sc->rxqueue);
877
878	t3_initrings(sc);
879	}
880}
881
882static void
883t3_reinit(t3_ether_t *sc)
884{
885    eth_initfreelist(sc);
886    q_init(&sc->rxqueue);
887
888    t3_initrings(sc);
889}
890
891
892#if ENDIAN_BIG
893/* Byte swap utilities. */
894
895#define SWAP4(x) \
896    ((((x) & 0x00FF) << 24) | \
897     (((x) & 0xFF00) << 8)  | \
898     (((x) >> 8) & 0xFF00)  | \
899     (((x) >> 24) & 0x00FF))
900
901static uint32_t
902swap4(uint32_t x)
903{
904    uint32_t t;
905
906    t = ((x & 0xFF00FF00) >> 8) | ((x & 0x00FF00FF) << 8);
907    return (t >> 16) | ((t & 0xFFFF) << 16);
908}
909#endif /* ENDIAN_BIG */
910
911
912/* EEPROM access functions (BCM5700 and BCM5701 version) */
913
914/* The 570x chips support multiple access methods.  We use "Auto Access",
915   which requires that
916     Miscellaneous_Local_Control.Auto_SEEPROM_Access be set,
     Serial_EEprom.Address.HalfClock be programmed for <= 400 KHz.
918   (both done by initialization code) */
919
920#define EP_MAX_RETRIES  500
921#define EP_DEVICE_ID    0x00           /* default ATMEL device ID */
922
923static void
924eeprom_access_init(t3_ether_t *sc)
925{
926  uint32_t mlctl;
927
928  WRITECSR(sc, R_EEPROM_ADDR, M_EPADDR_RESET | V_EPADDR_HPERIOD(0x60));
929
930  mlctl = READCSR(sc, R_MISC_LOCAL_CTRL);
931  mlctl |= M_MLCTL_EPAUTOACCESS;
932  WRITECSR(sc, R_MISC_LOCAL_CTRL, mlctl);
933}
934
935
936static uint32_t
937eeprom_read_word(t3_ether_t *sc, unsigned int offset)
938{
939    /* Assumes that SEEPROM is already set up for auto access. */
940    uint32_t epaddr, epdata;
941    volatile uint32_t temp;
942    int i;
943
944    epaddr = READCSR(sc, R_EEPROM_ADDR);
945    epaddr &= M_EPADDR_HPERIOD;
946    epaddr |= (V_EPADDR_ADDR(offset) | V_EPADDR_DEVID(EP_DEVICE_ID)
947	       | M_EPADDR_RW | M_EPADDR_START | M_EPADDR_COMPLETE);
948    WRITECSR(sc, R_EEPROM_ADDR, epaddr);
949    temp = READCSR(sc, R_EEPROM_ADDR);   /* push */
950
951    for (i = 0; i < EP_MAX_RETRIES; i++) {
952        temp = READCSR(sc, R_EEPROM_ADDR);
953	if ((temp & M_EPADDR_COMPLETE) != 0)
954	    break;
955	cfe_usleep(10);
956    }
957    if (i == EP_MAX_RETRIES)
958	xprintf("%s: eeprom_read_word: no SEEPROM response @ %x\n",
959		t3_devname(sc), offset);
960
961    epdata = READCSR(sc, R_EEPROM_DATA);   /* little endian */
962#if ENDIAN_BIG
963    return swap4(epdata);
964#else
965    return epdata;
966#endif
967}
968
969static int
970eeprom_read_range(t3_ether_t *sc, unsigned int offset, unsigned int len,
971		  uint32_t buf[])
972{
973    int index;
974
975    offset &= ~3;  len &= ~3;     /* 4-byte words only */
976    index = 0;
977
978    while (len > 0) {
979	buf[index++] = eeprom_read_word(sc, offset);
980	offset += 4;  len -= 4;
981	}
982
983    return index;
984}
985
986static void
987eeprom_dump_range(const char *label,
988		  uint32_t buf[], unsigned int offset, unsigned int len)
989{
990    int index;
991
992    xprintf("EEPROM: %s", label);
993
994    offset &= ~3;  len &= ~3;     /* 4-byte words only */
995    index = 0;
996
997    for (index = 0; len > 0; index++) {
998	if (index % 8 == 0)
999	    xprintf("\n %04x: ", offset);
	xprintf(" %08x", buf[index]);
1001	offset += 4;  len -= 4;
1002	}
1003    xprintf("\n");
1004}
1005
1006
1007/* MII access functions.  */
1008
1009/* BCM5401 device specific registers */
1010
1011#define MII_ISR         0x1A    /* Interrupt Status Register */
1012#define MII_IMR         0x1B    /* Interrupt Mask Register */
1013
1014#define M_INT_LINKCHNG  0x0002
1015
1016
1017/* The 570x chips support multiple access methods.  We use "Auto
1018   Access", which requires that MDI_Control_Register.MDI_Select be
1019   clear (done by initialization code) */
1020
1021#define MII_MAX_RETRIES 5000
1022
1023static void
1024mii_access_init(t3_ether_t *sc)
1025{
1026    WRITECSR(sc, R_MDI_CTRL, 0);                    /* here for now */
1027#if !T3_AUTOPOLL
1028    WRITECSR(sc, R_MI_MODE, V_MIMODE_CLKCNT(0x1F));  /* max divider */
1029#endif
1030}
1031
1032/* XXX Autopolling should be disabled during reads and writes per the
   manual, but doing so currently generates recursive LINKCHNG
1034   attentions. */
1035
1036static uint16_t
1037mii_read_register(t3_ether_t *sc, int phy, int index)
1038{
1039    uint32_t mode;
1040    uint32_t comm, val = 0;
1041    int   i;
1042
1043    mode = READCSR(sc, R_MI_MODE);
1044#if 0 /* for now */
1045    if (mode & M_MIMODE_POLLING) {
1046	WRITECSR(sc, R_MI_MODE, mode & ~M_MIMODE_POLLING);
1047	cfe_usleep(40);
1048	}
1049#endif
1050
1051    comm = (V_MICOMM_CMD_RD | V_MICOMM_PHY(phy) | V_MICOMM_REG(index)
1052	    | M_MICOMM_BUSY);
1053    WRITECSR(sc, R_MI_COMM, comm);
1054
1055    for (i = 0; i < MII_MAX_RETRIES; i++) {
1056	val = READCSR(sc, R_MI_COMM);
1057	if ((val & M_MICOMM_BUSY) == 0)
1058	    break;
1059	}
1060    if (i == MII_MAX_RETRIES)
1061	xprintf("%s: mii_read_register: MII always busy\n", t3_devname(sc));
1062
1063#if 0
1064    if (mode & M_MIMODE_POLLING)
1065	WRITECSR(sc, R_MI_MODE, mode);
1066#endif
1067
1068    return G_MICOMM_DATA(val);
1069}
1070
1071/* Register reads occasionally return spurious 0's.  Verify a zero by
1072   doing a second read, or spinning when a zero is "impossible".  */
1073static uint16_t
1074mii_read_register_v(t3_ether_t *sc, int phy, int index, int spin)
1075{
1076    uint32_t val;
1077
1078    val = mii_read_register(sc, phy, index);
1079    if (val == 0) {
1080	do {
1081	    val = mii_read_register(sc, phy, index);
1082	    } while (spin && val == 0);
1083	}
1084    return val;
1085}
1086
1087static void
1088mii_write_register(t3_ether_t *sc, int phy, int index, uint16_t value)
1089{
1090    uint32_t mode;
1091    uint32_t comm, val;
1092    int   i;
1093
1094    mode = READCSR(sc, R_MI_MODE);
1095#if 0 /* for now */
1096    if (mode & M_MIMODE_POLLING) {
1097	WRITECSR(sc, R_MI_MODE, mode & ~M_MIMODE_POLLING);
1098	cfe_usleep(40);
1099	}
1100#endif
1101
1102    comm = (V_MICOMM_CMD_WR | V_MICOMM_PHY(phy) | V_MICOMM_REG(index)
1103	    | V_MICOMM_DATA(value) | M_MICOMM_BUSY);
1104    WRITECSR(sc, R_MI_COMM, comm);
1105
1106    for (i = 0; i < MII_MAX_RETRIES; i++) {
1107	val = READCSR(sc, R_MI_COMM);
1108	if ((val & M_MICOMM_BUSY) == 0)
1109	    break;
1110	}
1111    if (i == MII_MAX_RETRIES)
1112	xprintf("%s: mii_write_register: MII always busy\n", t3_devname(sc));
1113
1114#if 0
1115    if (mode & M_MIMODE_POLLING)
1116	WRITECSR(sc, R_MI_MODE, mode);
1117#endif
1118}
1119
1120static int
1121mii_probe(t3_ether_t *sc)
1122{
1123    uint16_t id1, id2;
1124
1125#if T3_AUTOPOLL   /* With autopolling, the code below is not reliable.  */
1126    sc->phy_addr = 1;     /* Guaranteed for integrated PHYs */
1127    id1 = mii_read_register(sc, 1, MII_PHYIDR1);
1128    id2 = mii_read_register(sc, 1, MII_PHYIDR2);
1129    sc->phy_vendor = ((uint32_t)id1 << 6) | ((id2 >> 10) & 0x3F);
1130    sc->phy_device = (id2 >> 4) & 0x3F;
1131    return 0;
1132#else
1133    int i;
1134
1135    for (i = 0; i < 32; i++) {
1136        id1 = mii_read_register(sc, i, MII_PHYIDR1);
1137	id2 = mii_read_register(sc, i, MII_PHYIDR2);
1138	if ((id1 != 0x0000 && id1 != 0xFFFF) ||
1139	    (id2 != 0x0000 && id2 != 0xFFFF)) {
1140	    if (id1 != id2) {
1141	        sc->phy_addr = i;
1142		sc->phy_vendor = ((uint32_t)id1 << 6) | ((id2 >> 10) & 0x3F);
1143		sc->phy_device = (id2 >> 4) & 0x3F;
1144		return 0;
1145		}
1146	    }
1147	}
1148    return -1;
1149#endif
1150}
1151
1152#if T3_DEBUG
1153static void
1154mii_dump(t3_ether_t *sc, const char *label)
1155{
1156    int i;
1157    uint16_t  r;
1158
1159    xprintf("%s, MII:\n", label);
1160
1161    /* Required registers */
1162    for (i = 0x0; i <= 0x6; ++i) {
1163	r = mii_read_register(sc, sc->phy_addr, i);
1164	xprintf(" REG%02X: %04X", i, r);
1165	if (i == 3 || i == 6)
1166	    xprintf("\n");
1167	}
1168
1169    /* GMII extensions */
1170    for (i = 0x9; i <= 0xA; ++i) {
1171	r = mii_read_register(sc, sc->phy_addr, i);
1172	xprintf(" REG%02X: %04X", i, r);
1173	}
1174    r = mii_read_register(sc, sc->phy_addr, 0xF);
1175    xprintf(" REG%02X: %04X\n", 0xF, r);
1176
1177    /* Broadcom extensions (54xx family) */
1178    if (sc->phy_vendor == OUI_BCM) {
1179	for (i = 0x10; i <= 0x14; i++) {
1180	    r = mii_read_register(sc, sc->phy_addr, i);
1181	    xprintf(" REG%02X: %04X", i, r);
1182	    }
1183	xprintf("\n");
1184	for (i = 0x18; i <= 0x1A; i++) {
1185	    r = mii_read_register(sc, sc->phy_addr, i);
1186	    xprintf(" REG%02X: %04X", i, r);
1187	    }
1188	xprintf("\n");
1189	}
1190}
1191#else
1192#define mii_dump(sc,label)
1193#endif
1194
1195static void
1196mii_enable_interrupts(t3_ether_t *sc)
1197{
1198  mii_write_register(sc, sc->phy_addr, MII_IMR, ~M_INT_LINKCHNG);
1199}
1200
1201
1202/* For 5700/5701, LINKCHNG is read-only in the status register and
1203   cleared by writing to CFGCHNG | SYNCCHNG.  For the 5705
1204   (empirically), LINKCHNG is cleared by writing a one, while CFGCHNG
1205   and SYNCCHNG are unimplemented.  Thus we can safely clear the
1206   interrupt by writing ones to all the above bits.  */
1207
1208#define M_LINKCHNG_CLR \
1209    (M_EVT_LINKCHNG | M_MACSTAT_CFGCHNG | M_MACSTAT_SYNCCHNG)
1210
1211static int
1212mii_poll(t3_ether_t *sc)
1213{
1214    uint32_t  macstat;
1215    uint16_t  status, ability, xability;
1216    uint16_t isr;
1217
1218    macstat = READCSR(sc, R_MAC_STATUS);
1219    if ((macstat & (M_EVT_LINKCHNG | M_EVT_MIINT)) != 0)
1220	WRITECSR(sc, R_MAC_STATUS, M_LINKCHNG_CLR);
1221
1222    /* BMSR has read-to-clear bits; read twice.  */
1223
1224    status = mii_read_register(sc, sc->phy_addr, MII_BMSR);
1225    status = mii_read_register_v(sc, sc->phy_addr, MII_BMSR, 1);
1226    ability = mii_read_register_v(sc, sc->phy_addr, MII_ANLPAR, 0);
1227    if (status & BMSR_1000BT_XSR)
1228	xability = mii_read_register_v(sc, sc->phy_addr, MII_K1STSR, 0);
1229    else
1230	xability = 0;
1231    isr = mii_read_register(sc, sc->phy_addr, MII_ISR);
1232
1233    if (status != sc->phy_status
1234	|| ability != sc->phy_ability || xability != sc->phy_xability) {
1235#if T3_DEBUG
1236	xprintf("[%04x]", isr);
1237	xprintf((macstat & (M_EVT_LINKCHNG | M_EVT_MIINT)) != 0 ? "+" : "-");
1238
1239	if (status != sc->phy_status)
1240	    xprintf(" ST: %04x %04x", sc->phy_status, status);
1241	if (ability != sc->phy_ability)
1242	    xprintf(" AB: %04x %04x", sc->phy_ability, ability);
1243	if (xability != sc->phy_xability)
1244	    xprintf(" XA: %04x %04x", sc->phy_xability, xability);
1245	xprintf("\n");
1246#endif
1247        sc->phy_status = status;
1248	sc->phy_ability = ability;
1249	sc->phy_xability = xability;
1250	return 1;
1251	}
1252    else if ((macstat & (M_EVT_LINKCHNG | M_EVT_MIINT)) != 0) {
1253	isr = mii_read_register(sc, sc->phy_addr, MII_ISR);
1254	}
1255    return 0;
1256}
1257
1258static void
1259mii_set_speed(t3_ether_t *sc, int speed)
1260{
1261    uint16_t  control;
1262
1263    control = mii_read_register(sc, sc->phy_addr, MII_BMCR);
1264
1265    control &= ~(BMCR_ANENABLE | BMCR_RESTARTAN);
1266    mii_write_register(sc, sc->phy_addr, MII_BMCR, control);
1267    control &= ~(BMCR_SPEED0 | BMCR_SPEED1 | BMCR_DUPLEX);
1268
1269    switch (speed) {
1270	case ETHER_SPEED_10HDX:
1271	default:
1272	    break;
1273	case ETHER_SPEED_10FDX:
1274	    control |= BMCR_DUPLEX;
1275	    break;
1276	case ETHER_SPEED_100HDX:
1277	    control |= BMCR_SPEED100;
1278	    break;
1279	case ETHER_SPEED_100FDX:
1280	    control |= BMCR_SPEED100 | BMCR_DUPLEX ;
1281	    break;
1282	}
1283
1284    mii_write_register(sc, sc->phy_addr, MII_BMCR, control);
1285}
1286
1287static void
1288mii_autonegotiate(t3_ether_t *sc)
1289{
1290    uint16_t  control, status, remote, xremote;
1291    unsigned int  timeout;
1292    int linkspeed;
1293    uint32_t mode;
1294
1295    linkspeed = ETHER_SPEED_UNKNOWN;
1296
1297    /* Read twice to clear latching bits */
1298    status = mii_read_register(sc, sc->phy_addr, MII_BMSR);
1299    status = mii_read_register_v(sc, sc->phy_addr, MII_BMSR, 1);
1300    mii_dump(sc, "query PHY");
1301
1302    if ((status & (BMSR_AUTONEG | BMSR_LINKSTAT)) ==
1303        (BMSR_AUTONEG | BMSR_LINKSTAT))
1304	control = mii_read_register(sc, sc->phy_addr, MII_BMCR);
1305    else {
1306	for (timeout = 4*CFE_HZ; timeout > 0; timeout -= CFE_HZ/2) {
1307	    status = mii_read_register(sc, sc->phy_addr, MII_BMSR);
1308	    if ((status & BMSR_ANCOMPLETE) != 0)
1309		break;
1310	    cfe_sleep(CFE_HZ/2);
1311	    }
1312	}
1313
1314    remote = mii_read_register_v(sc, sc->phy_addr, MII_ANLPAR, 0);
1315
1316    /* XXX Empirically, it appears best to set/keep PortMode non-null to
1317       get STATUS_LINKCHNG assertions. */
1318    mode = READCSR(sc, R_MAC_MODE);
1319
1320#if T3_DEBUG
1321    xprintf("Mode0:%08X ", (int) mode);
1322#endif
1323
1324    xprintf("%s: Link speed: ", t3_devname(sc));
1325    if ((status & BMSR_ANCOMPLETE) != 0) {
	/* A link partner was negotiated... */
1327
1328	if (status & BMSR_1000BT_XSR)
1329	    xremote = mii_read_register_v(sc, sc->phy_addr, MII_K1STSR, 0);
1330	else
1331	    xremote = 0;
1332
1333	mode &= ~(M_MACM_PORTMODE | M_MACM_HALFDUPLEX);
1334
1335#if T3_DEBUG
1336	xprintf("Mode1:%08X ", (int) mode);
1337#endif
1338
1339	if ((xremote & K1STSR_LP1KFD) != 0) {
1340	    xprintf("1000BaseT FDX\n");
1341	    linkspeed = ETHER_SPEED_1000FDX;
1342	    mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_GMII);
1343	    }
1344	else if ((xremote & K1STSR_LP1KHD) != 0) {
1345	    xprintf("1000BaseT HDX\n");
1346	    linkspeed = ETHER_SPEED_1000HDX;
1347	    mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_GMII) | M_MACM_HALFDUPLEX;
1348	    }
1349	else if ((remote & ANLPAR_TXFD) != 0) {
1350	    xprintf("100BaseT FDX\n");
1351	    linkspeed = ETHER_SPEED_100FDX;
1352	    mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII);
1353	    }
1354	else if ((remote & ANLPAR_TXHD) != 0) {
1355	    xprintf("100BaseT HDX\n");
1356	    linkspeed = ETHER_SPEED_100HDX;
1357	    mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII) | M_MACM_HALFDUPLEX;
1358	    }
1359	else if ((remote & ANLPAR_10FD) != 0) {
1360	    xprintf("10BaseT FDX\n");
1361	    linkspeed = ETHER_SPEED_10FDX;
1362	    mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII);
1363	    }
1364	else if ((remote & ANLPAR_10HD) != 0) {
1365	    xprintf("10BaseT HDX\n");
1366	    linkspeed = ETHER_SPEED_10HDX;
1367	    mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII) | M_MACM_HALFDUPLEX;
1368	    }
1369
1370	WRITECSR(sc, R_MAC_MODE, mode);
1371#if T3_DEBUG
1372    	xprintf("Mode2:%08X ", (int) mode);
1373#endif
1374	}
1375    else {
1376	/* no link partner convergence */
1377	xprintf("Unknown\n");
1378	linkspeed = ETHER_SPEED_UNKNOWN;
1379	remote = xremote = 0;
1380	if (G_MACM_PORTMODE(mode) == K_MACM_PORTMODE_NONE) {
1381	    /* Keep any previous port mode as the one most likely to reappear.
1382	       Otherwise, choose one, and 10/100FDX is more likely. */
1383	    mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII);
1384	    WRITECSR(sc, R_MAC_MODE, mode);
1385	    }
1386	}
1387    sc->linkspeed = linkspeed;
1388
1389    /* clear latching bits, XXX fix flakey reads */
1390    status = mii_read_register_v(sc, sc->phy_addr, MII_BMSR, 1);
1391    (void)mii_read_register(sc, sc->phy_addr, MII_ISR);
1392
1393    sc->phy_status = status;
1394    sc->phy_ability = remote;
1395    sc->phy_xability = xremote;
1396
1397    mii_dump(sc, "final PHY");
1398}
1399
1400
1401static void
1402t3_clear(t3_ether_t *sc, unsigned reg, uint32_t mask)
1403{
1404    uint32_t val;
1405    int timeout;
1406
1407    val = READCSR(sc, reg);
1408    val &= ~mask;
1409    WRITECSR(sc, reg, val);
1410    val = READCSR(sc, reg);
1411
1412    for (timeout = 4000; (val & mask) != 0 && timeout > 0; timeout -= 100) {
1413	cfe_usleep(100);
1414	val = READCSR(sc, reg);
1415	}
1416    if (timeout <= 0)
1417	xprintf("%s: cannot clear %04X/%08X\n", t3_devname(sc), reg, mask);
1418}
1419
1420
1421/* The following functions collectively implement the recommended
1422   BCM5700 Initialization Procedure (Section 8: Device Control) */
1423
1424static int
1425t3_coldreset(t3_ether_t *sc)
1426{
1427    pcireg_t cmd;
1428    pcireg_t bhlc, subsysid;
1429    pcireg_t bar0, bar1;
1430    pcireg_t cmdx;
1431    uint32_t mhc, mcr, mcfg;
1432    uint32_t mode;
1433    int timeout;
1434
1435    /* Steps 1-18 */
1436    /* Enable memory, also clear R/WC status bits (1) */
1437    cmd = pci_conf_read(sc->tag, PCI_COMMAND_STATUS_REG);
1438    cmd |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
1439    pci_conf_write(sc->tag, PCI_COMMAND_STATUS_REG, cmd);
1440
1441    /* Clear and disable INTA output. (2) */
1442    mhc = READCSR(sc, R_MISC_HOST_CTRL);
1443    mhc |= M_MHC_MASKPCIINT | M_MHC_CLEARINTA | M_MHC_ENINDIRECT | M_MHC_ENCLKCTRLRW;
1444    WRITECSR(sc, R_MISC_HOST_CTRL, mhc);
1445
1446    /* Save some config registers modified by core clock reset (3). */
1447    bhlc = pci_conf_read(sc->tag, PCI_BHLC_REG);
1448    subsysid = pci_conf_read(sc->tag, PCI_SUBSYS_ID_REG);
1449    /* Empirically, these are clobbered too. */
1450    bar0 = pci_conf_read(sc->tag, PCI_MAPREG(0));
1451    bar1 = pci_conf_read(sc->tag, PCI_MAPREG(1));
1452
1453    /* Reset the core clocks (4, 5). */
1454    mcfg = READCSR(sc, R_MISC_CFG);
1455    mcfg |= M_MCFG_CORERESET;
1456    WRITECSR(sc, R_MISC_CFG, mcfg);
1457    cfe_usleep(100);    /* 100 usec delay */
1458
1459    /* NB: Until the BARs are restored and reenabled, only PCI
1460       configuration reads and writes will succeed.  */
1461
1462    /* Reenable MAC memory (7) */
1463    pci_conf_write(sc->tag, PCI_MAPREG(0), bar0);
1464    pci_conf_write(sc->tag, PCI_MAPREG(1), bar1);
1465    (void)pci_conf_read(sc->tag, PCI_MAPREG(1));  /* push */
1466    pci_conf_write(sc->tag, PCI_COMMAND_STATUS_REG, cmd);
1467    (void)pci_conf_read(sc->tag, PCI_COMMAND_STATUS_REG);  /* push */
1468
1469    /* Undo some of the resets (6) */
1470    mhc = READCSR(sc, R_MISC_HOST_CTRL);
1471    mhc |= M_MHC_MASKPCIINT | M_MHC_ENINDIRECT | M_MHC_ENCLKCTRLRW;
1472    WRITECSR(sc, R_MISC_HOST_CTRL, mhc);
1473
1474    /* Verify that core clock resets completed and autocleared. */
1475    mcfg = READCSR(sc, R_MISC_CFG);
1476    if ((mcfg & M_MCFG_CORERESET) != 0) {
1477	xprintf("bcm5700: core clocks stuck in reset\n");
1478	}
1479
1480    /* Configure PCI-X (8) */
1481    if (sc->device != K_PCI_ID_BCM5705) {
1482	cmdx = pci_conf_read(sc->tag, PCI_PCIX_CMD_REG);
1483	cmdx &= ~PCIX_CMD_RLXORDER_ENABLE;
1484	pci_conf_write(sc->tag, PCI_PCIX_CMD_REG, cmdx);
1485	}
1486
1487    /* Enable memory arbiter (9)  */
1488    mode = READCSR(sc, R_MEM_MODE);
1489    mode |= M_MAM_ENABLE;    /* enable memory arbiter */
1490    WRITECSR(sc, R_MEM_MODE, mode);
1491
1492    /* Assume no external SRAM for now (10) */
1493
1494    /* Set up MHC for endianness and write enables (11-15) */
1495    mhc = READCSR(sc, R_MISC_HOST_CTRL);
1496    /* Since we use match-bits for Direct PCI access, don't swap bytes. */
1497    mhc &= ~M_MHC_ENBYTESWAP;
1498#if ENDIAN_LITTLE
1499    mhc |= M_MHC_ENWORDSWAP;
1500#endif
1501#if ENDIAN_BIG
1502#if PIOSWAP
1503    mhc |= M_MHC_ENWORDSWAP;
1504#endif
1505#endif
1506    mhc |= M_MHC_ENINDIRECT | M_MHC_ENPCISTATERW | M_MHC_ENCLKCTRLRW;
1507    WRITECSR(sc, R_MISC_HOST_CTRL, mhc);
1508
1509    /* Set byte swapping (16, 17) */
1510    mcr = READCSR(sc, R_MODE_CTRL);
1511#if ENDIAN_LITTLE
1512    mcr |= M_MCTL_BSWAPDATA | M_MCTL_WSWAPDATA;
1513    mcr |= M_MCTL_WSWAPCTRL;
1514#endif
1515#if ENDIAN_BIG
1516#if MATCH_BYTES
1517    mcr |= M_MCTL_BSWAPDATA | M_MCTL_WSWAPDATA;
1518    mcr |= M_MCTL_BSWAPCTRL | M_MCTL_WSWAPCTRL;
1519#else
1520    mcr &= ~(M_MCTL_BSWAPCTRL | M_MCTL_BSWAPDATA);
1521    mcr |= M_MCTL_WSWAPCTRL | M_MCTL_WSWAPDATA;
1522#endif
1523#endif
1524    WRITECSR(sc, R_MODE_CTRL, mcr);
1525
1526    /* Disable PXE restart, wait for firmware (18, 19) */
1527    if (READMEM(sc, A_PXE_MAILBOX) != T3_MAGIC_NUMBER) {
1528	/* Apparently, if the magic number is already set, firmware
1529	   ignores this attempted handshake. */
1530	WRITEMEM(sc, A_PXE_MAILBOX, T3_MAGIC_NUMBER);
1531	for (timeout = CFE_HZ; timeout > 0; timeout -= CFE_HZ/10) {
1532	    if (READMEM(sc, A_PXE_MAILBOX) == ~T3_MAGIC_NUMBER)
1533		break;
1534	    cfe_sleep(CFE_HZ/10);
1535	    }
1536	if (READMEM(sc, A_PXE_MAILBOX) != ~T3_MAGIC_NUMBER)
	    xprintf("bcm5700: no firmware PXE rendezvous\n");
1538
#if T3_DEBUG
	{
	uint32_t mag;

	mag = READMEM(sc, A_PXE_MAILBOX);
	xprintf("magic number: %X %X\n", mag, ~T3_MAGIC_NUMBER);

	/* WRITEMEM(sc, A_PXE_MAILBOX, T3_MAGIC_NUMBER); */

	mag = READMEM(sc, A_PXE_MAILBOX);
	xprintf("magic number: %X %X\n", mag, ~T3_MAGIC_NUMBER);
	}
#endif
1549
1550	}
1551    else
1552    {
1553
1554#if T3_DEBUG
1555	uint32_t mag;
1556	mag = READMEM(sc, A_PXE_MAILBOX);
1557	xprintf("magic number: %X %X\n",mag,~T3_MAGIC_NUMBER);
1558#endif
1559
1560	xprintf("bcm5700: PXE magic number already set\n");
1561	}
1562
1563    /* Clear Ethernet MAC Mode (20) */
1564    WRITECSR(sc, R_MAC_MODE, 0x00000000);
1565
1566    /* Restore remaining config registers (21) */
1567    pci_conf_write(sc->tag, PCI_BHLC_REG, bhlc);
1568    pci_conf_write(sc->tag, PCI_SUBSYS_ID_REG, subsysid);
1569
1570    return 0;
1571}
1572
1573/* XXX Not clear that the following is useful. */
1574static int
1575t3_warmreset(t3_ether_t *sc)
1576{
1577    uint32_t mode;
1578
1579    /* Enable memory arbiter (9)  */
1580    mode = READCSR(sc, R_MEM_MODE);
1581    mode |= M_MAM_ENABLE;    /* enable memory arbiter */
1582    WRITECSR(sc, R_MEM_MODE, mode);
1583
1584    /* Clear Ethernet MAC Mode (20) */
1585    WRITECSR(sc, R_MAC_MODE, 0x00000000);
1586
1587    return 0;
1588}
1589
1590
1591static int
1592t3_init_registers(t3_ether_t *sc)
1593{
1594    unsigned offset;
1595    uint32_t dmac, mcr, mcfg;
1596
1597    /* Steps 22-29 */
1598
1599    /* Clear MAC statistics block (22) */
1600    for (offset = A_MAC_STATS; offset < A_MAC_STATS+L_MAC_STATS; offset += 4) {
1601	WRITEMEM(sc, offset, 0);
1602	}
1603
1604    /* Clear driver status memory region (23) */
1605    /* ASSERT (sizeof(t3_status_t) == L_MAC_STATUS) */
1606    memset((uint8_t *)sc->status, 0, sizeof(t3_status_t));
1607
1608    /* Set up PCI DMA control (24) */
1609    dmac = READCSR(sc, R_DMA_RW_CTRL);
1610    dmac &= ~(M_DMAC_RDCMD | M_DMAC_WRCMD | M_DMAC_MINDMA);
1611    dmac |= V_DMAC_RDCMD(K_PCI_MEMRD) | V_DMAC_WRCMD(K_PCI_MEMWR);
1612    switch (sc->device) {
1613	case K_PCI_ID_BCM5700:
1614	case K_PCI_ID_BCM5701:
1615	case K_PCI_ID_BCM5702:
1616	    dmac |= V_DMAC_MINDMA(0xF);    /* "Recommended" */
1617	    break;
1618
1619	case K_PCI_ID_BCM5780:
1620	    /* XXX magic values, Broadcom-supplied Linux driver */
1621	    dmac |= (1 << 20) | (1 << 18) | M_DMAC_ONEDMA;
1622#if T3_DEBUG
1623	    dmac |= 0x00144000;
1624#endif
1625	    break;
1626
1627     /* case other 5714 family */
1628	    /* dmac |= (1 << 20) | (1 << 18) | (1 << 15); */
1629
1630	default:
1631	    dmac |= V_DMAC_MINDMA(0x0);
1632	    break;
1633	}
1634    WRITECSR(sc, R_DMA_RW_CTRL, dmac);
1635
1636    /* Set DMA byte swapping (25) - XXX repeat of (17) */
1637    mcr = READCSR(sc, R_MODE_CTRL);
1638#if ENDIAN_LITTLE
1639    mcr |= M_MCTL_BSWAPDATA | M_MCTL_WSWAPDATA;
1640    mcr |= M_MCTL_WSWAPCTRL;
1641#endif
1642#if ENDIAN_BIG
1643#if MATCH_BYTES
1644    mcr |= M_MCTL_BSWAPDATA | M_MCTL_WSWAPDATA;
1645    mcr |= M_MCTL_BSWAPCTRL | M_MCTL_WSWAPCTRL;
1646#else
1647    mcr &= ~(M_MCTL_BSWAPCTRL | M_MCTL_BSWAPDATA);
1648    mcr |= M_MCTL_WSWAPCTRL | M_MCTL_WSWAPDATA;
1649#endif
1650#endif
1651    WRITECSR(sc, R_MODE_CTRL, mcr);
1652
1653    /* Configure host rings (26) */
1654    mcr |= M_MCTL_HOSTBDS;
1655    WRITECSR(sc, R_MODE_CTRL, mcr);
1656
1657    /* Indicate driver ready, disable checksums (27, 28) */
1658    mcr |= M_MCTL_HOSTUP;
1659    mcr |= (M_MCTL_NOTXPHSUM | M_MCTL_NORXPHSUM | M_MCTL_HOSTBDS);
1660    WRITECSR(sc, R_MODE_CTRL, mcr);
1661
1662    /* Configure timer (29) */
1663    mcfg = READCSR(sc, R_MISC_CFG);
1664    mcfg &= ~M_MCFG_PRESCALER;
1665    mcfg |= V_MCFG_PRESCALER(66-1);    /* 66 MHz */
1666    WRITECSR(sc, R_MISC_CFG, mcfg);
1667
1668    return 0;
1669}
1670
1671static int
1672t3_init_pools(t3_ether_t *sc)
1673{
1674    uint32_t mode;
1675    int timeout;
1676
1677    /* Steps 30-36.  These use "recommended" settings (p 150) */
1678
1679    /* Configure the MAC memory pool (30) */
1680    if ((sc->device != K_PCI_ID_BCM5705) &&
1681        !BCM571X_FAMILY_DEVICE(sc->device)) {
1682
1683        WRITECSR(sc, R_BMGR_MBUF_BASE, A_BUFFER_POOL);
1684        WRITECSR(sc, R_BMGR_MBUF_LEN, L_BUFFER_POOL);
1685
1686	}
1687    else
1688    {
1689        /* Note: manual appears to recommend not even writing these (?) */
1690        /* WRITECSR(sc, R_BMGR_MBUF_BASE, A_RXMBUF); */
1691        /* WRITECSR(sc, R_BMGR_MBUF_LEN, 0x8000); */
1692	}
1693
1694    if (BCM571X_FAMILY_DEVICE(sc->device))
1695    {
1696
1697       /* Configure the MAC memory watermarks for BCM571X family (32) */
1698        WRITECSR(sc, R_BMGR_MBUF_DMA_LOW, 0x0);
1699        WRITECSR(sc, R_BMGR_MBUF_RX_LOW,  0x10);
1700        WRITECSR(sc, R_BMGR_MBUF_HIGH,    0x60);
1701    }
1702    else
1703    {
1704
1705       /* Configure the MAC DMA resource pool (31) */
1706        WRITECSR(sc, R_BMGR_DMA_BASE, A_DMA_DESCS);
1707        WRITECSR(sc, R_BMGR_DMA_LEN,  L_DMA_DESCS);
1708
1709        /* Configure the MAC memory watermarks (32) */
1710        WRITECSR(sc, R_BMGR_MBUF_DMA_LOW, 0x50);
1711        WRITECSR(sc, R_BMGR_MBUF_RX_LOW,  0x20);
1712        WRITECSR(sc, R_BMGR_MBUF_HIGH,    0x60);
1713
1714        /* Configure the DMA resource watermarks (33) */
1715        WRITECSR(sc, R_BMGR_DMA_LOW,   5);
1716        WRITECSR(sc, R_BMGR_DMA_HIGH, 10);
1717
1718        /* Enable the buffer manager (34, 35) */
1719        mode = READCSR(sc, R_BMGR_MODE);
1720        mode |= (M_BMODE_ENABLE | M_BMODE_MBUFLOWATTN);
1721        WRITECSR(sc, R_BMGR_MODE, mode);
1722        for (timeout = CFE_HZ/2; timeout > 0; timeout -= CFE_HZ/10) {
1723            mode = READCSR(sc, R_BMGR_MODE);
1724            if ((mode & M_BMODE_ENABLE) != 0)
1725                break;
1726            cfe_sleep(CFE_HZ/10);
1727        }
1728        if ((mode & M_BMODE_ENABLE) == 0)
1729            xprintf("bcm5700: buffer manager not enabled\n");
1730    }
1731
1732    /* Enable internal queues (36) */
1733    WRITECSR(sc, R_FTQ_RESET, 0xFFFFFFFF);
1734#ifndef BCM47XX  /* XXX bus error on 5703 */
1735    (void)READCSR(sc, R_FTQ_RESET);    /* push */
1736#endif
1737    cfe_sleep(1);
1738    WRITECSR(sc, R_FTQ_RESET, 0x00000000);
1739
1740    return 0;
1741}
1742
1743static int
1744t3_init_rings(t3_ether_t *sc)
1745{
1746    unsigned rcbp;
1747    int i;
1748
1749    /* Steps 37-46 */
1750
1751    /* Initialize RCBs for Standard Receive Buffer Ring (37) */
1752    WRITECSR(sc, R_STD_RCV_BD_RCB+RCB_HOST_ADDR_HIGH, 0);
1753    WRITECSR(sc, R_STD_RCV_BD_RCB+RCB_HOST_ADDR_LOW, PTR_TO_PCI(sc->rxp_std));
1754    /* 5714 family device removed JUMBO ring */
1755    if ( !BCM571X_FAMILY_DEVICE(sc->device) )
1756    {
1757        WRITECSR(sc, R_STD_RCV_BD_RCB+RCB_CTRL, V_RCB_MAXLEN(ETH_PKTBUF_LEN));
1758    }
1759    else
1760    {
1761        WRITECSR(sc, R_STD_RCV_BD_RCB+RCB_CTRL, V_RCB_MAXLEN(RXP_STD_ENTRIES));
1762    }
1763    WRITECSR(sc, R_STD_RCV_BD_RCB+RCB_NIC_ADDR, A_STD_RCV_RINGS);
1764
1765    /* 5714 family device removed JUMBO ring */
1766    if ( !BCM571X_FAMILY_DEVICE(sc->device) )
1767    {
1768        /* Disable RCBs for Jumbo and Mini Receive Buffer Rings (38,39) */
1769        WRITECSR(sc, R_JUMBO_RCV_BD_RCB+RCB_CTRL,
1770                 RCB_FLAG_USE_EXT_RCV_BD | RCB_FLAG_RING_DISABLED);
1771        WRITECSR(sc, R_JUMBO_RCV_BD_RCB+RCB_NIC_ADDR, A_JUMBO_RCV_RINGS);
1772        WRITECSR(sc, R_MINI_RCV_BD_RCB+RCB_CTRL, RCB_FLAG_RING_DISABLED);
1773        WRITECSR(sc, R_MINI_RCV_BD_RCB+RCB_NIC_ADDR, 0xe000);
1774
1775        /* Set BD ring replenish thresholds (40) */
1776        WRITECSR(sc, R_MINI_RCV_BD_THRESH, 128);
1777    }
1778
1779#if T3_BRINGUP
1780    WRITECSR(sc, R_STD_RCV_BD_THRESH, 1);
1781#else
1782    /* Note that STD_RCV_BD_THRESH cannot exceed MIN_RXP_STD_BDS */
1783    WRITECSR(sc, R_STD_RCV_BD_THRESH, 6);
1784#endif
1785
1786    /* 5714 family device removed JUMBO ring */
1787    if ( !BCM571X_FAMILY_DEVICE(sc->device) )
1788    {
1789        WRITECSR(sc, R_JUMBO_RCV_BD_THRESH, 16);
1790    }
1791
    /* 5714 family device removed send rings 2-16 */
    if ( !BCM571X_FAMILY_DEVICE(sc->device) )
    {
1795        /* Disable unused send producer rings 2-16 (41) */
1796        for (rcbp = A_SND_RCB(1); rcbp <= A_SND_RCB(16); rcbp += RCB_SIZE)
1797            WRITEMEM(sc, rcbp+RCB_CTRL, RCB_FLAG_RING_DISABLED);
1798
1799        /* Initialize send producer index registers (42) */
1800        for (i = 1; i <= TXP_MAX_RINGS; i++) {
1801            WRITEMBOX(sc, R_SND_BD_PI(i), 0);
1802            WRITEMBOX(sc, R_SND_BD_NIC_PI(i), 0);
1803        }
1804    }
1805    else
1806    {
1807        WRITEMEM(sc, A_SND_RCB(1) + RCB_CTRL, RCB_FLAG_RING_DISABLED);
1808        WRITEMBOX(sc, R_SND_BD_PI(1), 0);
1809        WRITEMBOX(sc, R_SND_BD_NIC_PI(1), 0);
1810    }
1811
1812
1813    /* Initialize send producer ring 1 (43) */
1814    WRITEMEM(sc, A_SND_RCB(1)+RCB_HOST_ADDR_HIGH, 0);
1815    WRITEMEM(sc, A_SND_RCB(1)+RCB_HOST_ADDR_LOW, PTR_TO_PCI(sc->txp_1));
1816    WRITEMEM(sc, A_SND_RCB(1)+RCB_CTRL, V_RCB_MAXLEN(TXP_RING_ENTRIES));
1817    /* Only program send ring address for early chips */
1818    if ( !BCM571X_FAMILY_DEVICE(sc->device) &&
1819         (sc->device != K_PCI_ID_BCM5705) )
1820    {
1821        WRITEMEM(sc, A_SND_RCB(1)+RCB_NIC_ADDR, A_SND_RINGS);
1822    }
1823
    /* 5714 family device removed receive return rings 2-16 */
1825    if ( !BCM571X_FAMILY_DEVICE(sc->device) )
1826    {
1827        /* Disable unused receive return rings (44) */
1828        for (rcbp = A_RTN_RCB(1); rcbp <= A_RTN_RCB(16); rcbp += RCB_SIZE)
1829            WRITEMEM(sc, rcbp+RCB_CTRL, RCB_FLAG_RING_DISABLED);
1830    }
1831    else
1832    {
1833        WRITEMEM(sc, A_RTN_RCB(1) + RCB_CTRL, RCB_FLAG_RING_DISABLED);
1834    }
1835
1836    /* Initialize receive return ring 1 (45) */
1837    WRITEMEM(sc, A_RTN_RCB(1)+RCB_HOST_ADDR_HIGH, 0);
1838    WRITEMEM(sc, A_RTN_RCB(1)+RCB_HOST_ADDR_LOW, PTR_TO_PCI(sc->rxr_1));
1839    WRITEMEM(sc, A_RTN_RCB(1)+RCB_CTRL, V_RCB_MAXLEN(sc->rxr_entries));
1840    WRITEMEM(sc, A_RTN_RCB(1)+RCB_NIC_ADDR, 0x0000);
1841
1842    /* Initialize receive producer ring mailboxes (46) */
1843    WRITEMBOX(sc, R_RCV_BD_STD_PI, 0);
1844
1845    /* 5714 family device removed jumbo / mini rings */
1846    if ( !BCM571X_FAMILY_DEVICE(sc->device) )
1847    {
1848        WRITEMBOX(sc, R_RCV_BD_JUMBO_PI, 0);
1849        WRITEMBOX(sc, R_RCV_BD_MINI_PI, 0);
1850    }
1851
1852    return 0;
1853}
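
/* Illustrative sketch (not called anywhere): each ring control block
   programmed above is four consecutive 32-bit words -- host ring
   address high/low, a control word holding the max length and flags,
   and the NIC-memory ring address.  A hypothetical helper capturing
   the pattern used for the in-memory send/return RCBs, assuming the
   RCB_* offsets and the WRITEMEM/PTR_TO_PCI macros defined earlier in
   this file, might look like this.  */
#if 0
static void
t3_write_rcb(t3_ether_t *sc, unsigned rcbp, void *ring,
             uint32_t maxlen_flags, uint32_t nic_addr)
{
    WRITEMEM(sc, rcbp+RCB_HOST_ADDR_HIGH, 0);   /* rings allocated below 4GB */
    WRITEMEM(sc, rcbp+RCB_HOST_ADDR_LOW, PTR_TO_PCI(ring));
    WRITEMEM(sc, rcbp+RCB_CTRL, maxlen_flags);
    WRITEMEM(sc, rcbp+RCB_NIC_ADDR, nic_addr);
}
#endif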
1854
1855static int
1856t3_configure_mac(t3_ether_t *sc)
1857{
1858    uint32_t low, high;
1859    uint32_t seed;
1860    int i;
1861
1862    /* Steps 47-52 */
1863
1864    /* Configure the MAC unicast address (47) */
1865    high = (sc->hwaddr[0] << 8) | (sc->hwaddr[1]);
1866    low = ((sc->hwaddr[2] << 24) | (sc->hwaddr[3] << 16)
1867	   | (sc->hwaddr[4] << 8) | sc->hwaddr[5]);
1868    /* For now, use a single MAC address */
1869    WRITECSR(sc, R_MAC_ADDR1_HIGH, high);  WRITECSR(sc, R_MAC_ADDR1_LOW, low);
1870    WRITECSR(sc, R_MAC_ADDR2_HIGH, high);  WRITECSR(sc, R_MAC_ADDR2_LOW, low);
1871    WRITECSR(sc, R_MAC_ADDR3_HIGH, high);  WRITECSR(sc, R_MAC_ADDR3_LOW, low);
1872    WRITECSR(sc, R_MAC_ADDR4_HIGH, high);  WRITECSR(sc, R_MAC_ADDR4_LOW, low);
1873
1874    /* Configure the random backoff seed (48) */
1875    seed = 0;
1876    for (i = 0; i < 6; i++)
1877      seed += sc->hwaddr[i];
1878    seed &= 0x3FF;
1879    WRITECSR(sc, R_TX_BACKOFF, seed);
1880
1881    /* Configure the MTU (49) */
1882    WRITECSR(sc, R_RX_MTU, MAX_ETHER_PACK+VLAN_TAG_LEN);
1883
1884    /* Configure the tx IPG (50) */
1885    WRITECSR(sc, R_TX_LENS,
1886	     V_TXLEN_SLOT(0x20) | V_TXLEN_IPG(0x6) | V_TXLEN_IPGCRS(0x2));
1887
1888    /* Configure the default rx return ring 1 (51) */
1889    WRITECSR(sc, R_RX_RULES_CFG, V_RULESCFG_DEFAULT(1));
1890
1891    /* Configure the receive lists and enable statistics (52) */
1892    WRITECSR(sc, R_RCV_LIST_CFG,
1893	     V_LISTCFG_GROUP(1) | V_LISTCFG_ACTIVE(1) | V_LISTCFG_BAD(1));
1894    /* was V_LISTCFG_DEFAULT(1) | V_LISTCFG_ACTIVE(16) | V_LISTCFG_BAD(1) */
1895
1896    return 0;
1897}
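
/* Illustrative sketch (not called anywhere): the station address is
   packed into the address registers as 2 bytes in the HIGH word and
   4 bytes in the LOW word, most significant byte first, and the
   backoff seed written above is just the low 10 bits of the byte sum.
   For a hypothetical address 02:10:18:11:22:33 this gives
   high = 0x0210, low = 0x18112233 and seed = 0x090.  */
#if 0
static void
t3_macaddr_to_regs(const uint8_t addr[ENET_ADDR_LEN],
                   uint32_t *high, uint32_t *low)
{
    *high = (addr[0] << 8) | addr[1];
    *low  = ((addr[2] << 24) | (addr[3] << 16)
             | (addr[4] << 8) | addr[5]);
}
#endif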
1898
1899static int
1900t3_enable_stats(t3_ether_t *sc)
1901{
1902    uint32_t ctrl;
1903
1904    /* Steps 53-56 */
1905
1906    /* Enable rx stats (53,54) */
1907    WRITECSR(sc, R_RCV_LIST_STATS_ENB, 0xFFFFFF);
1908    ctrl = READCSR(sc, R_RCV_LIST_STATS_CTRL);
1909    ctrl |= M_STATS_ENABLE;
1910    WRITECSR(sc, R_RCV_LIST_STATS_CTRL, ctrl);
1911
1912    /* Enable tx stats (55,56) */
1913    WRITECSR(sc, R_SND_DATA_STATS_ENB, 0xFFFFFF);
1914    ctrl = READCSR(sc, R_SND_DATA_STATS_CTRL);
1915    ctrl |= (M_STATS_ENABLE | M_STATS_FASTUPDATE);
1916    WRITECSR(sc, R_SND_DATA_STATS_CTRL, ctrl);
1917
1918    return 0;
1919}
1920
1921static int
1922t3_init_coalescing(t3_ether_t *sc)
1923{
1924    uint32_t mode = 0;
1925    int timeout;
1926
1927    /* Steps 57-68 */
1928
1929    /* Disable the host coalescing engine (57, 58) */
1930    WRITECSR(sc, R_HOST_COAL_MODE, 0);
1931    for (timeout = CFE_HZ/2; timeout > 0; timeout -= CFE_HZ/10) {
1932	mode = READCSR(sc, R_HOST_COAL_MODE);
1933	if (mode == 0)
1934	    break;
1935	cfe_sleep(CFE_HZ/10);
1936	}
1937    if (mode != 0)
1938	xprintf("bcm5700: coalescing engine not disabled\n");
1939
1940    /* Set coalescing parameters (59-62) */
1941#if T3_BRINGUP
1942    WRITECSR(sc, R_RCV_COAL_TICKS, 0);
1943    WRITECSR(sc, R_RCV_COAL_MAX_CNT, 1);
1944#else
1945    WRITECSR(sc, R_RCV_COAL_TICKS, 150);
1946    WRITECSR(sc, R_RCV_COAL_MAX_CNT, 10);
1947#endif
1948
1949    WRITECSR(sc, R_RCV_COAL_INT_TICKS, 0);
1950    WRITECSR(sc, R_RCV_COAL_INT_CNT, 0);
1951#if T3_BRINGUP
1952    WRITECSR(sc, R_SND_COAL_TICKS, 0);
1953    WRITECSR(sc, R_SND_COAL_MAX_CNT, 1);
1954#else
1955    WRITECSR(sc, R_SND_COAL_TICKS, 150);
1956    WRITECSR(sc, R_SND_COAL_MAX_CNT, 10);
1957#endif
1958
1959    WRITECSR(sc, R_SND_COAL_INT_TICKS, 0);
1960    WRITECSR(sc, R_SND_COAL_INT_CNT, 0);
1961
1962    /* Initialize host status block address (63) */
1963    WRITECSR(sc, R_STATUS_HOST_ADDR, 0);
1964    WRITECSR(sc, R_STATUS_HOST_ADDR+4, PTR_TO_PCI(sc->status));
1965
1966    /* Initialize host statistics block address (64) */
1967    WRITECSR(sc, R_STATS_HOST_ADDR, 0);
1968    WRITECSR(sc, R_STATS_HOST_ADDR+4, PTR_TO_PCI(sc->stats));
1969
1970    /* Set statistics block NIC address and tick count (65, 66) */
1971    WRITECSR(sc, R_STATS_TICKS, 1000000);
1972    WRITECSR(sc, R_STATS_BASE_ADDR, A_MAC_STATS);
1973
1974    /* Set status block NIC address (67) */
1975    WRITECSR(sc, R_STATUS_BASE_ADDR, A_MAC_STATUS);
1976
1977    /* Select the status block transfer size. */
1978    if (sc->device == K_PCI_ID_BCM5700)
1979	mode = 0;          /* Truncated transfers not supported */
1980    else
1981	mode = V_HCM_SBSIZE(STATUS_BLOCK_SIZE(MAX_RI));
1982
1983    /* Enable the host coalescing engine (68) */
1984    mode |= M_HCM_ENABLE;
1985    WRITECSR(sc, R_HOST_COAL_MODE, mode);
1986
1987    return 0;
1988}
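
/* Illustrative note: with the non-bringup values above, the host
   coalescing engine appears to defer a status block update until
   either the tick counter expires or the frame count is reached,
   whichever comes first; the *_INT_* values of zero leave the
   separate "during interrupt" thresholds unused.  A hypothetical
   tuning helper using the same CSRs would be:  */
#if 0
static void
t3_set_coalescing(t3_ether_t *sc, uint32_t ticks, uint32_t frames)
{
    WRITECSR(sc, R_RCV_COAL_TICKS, ticks);
    WRITECSR(sc, R_RCV_COAL_MAX_CNT, frames);
    WRITECSR(sc, R_SND_COAL_TICKS, ticks);
    WRITECSR(sc, R_SND_COAL_MAX_CNT, frames);
}
#endif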
1989
1990static int
1991t3_init_dma(t3_ether_t *sc)
1992{
1993    uint32_t mode;
1994
1995    /* Steps 69-87 */
1996
1997    /* Enable receive BD completion, placement, and selector blocks (69-71) */
1998    WRITECSR(sc, R_RCV_BD_COMP_MODE, M_MODE_ENABLE | M_MODE_ATTNENABLE);
1999    WRITECSR(sc, R_RCV_LIST_MODE, M_MODE_ENABLE);
2000
2001    /* Turn on RX list selector state machine. */
    if ( (sc->device != K_PCI_ID_BCM5705)
         && !BCM571X_FAMILY_DEVICE(sc->device) ) {
	    WRITECSR(sc, R_RCV_LIST_SEL_MODE, M_MODE_ENABLE | M_MODE_ATTNENABLE);
	}
2007
2008    /* Enable DMA engines, enable and clear statistics (72, 73) */
2009    mode = READCSR(sc, R_MAC_MODE);
2010    mode |= (M_MACM_FHDEENB | M_MACM_RDEENB | M_MACM_TDEENB |
2011	     M_MACM_RXSTATSENB | M_MACM_RXSTATSCLR |
2012	     M_MACM_TXSTATSENB | M_MACM_TXSTATSCLR);
2013
2014#if T3_AUTOPOLL
2015    mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII);
2016#endif
2017
2018    WRITECSR(sc, R_MAC_MODE, mode);
2019
2020#if T3_AUTOPOLL
2021    WRITECSR(sc, R_MISC_LOCAL_CTRL, M_MLCTL_INTATTN);
2022#endif
2023
2024    /* Configure GPIOs (74) - skipped */
2025
2026    /* Clear interrupt mailbox (75) */
2027    WRITEMBOX(sc, R_INT_MBOX(0), 0);
2028
2029    /* Enable DMA completion block (76) */
2030    if ( (sc->device != K_PCI_ID_BCM5705)
2031        && !BCM571X_FAMILY_DEVICE(sc->device) )
2032    {
2033        WRITECSR(sc, R_DMA_COMP_MODE, M_MODE_ENABLE);
2034    }
2035
2036    /* Configure write and read DMA modes (77, 78) */
2037    WRITECSR(sc, R_WR_DMA_MODE, M_MODE_ENABLE | M_ATTN_ALL);
2038    WRITECSR(sc, R_RD_DMA_MODE, M_MODE_ENABLE | M_ATTN_ALL);
2039
2040#if 0
2041    mode = M_MODE_ENABLE | M_ATTN_ALL;
2042    if ( sc->device == K_PCI_ID_BCM5705)
2043    {
2044      	mode |= RD_DMA_MODE_FIFO_SIZE_128;
2045    }
2046    else if (  BCM571X_FAMILY_DEVICE(sc->device) )
2047    {
2048        /*
2049         * XXX: magic values.
2050         * From Broadcom-supplied Linux driver;  apparently
2051         * required to workaround a DMA bug affecting TSO
2052         * on bcm575x/bcm5721?
2053         */
2054        mode |= (1 << 27);
2055    }
2056
2057    WRITECSR(sc, R_RD_DMA_MODE, mode );
2058#endif
2059
2060    return 0;
2061}
2062
2063static int
2064t3_init_enable(t3_ether_t *sc)
2065{
2066    uint32_t mhc;
2067    uint32_t pmcs;
2068#if T3_AUTOPOLL
2069    uint32_t mode, mask;
2070#else
2071    int  i;
2072#endif
2073
2074    /* Steps 79-97 */
2075
2076    /* Enable completion functional blocks (79-82) */
2077    WRITECSR(sc, R_RCV_COMP_MODE, M_MODE_ENABLE | M_MODE_ATTNENABLE);
    if ( (sc->device != K_PCI_ID_BCM5705) &&
         !BCM571X_FAMILY_DEVICE(sc->device) ) {
        WRITECSR(sc, R_MBUF_FREE_MODE, M_MODE_ENABLE);
	}
2084
2085    if ( BCM571X_FAMILY_DEVICE(sc->device) )
2086    {
2087        WRITECSR(sc, R_SND_DATA_COMP_MODE, M_MODE_ENABLE | 0x8);
2088    }
2089    else
2090    {
2091        WRITECSR(sc, R_SND_DATA_COMP_MODE, M_MODE_ENABLE);
2092    }
2093
2094    WRITECSR(sc, R_SND_BD_COMP_MODE, M_MODE_ENABLE | M_MODE_ATTNENABLE);
2095
2096    /* Enable initiator functional blocks (83-86) */
2097    WRITECSR(sc, R_RCV_BD_INIT_MODE, M_MODE_ENABLE | M_MODE_ATTNENABLE);
2098    WRITECSR(sc, R_RCV_DATA_INIT_MODE, M_MODE_ENABLE | M_RCVINITMODE_RTNSIZE);
2099    WRITECSR(sc, R_SND_DATA_MODE, M_MODE_ENABLE);
2100    WRITECSR(sc, R_SND_BD_INIT_MODE, M_MODE_ENABLE | M_MODE_ATTNENABLE);
2101
2102    /* Enable the send BD selector (87) */
2103    WRITECSR(sc, R_SND_BD_SEL_MODE, M_MODE_ENABLE | M_MODE_ATTNENABLE);
2104
2105    /* Download firmware (88) - skipped */
2106
2107    /* Enable the MAC (89,90) */
2108    WRITECSR(sc, R_TX_MODE, M_MODE_ENABLE);   /* optional flow control */
2109    WRITECSR(sc, R_RX_MODE, M_MODE_ENABLE);   /* other options */
2110
2111    /* Disable auto-polling (91) */
2112    mii_access_init(sc);
2113
2114    /* Configure power state (92) */
2115    pmcs = READCSR(sc, PCI_PMCSR_REG);
2116    pmcs &= ~PCI_PMCSR_STATE_MASK;
2117    pmcs |= PCI_PMCSR_STATE_D0;
2118    WRITECSR(sc, PCI_PMCSR_REG, pmcs);
2119
2120#if T3_AUTOPOLL
2121    /* Program hardware LED control (93) */
2122    WRITECSR(sc, R_MAC_LED_CTRL, 0x00);   /* LEDs at PHY layer */
2123#endif
2124
2125#if T3_AUTOPOLL
2126    /* Ack/clear link change events */
2127    WRITECSR(sc, R_MAC_STATUS, M_LINKCHNG_CLR);
2128    WRITECSR(sc, R_MI_STATUS, 0);
2129
2130    /* Enable autopolling */
2131    mode = READCSR(sc, R_MI_MODE);
2132    mode |= M_MIMODE_POLLING | 0x000c000;
2133    WRITECSR(sc, R_MI_MODE, mode);
2134
2135    /* Enable link state attentions */
2136    mask = READCSR(sc, R_MAC_EVENT_ENB);
2137    mask |= M_EVT_LINKCHNG;
2138    WRITECSR(sc, R_MAC_EVENT_ENB, mask);
2139#else
2140    /* Initialize link (94) */
2141    WRITECSR(sc, R_MI_STATUS, M_MISTAT_LINKED);
2142
2143    /* Start autonegotiation (95) - see t3_initlink below */
2144
2145    /* Setup multicast filters (96) */
2146    for (i = 0; i < 4; i++)
2147	WRITECSR(sc, R_MAC_HASH(i), 0);
2148#endif /* T3_AUTOPOLL */
2149
2150    /* Enable interrupts (97) */
2151    mhc = READCSR(sc, R_MISC_HOST_CTRL);
2152    mhc &= ~M_MHC_MASKPCIINT;
2153    WRITECSR(sc, R_MISC_HOST_CTRL, mhc);
2154
2155    return 0;
2156}
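
/* Illustrative sketch (not called anywhere): several of the steps
   above follow the same read-modify-write pattern on a CSR (MAC_MODE,
   MI_MODE, MAC_EVENT_ENB, MISC_HOST_CTRL).  A hypothetical set-bits
   helper, the counterpart of the t3_clear() used in t3_hwshutdown()
   below, would be:  */
#if 0
static void
t3_set(t3_ether_t *sc, unsigned reg, uint32_t mask)
{
    uint32_t val;

    val = READCSR(sc, reg);
    val |= mask;
    WRITECSR(sc, reg, val);
}
#endif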
2157
2158
2159static void
2160t3_initlink(t3_ether_t *sc)
2161{
2162    uint32_t mcr;
2163
2164    if (mii_probe(sc) != 0) {
2165	xprintf("%s: no PHY found\n", t3_devname(sc));
2166	return;
2167	}
2168#if T3_DEBUG
2169    xprintf("%s: PHY addr %d\n", t3_devname(sc), sc->phy_addr);
2170#endif
2171    if (1)   /* XXX Support only autonegotiation for now */
2172	mii_autonegotiate(sc);
2173    else
2174	mii_set_speed(sc, ETHER_SPEED_100HDX);
2175
2176    mii_enable_interrupts(sc);
2177
2178    mcr = READCSR(sc, R_MODE_CTRL);
2179    mcr |= M_MCTL_MACINT;
2180    WRITECSR(sc, R_MODE_CTRL, mcr);
2181
2182    sc->mii_polling = 0;
2183    sc->phy_change = 0;
2184}
2185
2186static void
2187t3_shutdownlink(t3_ether_t *sc)
2188{
2189    uint32_t mcr;
2190
2191    mcr = READCSR(sc, R_MODE_CTRL);
2192    mcr &= ~M_MCTL_MACINT;
2193    WRITECSR(sc, R_MODE_CTRL, mcr);
2194
2195    WRITECSR(sc, R_MAC_EVENT_ENB, 0);
2196
2197    /* The manual is fuzzy about what to do with the PHY at this
2198       point.  Empirically, resetting the 5705 PHY (but not others)
2199       will cause it to get stuck in 10/100 MII mode.  */
2200    if (sc->device != K_PCI_ID_BCM5705)
2201	mii_write_register(sc, sc->phy_addr, MII_BMCR, BMCR_RESET);
2202
2203    sc->mii_polling = 0;
2204    sc->phy_change = 0;
2205}
2206
2207
2208static void
2209t3_hwinit(t3_ether_t *sc)
2210{
2211    if (sc->state != eth_state_on) {
2212
2213	if (sc->state == eth_state_uninit) {
2214	    WRITECSR(sc, R_MEMWIN_BASE_ADDR, 0);   /* Default memory window */
2215	    t3_coldreset(sc);
2216	    }
2217	else
2218	    t3_warmreset(sc);
2219
2220	t3_init_registers(sc);
2221	t3_init_pools(sc);
2222	t3_init_rings(sc);
2223	t3_configure_mac(sc);
2224	t3_enable_stats(sc);
2225	t3_init_coalescing(sc);
2226	t3_init_dma(sc);
2227	t3_init_enable(sc);
2228
    if ( 1 )
    {
2233#if T3_DEBUG
2234	dumpcsrs(sc, "end init");
2235#else
2236	(void)dumpcsrs;
2237#endif
2238
2239	eeprom_access_init(sc);
2240#if T3_DEBUG
2241	{
2242	    uint32_t eeprom[0x100/4];
2243	    int i;
2244
2245	    cfe_sleep(1);
2246	    /* XXX Apparently a few reads can be required to get the
2247               AutoAccess logic into a good state. ??? */
2248	    for (i = 0; i < 4; i++) {
2249		eeprom_read_range(sc, 0, 4, eeprom);
2250		}
2251
2252	    eeprom_read_range(sc, 0, sizeof(eeprom), eeprom);
2253	    eeprom_dump_range("Boot Strap", eeprom, 0x00, 20);
2254	    eeprom_dump_range("Manufacturing Info", eeprom, 0x74, 140);
2255	}
2256#else
2257	(void)eeprom_read_range;
2258	(void)eeprom_dump_range;
2259#endif
2260
2261
2262	t3_initlink(sc);
2263
2264    }
2265
2266	sc->state = eth_state_off;
2267	}
2268}
2269
2270static void
2271t3_hwshutdown(t3_ether_t *sc)
2272{
2273    /* Receive path shutdown */
2274    t3_clear(sc, R_RX_MODE, M_MODE_ENABLE);
2275    t3_clear(sc, R_RCV_BD_INIT_MODE, M_MODE_ENABLE);
2276    t3_clear(sc, R_RCV_LIST_MODE, M_MODE_ENABLE);
2277    if ( (sc->device != K_PCI_ID_BCM5705) &&
2278         !BCM571X_FAMILY_DEVICE(sc->device) )
2279    {
2280	t3_clear(sc, R_RCV_LIST_SEL_MODE, M_MODE_ENABLE);
2281	}
2282    t3_clear(sc, R_RCV_DATA_INIT_MODE, M_MODE_ENABLE);
2283#ifndef BCM47XX  /* XXX bus error on 5705 */
2284    t3_clear(sc, R_RCV_COMP_MODE, M_MODE_ENABLE);
2285#endif
2286    t3_clear(sc, R_RCV_BD_COMP_MODE, M_MODE_ENABLE);
2287
2288    /* Transmit path shutdown */
2289    t3_clear(sc, R_SND_BD_SEL_MODE, M_MODE_ENABLE);
2290    t3_clear(sc, R_SND_BD_INIT_MODE, M_MODE_ENABLE);
2291    t3_clear(sc, R_SND_DATA_MODE, M_MODE_ENABLE);
2292    t3_clear(sc, R_RD_DMA_MODE, M_MODE_ENABLE);
2293#ifndef BCM47XX  /* XXX bus error on 5703 */
2294    t3_clear(sc, R_SND_DATA_COMP_MODE, M_MODE_ENABLE);
2295#endif
2296    if ( (sc->device != K_PCI_ID_BCM5705)
2297        && !BCM571X_FAMILY_DEVICE(sc->device))
2298    {
2299#ifndef BCM47XX  /* XXX bus error on 5703 */
2300        t3_clear(sc, R_DMA_COMP_MODE, M_MODE_ENABLE);
2301#endif
2302	}
2303
2304    t3_clear(sc, R_SND_BD_COMP_MODE, M_MODE_ENABLE);
2305    t3_clear(sc, R_TX_MODE, M_MODE_ENABLE);
2306
2307    /* Memory shutdown */
2308    t3_clear(sc, R_HOST_COAL_MODE, M_HCM_ENABLE);
2309    t3_clear(sc, R_WR_DMA_MODE, M_MODE_ENABLE);
2310    if (sc->device != K_PCI_ID_BCM5705) {
2311	t3_clear(sc, R_MBUF_FREE_MODE, M_MODE_ENABLE);
2312	}
2313    WRITECSR(sc, R_FTQ_RESET, 0xFFFFFFFF);
2314    cfe_sleep(1);
2315    WRITECSR(sc, R_FTQ_RESET, 0x00000000);
2316    t3_clear(sc, R_BMGR_MODE, M_BMODE_ENABLE);
2317    t3_clear(sc, R_MEM_MODE, M_MAM_ENABLE);
2318
2319    t3_shutdownlink(sc);
2320
2321    WRITECSR(sc, R_MEMWIN_BASE_ADDR, 0);   /* Default memory window */
2322    t3_coldreset(sc);
2323
2324    sc->state = eth_state_uninit;
2325}
2326
2327
2328static void
2329t3_isr(void *arg)
2330{
2331    t3_ether_t *sc = (t3_ether_t *)arg;
2332    volatile t3_status_t *status = sc->status;
2333    uint32_t mac_status;
2334    int handled;
2335
2336    do {
2337	WRITEMBOX(sc, R_INT_MBOX(0), 1);
2338
2339	handled = 0;
2340	mac_status = READCSR(sc, R_MAC_STATUS);  /* force ordering */
2341	status->status &= ~M_STATUS_UPDATED;
2342
2343	if (status->index[RI(1)].return_p != sc->rxr_1_index) {
2344	    handled = 1;
2345	    if (IPOLL) sc->rx_interrupts++;
2346	    t3_procrxring(sc);
2347	    }
2348
2349	if (status->index[RI(1)].send_c != sc->txc_1_index) {
2350	    handled = 1;
	    if (IPOLL) sc->tx_interrupts++;
2352	    t3_proctxring(sc);
2353	    }
2354
2355	if ((mac_status & M_EVT_LINKCHNG) != 0) {
2356	    handled = 1;
2357#if T3_AUTOPOLL
2358	    WRITECSR(sc, R_MAC_STATUS, M_LINKCHNG_CLR);
2359#endif
2360	    WRITECSR(sc, R_MAC_STATUS, M_EVT_MICOMPLETE);
2361
2362	    status->status &= ~M_STATUS_LINKCHNG;
2363	    sc->phy_change = 1;
2364	    }
2365
2366	WRITEMBOX(sc, R_INT_MBOX(0), 0);
2367	(void)READMBOX(sc, R_INT_MBOX(0));  /* push */
2368
2369#if (!XPOLL)
2370	if (!handled)
2371	    sc->bogus_interrupts++;
2372#endif
2373
2374	} while ((status->status & M_STATUS_UPDATED) != 0);
2375
2376    if (sc->rxp_std_index != sc->prev_rxp_std_index) {
2377	sc->prev_rxp_std_index = sc->rxp_std_index;
2378	WRITEMBOX(sc, R_RCV_BD_STD_PI, sc->rxp_std_index);
2379	}
2380}
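
/* Illustrative note on the handshake in t3_isr() above: writing a
   non-zero tag to interrupt mailbox 0 masks further interrupts, the
   MAC status read forces the posted status block write to complete,
   work is found by comparing the status block producer/consumer
   indices against the driver's shadow indices, and writing 0 back to
   the mailbox re-enables interrupts; the loop repeats if the UPDATED
   bit was set again in the meantime.  A minimal check for pending
   receive work, assuming the same status block layout, could be:  */
#if 0
static int
t3_rx_work_pending(t3_ether_t *sc)
{
    volatile t3_status_t *status = sc->status;

    return status->index[RI(1)].return_p != sc->rxr_1_index;
}
#endif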
2381
2382
2383static void
2384t3_setaddr(t3_ether_t *sc, uint8_t *addr)
2385{
2386    uint32_t rx_mode, tx_mode;
2387    uint32_t low, high;
2388
2389    /* MAC must be disabled */
2390    rx_mode = READCSR(sc, R_RX_MODE);
2391    tx_mode = READCSR(sc, R_TX_MODE);
2392    t3_clear(sc, R_RX_MODE, M_MODE_ENABLE);
2393    t3_clear(sc, R_TX_MODE, M_MODE_ENABLE);
2394
2395    high = (addr[0] << 8) | addr[1];
2396    low = ((addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5]);
2397    WRITECSR(sc, R_MAC_ADDR1_HIGH, high);  WRITECSR(sc, R_MAC_ADDR1_LOW, low);
2398    WRITECSR(sc, R_MAC_ADDR2_HIGH, high);  WRITECSR(sc, R_MAC_ADDR2_LOW, low);
2399    WRITECSR(sc, R_MAC_ADDR3_HIGH, high);  WRITECSR(sc, R_MAC_ADDR3_LOW, low);
2400    WRITECSR(sc, R_MAC_ADDR4_HIGH, high);  WRITECSR(sc, R_MAC_ADDR4_LOW, low);
2401
2402    WRITECSR(sc, R_TX_MODE, tx_mode);
2403    WRITECSR(sc, R_RX_MODE, rx_mode);
2404}
2405
2406
2407static void
2408t3_clear_stats(t3_ether_t *sc)
2409{
2410#ifndef BCM47XX
2411    WRITEMBOX(sc, R_RELOAD_STATS_MBOX + 4, 0);
2412    WRITEMBOX(sc, R_RELOAD_STATS_MBOX, PTR_TO_PCI(zero_stats));
2413#endif
2414}
2415
2416
2417static void
2418t3_start(t3_ether_t *sc)
2419{
2420    t3_hwinit(sc);
2421
2422    sc->intmask = 0;
2423
2424#if IPOLL
2425    cfe_request_irq(sc->irq, t3_isr, sc, CFE_IRQ_FLAGS_SHARED, 0);
2426
2427#if T3_AUTOPOLL
2428    sc->intmask |= M_EVT_LINKCHNG;
2429#else
2430    sc->intmask |= M_EVT_LINKCHNG | M_EVT_MIINT;
2431#endif
2432    WRITECSR(sc, R_MAC_EVENT_ENB, sc->intmask);
2433#endif
2434
2435    /* Post some Rcv Producer buffers */
2436    sc->prev_rxp_std_index = sc->rxp_std_index;
2437    WRITEMBOX(sc, R_RCV_BD_STD_PI, sc->rxp_std_index);
2438
2439    sc->state = eth_state_on;
2440}
2441
2442static void
2443t3_stop(t3_ether_t *sc)
2444{
2445    WRITECSR(sc, R_MAC_EVENT_ENB, 0);
2446    sc->intmask = 0;
2447#if IPOLL
2448    cfe_free_irq(sc->irq, 0);
2449#endif
2450
2451    if (sc->state == eth_state_on) {
2452	sc->state = eth_state_off;
2453	t3_hwshutdown(sc);
2454	t3_reinit(sc);
2455	}
2456}
2457
2458
2459static int t3_ether_open(cfe_devctx_t *ctx);
2460static int t3_ether_read(cfe_devctx_t *ctx,iocb_buffer_t *buffer);
2461static int t3_ether_inpstat(cfe_devctx_t *ctx,iocb_inpstat_t *inpstat);
2462static int t3_ether_write(cfe_devctx_t *ctx,iocb_buffer_t *buffer);
2463static int t3_ether_ioctl(cfe_devctx_t *ctx,iocb_buffer_t *buffer);
2464static int t3_ether_close(cfe_devctx_t *ctx);
2465static void t3_ether_poll(cfe_devctx_t *ctx, int64_t ticks);
2466static void t3_ether_reset(void *softc);
2467
2468const static cfe_devdisp_t t3_ether_dispatch = {
2469    t3_ether_open,
2470    t3_ether_read,
2471    t3_ether_inpstat,
2472    t3_ether_write,
2473    t3_ether_ioctl,
2474    t3_ether_close,
2475    t3_ether_poll,
2476    t3_ether_reset
2477};
2478
2479cfe_driver_t bcm5700drv = {
2480    "BCM570x Ethernet",
2481    "eth",
2482    CFE_DEV_NETWORK,
2483    &t3_ether_dispatch,
2484    t3_ether_probe
2485};
2486
2487
2488static void
2489t3_delete_sc(t3_ether_t *sc)
2490{
2491    xprintf("BCM570x attach: No memory to complete probe\n");
2492    if (sc != NULL) {
2493	if (sc->txp_1 != NULL)
2494	    KFREE(sc->txp_1);
2495	if (sc->rxr_1 != NULL)
2496	    KFREE(sc->rxr_1);
2497	if (sc->rxp_std != NULL)
2498	    KFREE(sc->rxp_std);
2499	if (sc->stats != NULL)
2500	    KFREE((t3_stats_t *)sc->stats);
2501	if (sc->status != NULL)
	    KFREE((t3_status_t *)CACHE_DMA_CACHEABLE(sc->status));
2503	KFREE(sc);
2504	}
2505}
2506
static const uint8_t null_addr[ENET_ADDR_LEN]  = {0x00,0x00,0x00,0x00,0x00,0x00};
static const uint8_t bcast_addr[ENET_ADDR_LEN] = {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
2509
2510static int
2511t3_ether_attach(cfe_driver_t *drv, pcitag_t tag, uint8_t hwaddr[])
2512{
2513    t3_ether_t *sc;
2514    char descr[80];
2515    phys_addr_t pa;
2516    uint32_t base;
2517    uint32_t pcictrl;
2518    uint32_t addr;
2519    pcireg_t device, class;
2520    const char *devname;
2521    int i;
2522
2523    if (zero_stats == NULL)
2524	zero_stats = (t3_stats_t *) dma_alloc(sizeof(t3_stats_t), CACHE_ALIGN);
2525    if (zero_stats == NULL)
2526	return 0;
2527    memset(zero_stats, 0, sizeof(t3_stats_t));
2528    CACHE_DMA_SYNC(zero_stats, sizeof(t3_stats_t));
2529
2530    pci_map_mem(tag, PCI_MAPREG(0), CSR_MATCH_MODE, &pa);
2531    base = (uint32_t)pa;
2532
2533    sc = (t3_ether_t *) KMALLOC(sizeof(t3_ether_t), 0);
2534    if (sc == NULL) {
2535	t3_delete_sc(sc);
2536	return 0;
2537	}
2538
2539    memset(sc, 0, sizeof(*sc));
2540
2541    sc->status = NULL;
2542    sc->stats = NULL;
2543
2544    device = pci_conf_read(tag, PCI_ID_REG);
2545    class = pci_conf_read(tag, PCI_CLASS_REG);
2546    sc->tag = tag;
2547    sc->device = PCI_PRODUCT(device);
2548    sc->revision = PCI_REVISION(class);
2549
2550    /* (Some?) 5700s report the 5701 device code */
2551    sc->asic_revision = G_MHC_ASICREV(pci_conf_read(tag, R_MISC_HOST_CTRL));
2552    if (( sc->device == K_PCI_ID_BCM5701)
2553        && (sc->asic_revision & 0xF000) == 0x7000 )
2554	sc->device = K_PCI_ID_BCM5700;
2555
2556    sc->status =
2557	(t3_status_t *) CACHE_DMA_SHARED(dma_alloc(sizeof(t3_status_t), CACHE_ALIGN));
2558    if (sc->status == NULL) {
2559	t3_delete_sc(sc);
2560	return 0;
2561	}
2562
2563    sc->stats = (t3_stats_t *) dma_alloc(sizeof(t3_stats_t), CACHE_ALIGN);
2564    if (sc->stats == NULL) {
2565	t3_delete_sc(sc);
2566	return 0;
2567	}
2568
2569    if (sc->device == K_PCI_ID_BCM5705)
2570    {
2571        sc->rxr_entries = RXR_RING_ENTRIES_05;
2572    }
2573    else if ( BCM571X_FAMILY_DEVICE(sc->device) )
2574    {
2575        sc->rxr_entries = RXR_RING_ENTRIES_BCM571X_FAMILY;
2576    }
2577    else
2578    {
2579        sc->rxr_entries = RXR_RING_ENTRIES;
2580    }
2581
2582    sc->rxp_std =
2583        (t3_rcv_bd_t *) dma_alloc(RXP_STD_ENTRIES*RCV_BD_SIZE, CACHE_ALIGN);
2584    sc->rxr_1 =
2585        (t3_rcv_bd_t *) dma_alloc(sc->rxr_entries*RCV_BD_SIZE, CACHE_ALIGN);
2586    sc->txp_1 =
2587        (t3_snd_bd_t *) dma_alloc(TXP_RING_ENTRIES*SND_BD_SIZE, CACHE_ALIGN);
2588    if (sc->rxp_std == NULL || sc->rxr_1 == NULL || sc->txp_1 == NULL) {
2589	t3_delete_sc(sc);
2590	return 0;
2591	}
2592
2593    sc->regbase = base;
2594
    /* NB: the relative base of NIC memory depends on the PCI access
       model; only the normal (32K window) mode is handled here. */
    pcictrl = pci_conf_read(tag, R_PCI_STATE);
    (void)pcictrl;
    sc->membase = base + 0x8000;       /* Normal mode: 32K window */
2598
2599    sc->irq = pci_conf_read(tag, PCI_BPARAM_INTERRUPT_REG) & 0xFF;
2600
2601    sc->devctx = NULL;
2602
2603    /* Assume on-chip firmware has initialized the MAC address. */
2604    addr = READCSR(sc, R_MAC_ADDR1_HIGH);
2605    for (i = 0; i < 2; i++)
2606	sc->hwaddr[i] = (addr >> (8*(1-i))) & 0xff;
2607    addr = READCSR(sc, R_MAC_ADDR1_LOW);
2608    for (i = 0; i < 4; i++)
2609	sc->hwaddr[2+i] = (addr >> (8*(3-i))) & 0xff;
2610    if (memcmp(sc->hwaddr, null_addr, ENET_ADDR_LEN) == 0
2611	|| memcmp(sc->hwaddr, bcast_addr, ENET_ADDR_LEN) == 0)
2612	memcpy(sc->hwaddr, hwaddr, ENET_ADDR_LEN);
2613
2614    t3_init(sc);
2615
2616    sc->state = eth_state_uninit;
2617
2618    switch (sc->device) {
2619    case K_PCI_ID_BCM5700:
2620	devname = "BCM5700"; break;
2621    case K_PCI_ID_BCM5701:
2622	devname = "BCM5701"; break;
2623    case K_PCI_ID_BCM5702:
2624	devname = "BCM5702"; break;
2625    case K_PCI_ID_BCM5703:
2626    case K_PCI_ID_BCM5703a:
2627    case K_PCI_ID_BCM5703b:
2628	devname = "BCM5703"; break;
2629    case K_PCI_ID_BCM5704C:
2630	devname = "BCM5704C"; break;
2631    case K_PCI_ID_BCM5705:
2632	devname = "BCM5705"; break;
2633    case K_PCI_ID_BCM5780:
2634	devname = "BCM5780"; break;
2635    default:
2636	devname = "BCM570x"; break;
2637	}
2638    xsprintf(descr, "%s Ethernet at 0x%X (%a)",
2639	     devname, sc->regbase, sc->hwaddr);
2640
2641    cfe_attach(drv, sc, NULL, descr);
2642    return 1;
2643}
2644
2645static void
2646t3_ether_probe(cfe_driver_t *drv,
2647	       unsigned long probe_a, unsigned long probe_b,
2648	       void *probe_ptr)
2649{
2650    int index;
2651    uint8_t hwaddr[ENET_ADDR_LEN];
2652
2653    if (probe_ptr)
2654	enet_parse_hwaddr((char *)probe_ptr, hwaddr);
2655    else {
2656	/* Use default address 02-10-18-11-22-33 */
2657	hwaddr[0] = 0x02;  hwaddr[1] = 0x10;  hwaddr[2] = 0x18;
2658	hwaddr[3] = 0x11;  hwaddr[4] = 0x22;  hwaddr[5] = 0x33;
2659	}
2660
2661    index = 0;
2662    for (;;) {
2663	pcitag_t tag;
2664	pcireg_t device;
2665
2666	if (pci_find_class(PCI_CLASS_NETWORK, index, &tag) != 0)
2667	   break;
2668
2669	index++;
2670
2671	device = pci_conf_read(tag, PCI_ID_REG);
2672	if (PCI_VENDOR(device) == K_PCI_VENDOR_BROADCOM) {
2673	    switch (PCI_PRODUCT(device)) {
2674		case K_PCI_ID_BCM5700:
2675		case K_PCI_ID_BCM5701:
2676		case K_PCI_ID_BCM5702:
2677		case K_PCI_ID_BCM5703:
2678		case K_PCI_ID_BCM5703a:
2679		case K_PCI_ID_BCM5703b:
2680		case K_PCI_ID_BCM5704C:
2681		case K_PCI_ID_BCM5705:
		case K_PCI_ID_BCM5780:
2683		    t3_ether_attach(drv, tag, hwaddr);
2684		    enet_incr_hwaddr(hwaddr, 1);
2685		    break;
2686		default:
2687		    break;
2688		}
2689	    }
2690	}
2691}
2692
2693
2694/* The functions below are called via the dispatch vector for the Tigon 3 */
2695
2696static int
2697t3_ether_open(cfe_devctx_t *ctx)
2698{
2699    t3_ether_t *sc = ctx->dev_softc;
2700    volatile t3_stats_t *stats = sc->stats;
2701    int i;
2702
2703    if (sc->state == eth_state_on)
2704	t3_stop(sc);
2705
2706    sc->devctx = ctx;
2707
2708    for (i = 0; i < L_MAC_STATS/sizeof(uint64_t); i++)
2709	stats->stats[i] = 0;
2710    CACHE_DMA_SYNC(stats, sizeof(t3_stats_t));
2711
2712    t3_start(sc);
2713
2714    sc->rx_interrupts = sc->tx_interrupts = sc->bogus_interrupts = 0;
2715    t3_clear_stats(sc);
2716
2717    if (XPOLL) t3_isr(sc);
2718    return 0;
2719}
2720
2721static int
2722t3_ether_read(cfe_devctx_t *ctx, iocb_buffer_t *buffer)
2723{
2724    t3_ether_t *sc = ctx->dev_softc;
2725    eth_pkt_t *pkt;
2726    int blen;
2727
2728    if (XPOLL) t3_isr(sc);
2729
2730    if (sc->state != eth_state_on) return -1;
2731
2732    CS_ENTER(sc);
2733    pkt = (eth_pkt_t *) q_deqnext(&(sc->rxqueue));
2734    CS_EXIT(sc);
2735
2736    if (pkt == NULL) {
2737	buffer->buf_retlen = 0;
2738	return 0;
2739	}
2740
2741    blen = buffer->buf_length;
2742    if (blen > pkt->length) blen = pkt->length;
2743
2744    CACHE_DMA_INVAL(pkt->buffer, blen);
2745    hs_memcpy_to_hs(buffer->buf_ptr, pkt->buffer, blen);
2746    buffer->buf_retlen = blen;
2747
2748    eth_free_pkt(sc, pkt);
2749
2750    if (XPOLL) t3_isr(sc);
2751    return 0;
2752}
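
/* Illustrative note: on systems without coherent DMA, a packet buffer
   filled by the NIC must be invalidated before the CPU reads it (as
   above), and a buffer filled by the CPU must be written back before
   the NIC reads it (as in t3_ether_write() below).  A hypothetical
   pair of wrappers around the macros used here:  */
#if 0
static void
t3_sync_for_device(void *buf, int len)
{
    /* CPU -> NIC: flush CPU writes before the chip DMA-reads buf */
    CACHE_DMA_SYNC(buf, len);
}

static void
t3_sync_for_cpu(void *buf, int len)
{
    /* NIC -> CPU: discard stale lines before the CPU reads buf */
    CACHE_DMA_INVAL(buf, len);
}
#endif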
2753
2754static int
2755t3_ether_inpstat(cfe_devctx_t *ctx, iocb_inpstat_t *inpstat)
2756{
2757    t3_ether_t *sc = ctx->dev_softc;
2758
2759    if (XPOLL) t3_isr(sc);
2760
2761    if (sc->state != eth_state_on) return -1;
2762
2763    /* We avoid an interlock here because the result is a hint and an
2764       interrupt cannot turn a non-empty queue into an empty one. */
2765    inpstat->inp_status = (q_isempty(&(sc->rxqueue))) ? 0 : 1;
2766
2767    return 0;
2768}
2769
2770static int
2771t3_ether_write(cfe_devctx_t *ctx, iocb_buffer_t *buffer)
2772{
2773    t3_ether_t *sc = ctx->dev_softc;
2774    eth_pkt_t *pkt;
2775    int blen;
2776
2777    if (XPOLL) t3_isr(sc);
2778
2779    if (sc->state != eth_state_on) return -1;
2780
2781    pkt = eth_alloc_pkt(sc);
2782    if (!pkt) return CFE_ERR_NOMEM;
2783
2784    blen = buffer->buf_length;
2785    if (blen > pkt->length) blen = pkt->length;
2786
2787    hs_memcpy_from_hs(pkt->buffer, buffer->buf_ptr, blen);
2788    pkt->length = blen;
2789    CACHE_DMA_SYNC(pkt->buffer, blen);
2790
2791    if (t3_transmit(sc, pkt) != 0) {
2792	eth_free_pkt(sc,pkt);
2793	return CFE_ERR_IOERR;
2794	}
2795
2796    if (XPOLL) t3_isr(sc);
2797    return 0;
2798}
2799
2800static int
2801t3_ether_ioctl(cfe_devctx_t *ctx, iocb_buffer_t *buffer)
2802{
2803    t3_ether_t *sc = ctx->dev_softc;
2804    int speed;
2805
2806    switch ((int)buffer->buf_ioctlcmd) {
2807	case IOCTL_ETHER_GETHWADDR:
2808	    hs_memcpy_to_hs(buffer->buf_ptr, sc->hwaddr, sizeof(sc->hwaddr));
2809	    return 0;
2810
2811	case IOCTL_ETHER_SETHWADDR:
2812	    hs_memcpy_from_hs(sc->hwaddr, buffer->buf_ptr, sizeof(sc->hwaddr));
2813	    t3_setaddr(sc, sc->hwaddr);
2814	    return -1;
2815
2816	case IOCTL_ETHER_GETSPEED:
2817	    speed = sc->linkspeed;
2818	    hs_memcpy_to_hs(buffer->buf_ptr,&speed,sizeof(int));
2819	    return 0;
2820
2821	default:
2822	    return -1;
2823	}
2824}
2825
2826static int
2827t3_ether_close(cfe_devctx_t *ctx)
2828{
2829    t3_ether_t *sc = ctx->dev_softc;
2830    volatile t3_stats_t *stats = sc->stats;
2831    uint32_t inpkts, outpkts, interrupts;
2832    int i;
2833
2834    t3_stop(sc);
2835
2836    CACHE_DMA_INVAL(stats, sizeof(t3_stats_t));
2837#if T3_BRINGUP
2838    for (i = 0; i < L_MAC_STATS/sizeof(uint64_t); i++) {
2839	uint64_t count = ctoh64(stats->stats[i]);
2840
2841	if (count != 0)
2842	    xprintf(" stats[%d] = %8lld\n", i, count);
2843	}
2844#else
2845    (void) i;
2846#endif
2847
2848    inpkts = ctoh64(stats->stats[ifHCInUcastPkts])
2849	      + ctoh64(stats->stats[ifHCInMulticastPkts])
2850	      + ctoh64(stats->stats[ifHCInBroadcastPkts]);
2851    outpkts = ctoh64(stats->stats[ifHCOutUcastPkts])
2852	      + ctoh64(stats->stats[ifHCOutMulticastPkts])
2853	      + ctoh64(stats->stats[ifHCOutBroadcastPkts]);
2854    interrupts = ctoh64(stats->stats[nicInterrupts]);
2855
2856    /* Empirically, counters on the 5705 are always zero.  */
2857    if (sc->device != K_PCI_ID_BCM5705) {
2858	xprintf("%s: %d sent, %d received, %d interrupts\n",
2859		t3_devname(sc), outpkts, inpkts, interrupts);
2860	if (IPOLL) {
2861	    xprintf("  %d rx interrupts, %d tx interrupts",
2862		    sc->rx_interrupts, sc->tx_interrupts);
2863	    if (sc->bogus_interrupts != 0)
2864	        xprintf(", %d bogus interrupts", sc->bogus_interrupts);
2865	    xprintf("\n");
2866	    }
2867	}
2868
2869    sc->devctx = NULL;
2870    return 0;
2871}
2872
2873static void
2874t3_ether_poll(cfe_devctx_t *ctx, int64_t ticks)
2875{
2876    t3_ether_t *sc = ctx->dev_softc;
2877    int changed;
2878
2879    if (sc->phy_change && sc->state != eth_state_uninit && !sc->mii_polling) {
2880	uint32_t mask;
2881
2882	sc->mii_polling++;
2883	mask = READCSR(sc, R_MAC_EVENT_ENB);
2884	WRITECSR(sc, R_MAC_EVENT_ENB, 0);
2885
2886	changed = mii_poll(sc);
2887	if (changed) {
2888	    mii_autonegotiate(sc);
2889	    }
2890	sc->phy_change = 0;
2891	sc->mii_polling--;
2892
2893	WRITECSR(sc, R_MAC_EVENT_ENB, mask);
2894	}
2895}
2896
2897static void
2898t3_ether_reset(void *softc)
2899{
2900    t3_ether_t *sc = (t3_ether_t *)softc;
2901
2902    /* Turn off the Ethernet interface. */
2903
2904    if (sc->state == eth_state_on)
2905	t3_stop(sc);
2906
2907    sc->state = eth_state_uninit;
2908}
2909
2910
static uint32_t l_phys_read32(uint32_t addr)
{
    //printf("rd:%08X\n", addr);
    return phys_read32(addr);
}

static void l_phys_write32(uint32_t addr, uint32_t val)
{
    //printf("wr:%08X %08X\n", addr, val);
    phys_write32(addr, val);
}
2922
2923