/*	$OpenBSD: if_nge.c,v 1.99 2024/05/24 06:02:56 jsg Exp $	*/
/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2000, 2001
 *	Bill Paul <wpaul@bsdi.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_nge.c,v 1.35 2002/08/08 18:33:28 ambrisko Exp $
 */

/*
 * National Semiconductor DP83820/DP83821 gigabit ethernet driver
 * for FreeBSD. Datasheets are available from:
 *
 * http://www.national.com/ds/DP/DP83820.pdf
 * http://www.national.com/ds/DP/DP83821.pdf
 *
 * These chips are used on several low cost gigabit ethernet NICs
 * sold by D-Link, Addtron, SMC and Asante. Both parts are
 * virtually the same, except the 83820 is a 64-bit/32-bit part,
 * while the 83821 is 32-bit only.
 *
 * Many cards also use National gigE transceivers, such as the
 * DP83891, DP83861 and DP83862 gigPHYTER parts. The DP83861 datasheet
 * contains a full register description that applies to all of these
 * components:
 *
 * http://www.national.com/ds/DP/DP83861.pdf
 *
 * Written by Bill Paul <wpaul@bsdi.com>
 * BSDi Open Source Solutions
 */

/*
 * The NatSemi DP83820 and 83821 controllers are enhanced versions
 * of the NatSemi MacPHYTER 10/100 devices. They support 10, 100
 * and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII
 * ports. Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP
 * hardware checksum offload (IPv4 only), VLAN tagging and filtering,
 * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern
 * matching buffers, one perfect address filter buffer and interrupt
 * moderation. The 83820 supports both 64-bit and 32-bit addressing
 * and data transfers: the 64-bit support can be toggled on or off
 * via software. This affects the size of certain fields in the DMA
 * descriptors.
 *
 * There are two bugs/misfeatures in the 83820/83821 that I have
 * discovered so far:
 *
 * - Receive buffers must be aligned on 64-bit boundaries, which means
 *   you must resort to copying data in order to fix up the payload
 *   alignment.
 *
 * - In order to transmit jumbo frames larger than 8170 bytes, you have
 *   to turn off transmit checksum offloading, because the chip can't
 *   compute the checksum on an outgoing frame unless it fits entirely
 *   within the TX FIFO, which is only 8192 bytes in size. If you have
 *   TX checksum offload enabled and you attempt to transmit a
 *   frame larger than 8170 bytes, the transmitter will wedge.
 *
 * To work around the latter problem, TX checksum offload is disabled
 * if the user selects an MTU larger than 8152 (8170 - 18).
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <uvm/uvm_extern.h>              /* for vtophys */
#define	VTOPHYS(v)	vtophys((vaddr_t)(v))

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/miivar.h>

#define NGE_USEIOSPACE

#include <dev/pci/if_ngereg.h>

int nge_probe(struct device *, void *, void *);
void nge_attach(struct device *, struct device *, void *);

int nge_newbuf(struct nge_softc *, struct nge_desc *,
    struct mbuf *);
int nge_encap(struct nge_softc *, struct mbuf *, u_int32_t *);
void nge_rxeof(struct nge_softc *);
void nge_txeof(struct nge_softc *);
int nge_intr(void *);
void nge_tick(void *);
void nge_start(struct ifnet *);
int nge_ioctl(struct ifnet *, u_long, caddr_t);
void nge_init(void *);
void nge_stop(struct nge_softc *);
void nge_watchdog(struct ifnet *);
int nge_ifmedia_mii_upd(struct ifnet *);
void nge_ifmedia_mii_sts(struct ifnet *, struct ifmediareq *);
int nge_ifmedia_tbi_upd(struct ifnet *);
void nge_ifmedia_tbi_sts(struct ifnet *, struct ifmediareq *);

void nge_delay(struct nge_softc *);
void nge_eeprom_idle(struct nge_softc *);
void nge_eeprom_putbyte(struct nge_softc *, int);
void nge_eeprom_getword(struct nge_softc *, int, u_int16_t *);
void nge_read_eeprom(struct nge_softc *, caddr_t, int, int, int);

void nge_mii_sync(struct nge_softc *);
void nge_mii_send(struct nge_softc *, u_int32_t, int);
int nge_mii_readreg(struct nge_softc *, struct nge_mii_frame *);
int nge_mii_writereg(struct nge_softc *, struct nge_mii_frame *);

int nge_miibus_readreg(struct device *, int, int);
void nge_miibus_writereg(struct device *, int, int, int);
void nge_miibus_statchg(struct device *);

void nge_setmulti(struct nge_softc *);
void nge_reset(struct nge_softc *);
int nge_list_rx_init(struct nge_softc *);
int nge_list_tx_init(struct nge_softc *);

#ifdef NGE_USEIOSPACE
#define NGE_RES			SYS_RES_IOPORT
#define NGE_RID			NGE_PCI_LOIO
#else
#define NGE_RES			SYS_RES_MEMORY
#define NGE_RID			NGE_PCI_LOMEM
#endif

#ifdef NGE_DEBUG
#define DPRINTF(x)	if (ngedebug) printf x
#define DPRINTFN(n,x)	if (ngedebug >= (n)) printf x
int	ngedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/* Read-modify-write helpers for 32-bit chip registers. */
#define NGE_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define NGE_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

/*
 * Set/clear bits in the MEAR (EEPROM/MII access) register; both the
 * serial EEPROM and the MII management interface are bit-banged
 * through this register. These macros expect 'sc' in scope.
 */
#define SIO_SET(x)					\
	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | (x))

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~(x))

/*
 * Burn a short, bus-paced delay by issuing harmless reads of the
 * CSR register; used to pace the bit-banged EEPROM interface.
 */
void
nge_delay(struct nge_softc *sc)
{
	int idx;

	for (idx = (300 / 33) + 1; idx > 0; idx--)
		CSR_READ_4(sc, NGE_CSR);
}

/*
 * Drive the EEPROM interface back to its idle state: raise chip
 * select, clock out a train of pulses, then drop both lines and
 * clear MEAR entirely.
 */
void
nge_eeprom_idle(struct nge_softc *sc)
{
	int i;

	SIO_SET(NGE_MEAR_EE_CSEL);
	nge_delay(sc);
	SIO_SET(NGE_MEAR_EE_CLK);
	nge_delay(sc);

	for (i = 0; i < 25; i++) {
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}

	SIO_CLR(NGE_MEAR_EE_CLK);
	nge_delay(sc);
	SIO_CLR(NGE_MEAR_EE_CSEL);
	nge_delay(sc);
	CSR_WRITE_4(sc, NGE_MEAR, 0x00000000);
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
void
nge_eeprom_putbyte(struct nge_softc *sc, int addr)
{
	int d, i;

	d = addr | NGE_EECMD_READ;

	/*
	 * Feed in each bit and strobe the clock, MSB first
	 * (11 bits: command + address).
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			SIO_SET(NGE_MEAR_EE_DIN);
		} else {
			SIO_CLR(NGE_MEAR_EE_DIN);
		}
		nge_delay(sc);
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
void
nge_eeprom_getword(struct nge_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int16_t word = 0;

	/* Force EEPROM to idle state. */
	nge_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	nge_delay(sc);
	SIO_CLR(NGE_MEAR_EE_CLK);
	nge_delay(sc);
	SIO_SET(NGE_MEAR_EE_CSEL);
	nge_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	nge_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM, MSB first.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT)
			word |= i;
		nge_delay(sc);
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	nge_eeprom_idle(sc);

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
void
nge_read_eeprom(struct nge_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int i;
	u_int16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		nge_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
}

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
void
nge_mii_sync(struct nge_softc *sc)
{
	int i;

	SIO_SET(NGE_MEAR_MII_DIR|NGE_MEAR_MII_DATA);

	for (i = 0; i < 32; i++) {
		SIO_SET(NGE_MEAR_MII_CLK);
		DELAY(1);
		SIO_CLR(NGE_MEAR_MII_CLK);
		DELAY(1);
	}
}

/*
 * Clock a series of bits through the MII, MSB first.
 */
void
nge_mii_send(struct nge_softc *sc, u_int32_t bits, int cnt)
{
	int i;

	SIO_CLR(NGE_MEAR_MII_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(NGE_MEAR_MII_DATA);
		} else {
			SIO_CLR(NGE_MEAR_MII_DATA);
		}
		DELAY(1);
		SIO_CLR(NGE_MEAR_MII_CLK);
		DELAY(1);
		SIO_SET(NGE_MEAR_MII_CLK);
	}
}

/*
 * Read an PHY register through the MII.
 */
int
nge_mii_readreg(struct nge_softc *sc, struct nge_mii_frame *frame)
{
	int i, ack, s;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = NGE_MII_STARTDELIM;
	frame->mii_opcode = NGE_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_4(sc, NGE_MEAR, 0);

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(NGE_MEAR_MII_DIR);

	nge_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	nge_mii_send(sc, frame->mii_stdelim, 2);
	nge_mii_send(sc, frame->mii_opcode, 2);
	nge_mii_send(sc, frame->mii_phyaddr, 5);
	nge_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((NGE_MEAR_MII_CLK|NGE_MEAR_MII_DATA));
	DELAY(1);
	SIO_SET(NGE_MEAR_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(NGE_MEAR_MII_DIR);
	/* Check for ack: the PHY pulls the data line low on success. */
	SIO_CLR(NGE_MEAR_MII_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA;
	SIO_SET(NGE_MEAR_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(NGE_MEAR_MII_CLK);
			DELAY(1);
			SIO_SET(NGE_MEAR_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(NGE_MEAR_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(NGE_MEAR_MII_CLK);
		DELAY(1);
	}

fail:

	SIO_CLR(NGE_MEAR_MII_CLK);
	DELAY(1);
	SIO_SET(NGE_MEAR_MII_CLK);
	DELAY(1);

	splx(s);

	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
int
nge_mii_writereg(struct nge_softc *sc, struct nge_mii_frame *frame)
{
	int s;

	s = splnet();
	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = NGE_MII_STARTDELIM;
	frame->mii_opcode = NGE_MII_WRITEOP;
	frame->mii_turnaround = NGE_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(NGE_MEAR_MII_DIR);

	nge_mii_sync(sc);

	nge_mii_send(sc, frame->mii_stdelim, 2);
	nge_mii_send(sc, frame->mii_opcode, 2);
	nge_mii_send(sc, frame->mii_phyaddr, 5);
	nge_mii_send(sc, frame->mii_regaddr, 5);
	nge_mii_send(sc, frame->mii_turnaround, 2);
	nge_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(NGE_MEAR_MII_CLK);
	DELAY(1);
	SIO_CLR(NGE_MEAR_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(NGE_MEAR_MII_DIR);

	splx(s);

	return(0);
}

/* mii_readreg hook for the MI MII layer: returns the register value. */
int
nge_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nge_softc *sc = (struct nge_softc *)dev;
	struct nge_mii_frame frame;

	DPRINTFN(9, ("%s: nge_miibus_readreg\n", sc->sc_dv.dv_xname));

	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	nge_mii_readreg(sc, &frame);

	return(frame.mii_data);
}

/* mii_writereg hook for the MI MII layer. */
void
nge_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct nge_softc *sc = (struct nge_softc *)dev;
	struct nge_mii_frame frame;


	DPRINTFN(9, ("%s: nge_miibus_writereg\n", sc->sc_dv.dv_xname));

	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;
	nge_mii_writereg(sc, &frame);
}

/*
 * Media status change callback: propagate the negotiated duplex and
 * speed into the chip's TX/RX configuration registers.
 */
void
nge_miibus_statchg(struct device *dev)
{
	struct nge_softc *sc = (struct nge_softc *)dev;
	struct mii_data *mii = &sc->nge_mii;
	u_int32_t txcfg, rxcfg;

	txcfg = CSR_READ_4(sc, NGE_TX_CFG);
	rxcfg = CSR_READ_4(sc, NGE_RX_CFG);

	DPRINTFN(4, ("%s: nge_miibus_statchg txcfg=%#x, rxcfg=%#x\n",
	    sc->sc_dv.dv_xname, txcfg, rxcfg));

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		txcfg |= (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
		rxcfg |= (NGE_RXCFG_RX_FDX);
	} else {
		txcfg &= ~(NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
		rxcfg &= ~(NGE_RXCFG_RX_FDX);
	}

	txcfg |= NGE_TXCFG_AUTOPAD;

	CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
	CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);

	/* If we have a 1000Mbps link, set the mode_1000 bit. */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
		NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
	else
		NGE_CLRBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
}

/*
 * Program the multicast hash filter from the interface's current
 * multicast membership, or fall back to accept-all-multicast.
 */
void
nge_setmulti(struct nge_softc *sc)
{
	struct arpcom *ac = &sc->arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t h = 0, i, filtsave;
	int bit, index;

	/* Address ranges can't be matched by the hash; accept everything. */
	if (ac->ac_multirangecnt > 0)
		ifp->if_flags |= IFF_ALLMULTI;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		NGE_CLRBIT(sc, NGE_RXFILT_CTL,
		    NGE_RXFILTCTL_MCHASH|NGE_RXFILTCTL_UCHASH);
		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLMULTI);
		return;
	}

	/*
	 * We have to explicitly enable the multicast hash table
	 * on the NatSemi chip if we want to use it, which we do.
	 * We also have to tell it that we don't want to use the
	 * hash table for matching unicast addresses.
	 */
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_MCHASH);
	NGE_CLRBIT(sc, NGE_RXFILT_CTL,
	    NGE_RXFILTCTL_ALLMULTI|NGE_RXFILTCTL_UCHASH);

	filtsave = CSR_READ_4(sc, NGE_RXFILT_CTL);

	/* first, zot all the existing hash bits */
	for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) {
		CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i);
		CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0);
	}

	/*
	 * From the 11 bits returned by the crc routine, the top 7
	 * bits represent the 16-bit word in the mcast hash table
	 * that needs to be updated, and the lower 4 bits represent
	 * which bit within that word needs to be set.
	 */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		h = (ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 21) &
		    0x00000FFF;
		index = (h >> 4) & 0x7F;
		bit = h & 0xF;
		CSR_WRITE_4(sc, NGE_RXFILT_CTL,
		    NGE_FILTADDR_MCAST_LO + (index * 2));
		NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit));
		ETHER_NEXT_MULTI(step, enm);
	}

	CSR_WRITE_4(sc, NGE_RXFILT_CTL, filtsave);
}

/*
 * Issue a software reset and wait (bounded) for it to self-clear.
 */
void
nge_reset(struct nge_softc *sc)
{
	int i;

	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET);

	for (i = 0; i < NGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET))
			break;
	}

	if (i == NGE_TIMEOUT)
		printf("%s: reset never completed\n", sc->sc_dv.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/*
	 * If this is a NatSemi chip, make sure to clear
	 * PME mode.
	 */
	CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS);
	CSR_WRITE_4(sc, NGE_CLKRUN, 0);
}

/*
 * Probe for an NatSemi chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
nge_probe(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NS &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NS_DP83820)
		return (1);

	return (0);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
665 */ 666void 667nge_attach(struct device *parent, struct device *self, void *aux) 668{ 669 struct nge_softc *sc = (struct nge_softc *)self; 670 struct pci_attach_args *pa = aux; 671 pci_chipset_tag_t pc = pa->pa_pc; 672 pci_intr_handle_t ih; 673 const char *intrstr = NULL; 674 bus_size_t size; 675 bus_dma_segment_t seg; 676 bus_dmamap_t dmamap; 677 int rseg; 678 u_char eaddr[ETHER_ADDR_LEN]; 679#ifndef NGE_USEIOSPACE 680 pcireg_t memtype; 681#endif 682 struct ifnet *ifp; 683 caddr_t kva; 684 685 pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0); 686 687 /* 688 * Map control/status registers. 689 */ 690 DPRINTFN(5, ("%s: map control/status regs\n", sc->sc_dv.dv_xname)); 691 692#ifdef NGE_USEIOSPACE 693 DPRINTFN(5, ("%s: pci_mapreg_map\n", sc->sc_dv.dv_xname)); 694 if (pci_mapreg_map(pa, NGE_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0, 695 &sc->nge_btag, &sc->nge_bhandle, NULL, &size, 0)) { 696 printf(": can't map i/o space\n"); 697 return; 698 } 699#else 700 DPRINTFN(5, ("%s: pci_mapreg_map\n", sc->sc_dv.dv_xname)); 701 memtype = pci_mapreg_type(pc, pa->pa_tag, NGE_PCI_LOMEM); 702 if (pci_mapreg_map(pa, NGE_PCI_LOMEM, memtype, 0, &sc->nge_btag, 703 &sc->nge_bhandle, NULL, &size, 0)) { 704 printf(": can't map mem space\n"); 705 return; 706 } 707#endif 708 709 /* Disable all interrupts */ 710 CSR_WRITE_4(sc, NGE_IER, 0); 711 712 DPRINTFN(5, ("%s: pci_intr_map\n", sc->sc_dv.dv_xname)); 713 if (pci_intr_map(pa, &ih)) { 714 printf(": couldn't map interrupt\n"); 715 goto fail_1; 716 } 717 718 DPRINTFN(5, ("%s: pci_intr_string\n", sc->sc_dv.dv_xname)); 719 intrstr = pci_intr_string(pc, ih); 720 DPRINTFN(5, ("%s: pci_intr_establish\n", sc->sc_dv.dv_xname)); 721 sc->nge_intrhand = pci_intr_establish(pc, ih, IPL_NET, nge_intr, sc, 722 sc->sc_dv.dv_xname); 723 if (sc->nge_intrhand == NULL) { 724 printf(": couldn't establish interrupt"); 725 if (intrstr != NULL) 726 printf(" at %s", intrstr); 727 printf("\n"); 728 goto fail_1; 729 } 730 printf(": %s", intrstr); 731 732 /* 
Reset the adapter. */ 733 DPRINTFN(5, ("%s: nge_reset\n", sc->sc_dv.dv_xname)); 734 nge_reset(sc); 735 736 /* 737 * Get station address from the EEPROM. 738 */ 739 DPRINTFN(5, ("%s: nge_read_eeprom\n", sc->sc_dv.dv_xname)); 740 nge_read_eeprom(sc, (caddr_t)&eaddr[4], NGE_EE_NODEADDR, 1, 0); 741 nge_read_eeprom(sc, (caddr_t)&eaddr[2], NGE_EE_NODEADDR + 1, 1, 0); 742 nge_read_eeprom(sc, (caddr_t)&eaddr[0], NGE_EE_NODEADDR + 2, 1, 0); 743 744 /* 745 * A NatSemi chip was detected. Inform the world. 746 */ 747 printf(", address %s\n", ether_sprintf(eaddr)); 748 749 bcopy(eaddr, &sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); 750 751 sc->sc_dmatag = pa->pa_dmat; 752 DPRINTFN(5, ("%s: bus_dmamem_alloc\n", sc->sc_dv.dv_xname)); 753 if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct nge_list_data), 754 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT | 755 BUS_DMA_ZERO)) { 756 printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname); 757 goto fail_2; 758 } 759 DPRINTFN(5, ("%s: bus_dmamem_map\n", sc->sc_dv.dv_xname)); 760 if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, 761 sizeof(struct nge_list_data), &kva, 762 BUS_DMA_NOWAIT)) { 763 printf("%s: can't map dma buffers (%zd bytes)\n", 764 sc->sc_dv.dv_xname, sizeof(struct nge_list_data)); 765 goto fail_3; 766 } 767 DPRINTFN(5, ("%s: bus_dmamap_create\n", sc->sc_dv.dv_xname)); 768 if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct nge_list_data), 1, 769 sizeof(struct nge_list_data), 0, 770 BUS_DMA_NOWAIT, &dmamap)) { 771 printf("%s: can't create dma map\n", sc->sc_dv.dv_xname); 772 goto fail_4; 773 } 774 DPRINTFN(5, ("%s: bus_dmamap_load\n", sc->sc_dv.dv_xname)); 775 if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva, 776 sizeof(struct nge_list_data), NULL, 777 BUS_DMA_NOWAIT)) { 778 goto fail_5; 779 } 780 781 DPRINTFN(5, ("%s: bzero\n", sc->sc_dv.dv_xname)); 782 sc->nge_ldata = (struct nge_list_data *)kva; 783 784 ifp = &sc->arpcom.ac_if; 785 ifp->if_softc = sc; 786 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 787 
ifp->if_ioctl = nge_ioctl; 788 ifp->if_start = nge_start; 789 ifp->if_watchdog = nge_watchdog; 790 ifp->if_hardmtu = NGE_JUMBO_MTU; 791 ifq_init_maxlen(&ifp->if_snd, NGE_TX_LIST_CNT - 1); 792 DPRINTFN(5, ("%s: bcopy\n", sc->sc_dv.dv_xname)); 793 bcopy(sc->sc_dv.dv_xname, ifp->if_xname, IFNAMSIZ); 794 795 ifp->if_capabilities = IFCAP_VLAN_MTU; 796 797#if NVLAN > 0 798 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 799#endif 800 801 /* 802 * Do MII setup. 803 */ 804 DPRINTFN(5, ("%s: mii setup\n", sc->sc_dv.dv_xname)); 805 if (CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) { 806 DPRINTFN(5, ("%s: TBI mode\n", sc->sc_dv.dv_xname)); 807 sc->nge_tbi = 1; 808 809 ifmedia_init(&sc->nge_ifmedia, 0, nge_ifmedia_tbi_upd, 810 nge_ifmedia_tbi_sts); 811 812 ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_NONE, 0, NULL), 813 ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 814 ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX, 815 0, NULL); 816 ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 817 818 ifmedia_set(&sc->nge_ifmedia, IFM_ETHER|IFM_AUTO); 819 820 CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO) 821 | NGE_GPIO_GP4_OUT 822 | NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB 823 | NGE_GPIO_GP3_OUTENB | NGE_GPIO_GP4_OUTENB 824 | NGE_GPIO_GP5_OUTENB); 825 826 NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000); 827 } else { 828 sc->nge_mii.mii_ifp = ifp; 829 sc->nge_mii.mii_readreg = nge_miibus_readreg; 830 sc->nge_mii.mii_writereg = nge_miibus_writereg; 831 sc->nge_mii.mii_statchg = nge_miibus_statchg; 832 833 ifmedia_init(&sc->nge_mii.mii_media, 0, nge_ifmedia_mii_upd, 834 nge_ifmedia_mii_sts); 835 mii_attach(&sc->sc_dv, &sc->nge_mii, 0xffffffff, MII_PHY_ANY, 836 MII_OFFSET_ANY, 0); 837 838 if (LIST_FIRST(&sc->nge_mii.mii_phys) == NULL) { 839 840 printf("%s: no PHY found!\n", sc->sc_dv.dv_xname); 841 ifmedia_add(&sc->nge_mii.mii_media, 842 IFM_ETHER|IFM_MANUAL, 0, NULL); 843 ifmedia_set(&sc->nge_mii.mii_media, 844 IFM_ETHER|IFM_MANUAL); 845 } 846 else 847 
ifmedia_set(&sc->nge_mii.mii_media, 848 IFM_ETHER|IFM_AUTO); 849 } 850 851 /* 852 * Call MI attach routine. 853 */ 854 DPRINTFN(5, ("%s: if_attach\n", sc->sc_dv.dv_xname)); 855 if_attach(ifp); 856 DPRINTFN(5, ("%s: ether_ifattach\n", sc->sc_dv.dv_xname)); 857 ether_ifattach(ifp); 858 DPRINTFN(5, ("%s: timeout_set\n", sc->sc_dv.dv_xname)); 859 timeout_set(&sc->nge_timeout, nge_tick, sc); 860 timeout_add_sec(&sc->nge_timeout, 1); 861 return; 862 863fail_5: 864 bus_dmamap_destroy(sc->sc_dmatag, dmamap); 865 866fail_4: 867 bus_dmamem_unmap(sc->sc_dmatag, kva, 868 sizeof(struct nge_list_data)); 869 870fail_3: 871 bus_dmamem_free(sc->sc_dmatag, &seg, rseg); 872 873fail_2: 874 pci_intr_disestablish(pc, sc->nge_intrhand); 875 876fail_1: 877 bus_space_unmap(sc->nge_btag, sc->nge_bhandle, size); 878} 879 880/* 881 * Initialize the transmit descriptors. 882 */ 883int 884nge_list_tx_init(struct nge_softc *sc) 885{ 886 struct nge_list_data *ld; 887 struct nge_ring_data *cd; 888 int i; 889 890 cd = &sc->nge_cdata; 891 ld = sc->nge_ldata; 892 893 for (i = 0; i < NGE_TX_LIST_CNT; i++) { 894 if (i == (NGE_TX_LIST_CNT - 1)) { 895 ld->nge_tx_list[i].nge_nextdesc = 896 &ld->nge_tx_list[0]; 897 ld->nge_tx_list[i].nge_next = 898 VTOPHYS(&ld->nge_tx_list[0]); 899 } else { 900 ld->nge_tx_list[i].nge_nextdesc = 901 &ld->nge_tx_list[i + 1]; 902 ld->nge_tx_list[i].nge_next = 903 VTOPHYS(&ld->nge_tx_list[i + 1]); 904 } 905 ld->nge_tx_list[i].nge_mbuf = NULL; 906 ld->nge_tx_list[i].nge_ptr = 0; 907 ld->nge_tx_list[i].nge_ctl = 0; 908 } 909 910 cd->nge_tx_prod = cd->nge_tx_cons = cd->nge_tx_cnt = 0; 911 912 return(0); 913} 914 915 916/* 917 * Initialize the RX descriptors and allocate mbufs for them. Note that 918 * we arrange the descriptors in a closed ring, so that the last descriptor 919 * points back to the first. 
920 */ 921int 922nge_list_rx_init(struct nge_softc *sc) 923{ 924 struct nge_list_data *ld; 925 struct nge_ring_data *cd; 926 int i; 927 928 ld = sc->nge_ldata; 929 cd = &sc->nge_cdata; 930 931 for (i = 0; i < NGE_RX_LIST_CNT; i++) { 932 if (nge_newbuf(sc, &ld->nge_rx_list[i], NULL) == ENOBUFS) 933 return(ENOBUFS); 934 if (i == (NGE_RX_LIST_CNT - 1)) { 935 ld->nge_rx_list[i].nge_nextdesc = 936 &ld->nge_rx_list[0]; 937 ld->nge_rx_list[i].nge_next = 938 VTOPHYS(&ld->nge_rx_list[0]); 939 } else { 940 ld->nge_rx_list[i].nge_nextdesc = 941 &ld->nge_rx_list[i + 1]; 942 ld->nge_rx_list[i].nge_next = 943 VTOPHYS(&ld->nge_rx_list[i + 1]); 944 } 945 } 946 947 cd->nge_rx_prod = 0; 948 949 return(0); 950} 951 952/* 953 * Initialize an RX descriptor and attach an MBUF cluster. 954 */ 955int 956nge_newbuf(struct nge_softc *sc, struct nge_desc *c, struct mbuf *m) 957{ 958 struct mbuf *m_new = NULL; 959 960 if (m == NULL) { 961 m_new = MCLGETL(NULL, M_DONTWAIT, NGE_MCLBYTES); 962 if (m_new == NULL) 963 return (ENOBUFS); 964 } else { 965 /* 966 * We're re-using a previously allocated mbuf; 967 * be sure to re-init pointers and lengths to 968 * default values. 969 */ 970 m_new = m; 971 m_new->m_data = m_new->m_ext.ext_buf; 972 } 973 974 m_new->m_len = m_new->m_pkthdr.len = NGE_MCLBYTES; 975 m_adj(m_new, sizeof(u_int64_t)); 976 977 c->nge_mbuf = m_new; 978 c->nge_ptr = VTOPHYS(mtod(m_new, caddr_t)); 979 DPRINTFN(7,("%s: c->nge_ptr=%#x\n", sc->sc_dv.dv_xname, 980 c->nge_ptr)); 981 c->nge_ctl = m_new->m_len; 982 c->nge_extsts = 0; 983 984 return(0); 985} 986 987/* 988 * A frame has been uploaded: pass the resulting mbuf chain up to 989 * the higher level protocols. 
 */
void
nge_rxeof(struct nge_softc *sc)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	struct ifnet *ifp;
	struct nge_desc *cur_rx;
	int i, total_len = 0;
	u_int32_t rxstat;

	ifp = &sc->arpcom.ac_if;
	i = sc->nge_cdata.nge_rx_prod;

	/* Walk descriptors the chip has handed back to the host. */
	while (NGE_OWNDESC(&sc->nge_ldata->nge_rx_list[i])) {
		struct mbuf *m0 = NULL;
		u_int32_t extsts;

		cur_rx = &sc->nge_ldata->nge_rx_list[i];
		rxstat = cur_rx->nge_rxstat;
		extsts = cur_rx->nge_extsts;
		m = cur_rx->nge_mbuf;
		cur_rx->nge_mbuf = NULL;
		total_len = NGE_RXBYTES(cur_rx);
		NGE_INC(i, NGE_RX_LIST_CNT);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (!(rxstat & NGE_CMDSTS_PKT_OK)) {
#if NVLAN > 0
			if ((rxstat & NGE_RXSTAT_RUNT) &&
			    total_len >= (ETHER_MIN_LEN - ETHER_CRC_LEN -
			    ETHER_VLAN_ENCAP_LEN)) {
				/*
				 * Workaround a hardware bug. Accept runt
				 * frames if its length is larger than or
				 * equal to 56.
				 */
			} else {
#endif
				ifp->if_ierrors++;
				nge_newbuf(sc, cur_rx, m);
				continue;
#if NVLAN > 0
			}
#endif
		}

		/*
		 * Ok. NatSemi really screwed up here. This is the
		 * only gigE chip I know of with alignment constraints
		 * on receive buffers. RX buffers must be 64-bit aligned.
		 */
#ifndef __STRICT_ALIGNMENT
		/*
		 * By popular demand, ignore the alignment problems
		 * on the Intel x86 platform. The performance hit
		 * incurred due to unaligned accesses is much smaller
		 * than the hit produced by forcing buffer copies all
		 * the time, especially with jumbo frames. We still
		 * need to fix up the alignment everywhere else though.
		 */
		if (nge_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
#endif
			/* Copy path: replacement buffer unavailable
			 * (or strict-alignment arch); duplicate the
			 * frame and recycle the original cluster. */
			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN);
			nge_newbuf(sc, cur_rx, m);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m_adj(m0, ETHER_ALIGN);
			m = m0;
#ifndef __STRICT_ALIGNMENT
		} else {
			/* Zero-copy path: hand the filled cluster up. */
			m->m_pkthdr.len = m->m_len = total_len;
		}
#endif

#if NVLAN > 0
		/* Hardware-extracted VLAN tag, if present. */
		if (extsts & NGE_RXEXTSTS_VLANPKT) {
			m->m_pkthdr.ether_vtag =
			    ntohs(extsts & NGE_RXEXTSTS_VTCI);
			m->m_flags |= M_VLANTAG;
		}
#endif

		/* Do IP checksum checking. */
		if (extsts & NGE_RXEXTSTS_IPPKT) {
			if (!(extsts & NGE_RXEXTSTS_IPCSUMERR))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if ((extsts & NGE_RXEXTSTS_TCPPKT) &&
			    (!(extsts & NGE_RXEXTSTS_TCPCSUMERR)))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
			else if ((extsts & NGE_RXEXTSTS_UDPPKT) &&
			    (!(extsts & NGE_RXEXTSTS_UDPCSUMERR)))
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
		}

		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);

	sc->nge_cdata.nge_rx_prod = i;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

void
nge_txeof(struct nge_softc *sc)
{
	struct nge_desc *cur_tx;
	struct ifnet *ifp;
	u_int32_t idx;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->nge_cdata.nge_tx_cons;
	while (idx != sc->nge_cdata.nge_tx_prod) {
		cur_tx = &sc->nge_ldata->nge_tx_list[idx];

		/* Stop at the first descriptor the chip still owns. */
		if (NGE_OWNDESC(cur_tx))
			break;

		/* Intermediate fragment of a multi-descriptor frame:
		 * only the last fragment carries status and the mbuf. */
		if (cur_tx->nge_ctl & NGE_CMDSTS_MORE) {
			sc->nge_cdata.nge_tx_cnt--;
			NGE_INC(idx, NGE_TX_LIST_CNT);
			continue;
		}

		if (!(cur_tx->nge_ctl & NGE_CMDSTS_PKT_OK)) {
			ifp->if_oerrors++;
			if (cur_tx->nge_txstat & NGE_TXSTAT_EXCESSCOLLS)
				ifp->if_collisions++;
			if (cur_tx->nge_txstat & NGE_TXSTAT_OUTOFWINCOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions +=
		    (cur_tx->nge_txstat & NGE_TXSTAT_COLLCNT) >> 16;

		if (cur_tx->nge_mbuf != NULL) {
			m_freem(cur_tx->nge_mbuf);
			cur_tx->nge_mbuf = NULL;
			ifq_clr_oactive(&ifp->if_snd);
		}

		sc->nge_cdata.nge_tx_cnt--;
		NGE_INC(idx, NGE_TX_LIST_CNT);
	}

	sc->nge_cdata.nge_tx_cons = idx;

	/* All pending frames reaped: cancel the watchdog. */
	if (idx == sc->nge_cdata.nge_tx_prod)
		ifp->if_timer = 0;
}

/*
 * Once-a-second timer: poll for link until it comes up, both in
 * TBI mode (by reading the TBI registers directly) and in MII mode
 * (via mii_tick()). Re-arms itself every second.
 */
void
nge_tick(void *xsc)
{
	struct nge_softc *sc = xsc;
	struct mii_data *mii = &sc->nge_mii;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int s;

	s = splnet();

	DPRINTFN(10, ("%s: nge_tick: link=%d\n", sc->sc_dv.dv_xname,
	    sc->nge_link));

	timeout_add_sec(&sc->nge_timeout, 1);
	if (sc->nge_link) {
		splx(s);
		return;
	}

	if (sc->nge_tbi) {
		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
		    == IFM_AUTO) {
			u_int32_t bmsr, anlpar, txcfg, rxcfg;

			bmsr = CSR_READ_4(sc, NGE_TBI_BMSR);
			DPRINTFN(2, ("%s: nge_tick: bmsr=%#x\n",
			    sc->sc_dv.dv_xname, bmsr));

			/* Autonegotiation not finished yet; restart it. */
			if (!(bmsr & NGE_TBIBMSR_ANEG_DONE)) {
				CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);

				splx(s);
				return;
			}

			anlpar = CSR_READ_4(sc, NGE_TBI_ANLPAR);
			txcfg = CSR_READ_4(sc, NGE_TX_CFG);
			rxcfg = CSR_READ_4(sc, NGE_RX_CFG);

			DPRINTFN(2, ("%s: nge_tick: anlpar=%#x, txcfg=%#x, "
			    "rxcfg=%#x\n", sc->sc_dv.dv_xname, anlpar,
			    txcfg, rxcfg));

			/* Program duplex from the link partner's ability. */
			if (anlpar == 0 || anlpar & NGE_TBIANAR_FDX) {
				txcfg |= (NGE_TXCFG_IGN_HBEAT|
				    NGE_TXCFG_IGN_CARR);
				rxcfg |= NGE_RXCFG_RX_FDX;
			} else {
				txcfg &= ~(NGE_TXCFG_IGN_HBEAT|
				    NGE_TXCFG_IGN_CARR);
				rxcfg &= ~(NGE_RXCFG_RX_FDX);
			}
			txcfg |= NGE_TXCFG_AUTOPAD;
			CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
			CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);
		}

		DPRINTF(("%s: gigabit link up\n", sc->sc_dv.dv_xname));
		sc->nge_link++;
		if (!ifq_empty(&ifp->if_snd))
			nge_start(ifp);
	} else {
		mii_tick(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->nge_link++;
			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
				DPRINTF(("%s: gigabit link up\n",
				    sc->sc_dv.dv_xname));
			if (!ifq_empty(&ifp->if_snd))
				nge_start(ifp);
		}

	}

	splx(s);
}

/*
 * Interrupt handler: drain the interrupt status register and
 * dispatch to the RX/TX completion routines. Returns nonzero if
 * any interrupt was ours.
 */
int
nge_intr(void *arg)
{
	struct nge_softc *sc;
	struct ifnet *ifp;
	u_int32_t status;
	int claimed = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		nge_stop(sc);
		return (0);
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, NGE_IER, 0);

	/* Data LED on for TBI mode */
	if(sc->nge_tbi)
		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
		    | NGE_GPIO_GP3_OUT);

	for (;;) {
		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, NGE_ISR);

		if ((status & NGE_INTRS) == 0)
			break;

		claimed = 1;

		if ((status & NGE_ISR_TX_DESC_OK) ||
		    (status & NGE_ISR_TX_ERR) ||
		    (status & NGE_ISR_TX_OK) ||
		    (status & NGE_ISR_TX_IDLE))
			nge_txeof(sc);

		if ((status & NGE_ISR_RX_DESC_OK) ||
		    (status & NGE_ISR_RX_ERR) ||
		    (status & NGE_ISR_RX_OFLOW) ||
		    (status & NGE_ISR_RX_FIFO_OFLOW) ||
		    (status & NGE_ISR_RX_IDLE) ||
		    (status & NGE_ISR_RX_OK))
			nge_rxeof(sc);

		/* RX engine stalled (ran out of descriptors): kick it. */
		if ((status & NGE_ISR_RX_IDLE))
			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

		/* Fatal error: full reset and reinitialization. */
		if (status & NGE_ISR_SYSERR) {
			nge_reset(sc);
			ifp->if_flags &= ~IFF_RUNNING;
			nge_init(sc);
		}

#if 0
		/*
		 * XXX: nge_tick() is not ready to be called this way
		 * it screws up the aneg timeout because mii_tick() is
		 * only to be called once per second.
		 */
		if (status & NGE_IMR_PHY_INTR) {
			sc->nge_link = 0;
			nge_tick(sc);
		}
#endif
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, NGE_IER, 1);

	if (!ifq_empty(&ifp->if_snd))
		nge_start(ifp);

	/* Data LED off for TBI mode */
	if(sc->nge_tbi)
		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
		    & ~NGE_GPIO_GP3_OUT);

	return claimed;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int
nge_encap(struct nge_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	struct nge_desc *f = NULL;
	struct mbuf *m;
	int frag, cur, cnt = 0;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
1335 */ 1336 m = m_head; 1337 cur = frag = *txidx; 1338 1339 for (m = m_head; m != NULL; m = m->m_next) { 1340 if (m->m_len != 0) { 1341 if ((NGE_TX_LIST_CNT - 1342 (sc->nge_cdata.nge_tx_cnt + cnt)) < 2) 1343 return(ENOBUFS); 1344 f = &sc->nge_ldata->nge_tx_list[frag]; 1345 f->nge_ctl = NGE_CMDSTS_MORE | m->m_len; 1346 f->nge_ptr = VTOPHYS(mtod(m, vaddr_t)); 1347 DPRINTFN(7,("%s: f->nge_ptr=%#x\n", 1348 sc->sc_dv.dv_xname, f->nge_ptr)); 1349 if (cnt != 0) 1350 f->nge_ctl |= NGE_CMDSTS_OWN; 1351 cur = frag; 1352 NGE_INC(frag, NGE_TX_LIST_CNT); 1353 cnt++; 1354 } 1355 } 1356 1357 if (m != NULL) 1358 return(ENOBUFS); 1359 1360 sc->nge_ldata->nge_tx_list[*txidx].nge_extsts = 0; 1361 1362#if NVLAN > 0 1363 if (m_head->m_flags & M_VLANTAG) { 1364 sc->nge_ldata->nge_tx_list[cur].nge_extsts |= 1365 (NGE_TXEXTSTS_VLANPKT|htons(m_head->m_pkthdr.ether_vtag)); 1366 } 1367#endif 1368 1369 sc->nge_ldata->nge_tx_list[cur].nge_mbuf = m_head; 1370 sc->nge_ldata->nge_tx_list[cur].nge_ctl &= ~NGE_CMDSTS_MORE; 1371 sc->nge_ldata->nge_tx_list[*txidx].nge_ctl |= NGE_CMDSTS_OWN; 1372 sc->nge_cdata.nge_tx_cnt += cnt; 1373 *txidx = frag; 1374 1375 return(0); 1376} 1377 1378/* 1379 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1380 * to the mbuf data regions directly in the transmit lists. We also save a 1381 * copy of the pointers since the transmit list fragment pointers are 1382 * physical addresses. 
 */

void
nge_start(struct ifnet *ifp)
{
	struct nge_softc	*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;
	int			pkts = 0;

	sc = ifp->if_softc;

	/* No link yet: leave frames queued until nge_tick() sees link. */
	if (!sc->nge_link)
		return;

	idx = sc->nge_cdata.nge_tx_prod;

	if (ifq_is_oactive(&ifp->if_snd))
		return;

	/* Fill free TX descriptors until the queue or the ring runs out. */
	while(sc->nge_ldata->nge_tx_list[idx].nge_mbuf == NULL) {
		m_head = ifq_deq_begin(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/* Ring full: put the frame back and mark the queue active. */
		if (nge_encap(sc, m_head, &idx)) {
			ifq_deq_rollback(&ifp->if_snd, m_head);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* now we are committed to transmit the packet */
		ifq_deq_commit(&ifp->if_snd, m_head);
		pkts++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}
	if (pkts == 0)
		return;

	/* Transmit */
	sc->nge_cdata.nge_tx_prod = idx;
	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

/*
 * Bring the interface up: program the station address and RX filter,
 * build the RX/TX DMA rings, configure duplex/VLAN/checksum offload,
 * then enable interrupts and the RX/TX engines. The register write
 * order below follows the original bring-up sequence; do not reorder.
 */
void
nge_init(void *xsc)
{
	struct nge_softc	*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct mii_data		*mii;
	u_int32_t		txcfg, rxcfg;
	uint64_t		media;
	int			s;

	if (ifp->if_flags & IFF_RUNNING)
		return;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	nge_stop(sc);

	/* In TBI mode there is no MII; media comes from nge_ifmedia. */
	mii = sc->nge_tbi ? NULL : &sc->nge_mii;

	/* Set MAC address (three 16-bit words via the filter window). */
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
	    ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
	    ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
	    ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);

	/* Init circular RX list. */
	if (nge_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc->sc_dv.dv_xname);
		nge_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	nge_list_tx_init(sc);

	/*
	 * For the NatSemi chip, we have to explicitly enable the
	 * reception of ARP frames, as well as turn on the 'perfect
	 * match' filter where we store the station address, otherwise
	 * we won't receive unicasts meant for this host.
	 */
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ARP);
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_PERFECT);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
	else
		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
	else
		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);

	/*
	 * Load the multicast filter.
	 */
	nge_setmulti(sc);

	/* Turn the receive filter on */
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ENABLE);

	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, NGE_RX_LISTPTR,
	    VTOPHYS(&sc->nge_ldata->nge_rx_list[0]));
	CSR_WRITE_4(sc, NGE_TX_LISTPTR,
	    VTOPHYS(&sc->nge_ldata->nge_tx_list[0]));

	/* Set RX configuration */
	CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG);

	/*
	 * Enable hardware checksum validation for all IPv4
	 * packets, do not reject packets with bad checksums.
	 */
	CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB);

	/*
	 * If VLAN support is enabled, tell the chip to detect
	 * and strip VLAN tag info from received frames. The tag
	 * will be provided in the extsts field in the RX descriptors.
	 */
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL,
		    NGE_VIPRXCTL_TAG_DETECT_ENB | NGE_VIPRXCTL_TAG_STRIP_ENB);

	/* Set TX configuration */
	CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG);

	/*
	 * If VLAN support is enabled, tell the chip to insert
	 * VLAN tags on a per-packet basis as dictated by the
	 * code in the frame encapsulation routine.
	 */
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT);

	/* Set full/half duplex mode. */
	if (sc->nge_tbi)
		media = sc->nge_ifmedia.ifm_cur->ifm_media;
	else
		media = mii->mii_media_active;

	txcfg = CSR_READ_4(sc, NGE_TX_CFG);
	rxcfg = CSR_READ_4(sc, NGE_RX_CFG);

	DPRINTFN(4, ("%s: nge_init txcfg=%#x, rxcfg=%#x\n",
	    sc->sc_dv.dv_xname, txcfg, rxcfg));

	if ((media & IFM_GMASK) == IFM_FDX) {
		txcfg |= (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
		rxcfg |= (NGE_RXCFG_RX_FDX);
	} else {
		txcfg &= ~(NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
		rxcfg &= ~(NGE_RXCFG_RX_FDX);
	}

	txcfg |= NGE_TXCFG_AUTOPAD;

	CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
	CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);

	/* Start the one-second link poll timer. */
	nge_tick(sc);

	/*
	 * Enable the delivery of PHY interrupts based on
	 * link/speed/duplex status changes and enable return
	 * of extended status information in the DMA descriptors,
	 * required for checksum offloading.
	 */
	NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD|NGE_CFG_PHYINTR_LNK|
	    NGE_CFG_PHYINTR_DUP|NGE_CFG_EXTSTS_ENB);

	DPRINTFN(1, ("%s: nge_init: config=%#x\n", sc->sc_dv.dv_xname,
	    CSR_READ_4(sc, NGE_CFG)));

	/*
	 * Configure interrupt holdoff (moderation). We can
	 * have the chip delay interrupt delivery for a certain
	 * period. Units are in 100us, and the max setting
	 * is 25500us (0xFF x 100us). Default is a 100us holdoff.
	 */
	CSR_WRITE_4(sc, NGE_IHR, 0x01);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS);
	CSR_WRITE_4(sc, NGE_IER, 1);

	/* Enable receiver and transmitter. */
	NGE_CLRBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

	if (sc->nge_tbi)
		nge_ifmedia_tbi_upd(ifp);
	else
		nge_ifmedia_mii_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	splx(s);
}

/*
 * Set mii media options.
 */
int
nge_ifmedia_mii_upd(struct ifnet *ifp)
{
	struct nge_softc	*sc = ifp->if_softc;
	struct mii_data		*mii = &sc->nge_mii;

	DPRINTFN(2, ("%s: nge_ifmedia_mii_upd\n", sc->sc_dv.dv_xname));

	/* Force a fresh link poll; nge_tick() re-detects link. */
	sc->nge_link = 0;

	/* Reset every attached PHY before changing media. */
	if (mii->mii_instance) {
		struct mii_softc	*miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return(0);
}

/*
 * Report current mii media status.
 */
void
nge_ifmedia_mii_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nge_softc	*sc = ifp->if_softc;
	struct mii_data		*mii = &sc->nge_mii;

	DPRINTFN(2, ("%s: nge_ifmedia_mii_sts\n", sc->sc_dv.dv_xname));

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Set mii media options.
 *
 * TBI variant: for autoselect, advertise both duplexes and restart
 * autonegotiation; for fixed media, program duplex directly into the
 * TX/RX config registers.
 */
int
nge_ifmedia_tbi_upd(struct ifnet *ifp)
{
	struct nge_softc	*sc = ifp->if_softc;

	DPRINTFN(2, ("%s: nge_ifmedia_tbi_upd\n", sc->sc_dv.dv_xname));

	/* Force a fresh link poll; nge_tick() re-detects link. */
	sc->nge_link = 0;

	if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
	    == IFM_AUTO) {
		u_int32_t anar, bmcr;
		anar = CSR_READ_4(sc, NGE_TBI_ANAR);
		anar |= (NGE_TBIANAR_HDX | NGE_TBIANAR_FDX);
		CSR_WRITE_4(sc, NGE_TBI_ANAR, anar);

		/* Pulse RESTART_ANEG: set it, then clear it again. */
		bmcr = CSR_READ_4(sc, NGE_TBI_BMCR);
		bmcr |= (NGE_TBIBMCR_ENABLE_ANEG|NGE_TBIBMCR_RESTART_ANEG);
		CSR_WRITE_4(sc, NGE_TBI_BMCR, bmcr);

		bmcr &= ~(NGE_TBIBMCR_RESTART_ANEG);
		CSR_WRITE_4(sc, NGE_TBI_BMCR, bmcr);
	} else {
		u_int32_t txcfg, rxcfg;
		txcfg = CSR_READ_4(sc, NGE_TX_CFG);
		rxcfg = CSR_READ_4(sc, NGE_RX_CFG);

		if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
		    == IFM_FDX) {
			txcfg |= NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR;
			rxcfg |= NGE_RXCFG_RX_FDX;
		} else {
			txcfg &= ~(NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
			rxcfg &= ~(NGE_RXCFG_RX_FDX);
		}

		txcfg |= NGE_TXCFG_AUTOPAD;
		CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
		CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);
	}

	/* Data LED off (GP3 is the TBI-mode data LED, cf. nge_intr()). */
	NGE_CLRBIT(sc, NGE_GPIO, NGE_GPIO_GP3_OUT);

	return(0);
}

/*
 * Report current tbi media status.
 */
void
nge_ifmedia_tbi_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nge_softc	*sc = ifp->if_softc;
	u_int32_t		bmcr;

	bmcr = CSR_READ_4(sc, NGE_TBI_BMCR);

	if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media) == IFM_AUTO) {
		u_int32_t bmsr = CSR_READ_4(sc, NGE_TBI_BMSR);
		DPRINTFN(2, ("%s: nge_ifmedia_tbi_sts bmsr=%#x, bmcr=%#x\n",
		    sc->sc_dv.dv_xname, bmsr, bmcr));

		/* Autonegotiation incomplete: report no active media. */
		if (!(bmsr & NGE_TBIBMSR_ANEG_DONE)) {
			ifmr->ifm_active = IFM_ETHER|IFM_NONE;
			ifmr->ifm_status = IFM_AVALID;
			return;
		}
	} else {
		DPRINTFN(2, ("%s: nge_ifmedia_tbi_sts bmcr=%#x\n",
		    sc->sc_dv.dv_xname, bmcr));
	}

	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER|IFM_1000_SX;

	if (bmcr & NGE_TBIBMCR_LOOPBACK)
		ifmr->ifm_active |= IFM_LOOP;

	if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media) == IFM_AUTO) {
		u_int32_t anlpar = CSR_READ_4(sc, NGE_TBI_ANLPAR);
		DPRINTFN(2, ("%s: nge_ifmedia_tbi_sts anlpar=%#x\n",
		    sc->sc_dv.dv_xname, anlpar));

		/* Duplex comes from the partner's advertisement;
		 * neither bit set defaults to full duplex. */
		ifmr->ifm_active |= IFM_AUTO;
		if (anlpar & NGE_TBIANLPAR_FDX) {
			ifmr->ifm_active |= IFM_FDX;
		} else if (anlpar & NGE_TBIANLPAR_HDX) {
			ifmr->ifm_active |= IFM_HDX;
		} else
			ifmr->ifm_active |= IFM_FDX;

	} else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK) == IFM_FDX)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;

}

/*
 * Handle socket ioctls: interface address/flag changes and media
 * get/set. Runs at splnet(). ENETRESET from ether_ioctl() means the
 * multicast filter must be reprogrammed.
 */
int
nge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct nge_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		nge_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Promiscuous-mode transitions are handled by
			 * flipping RX filter bits only, so the interface
			 * does not have to be torn down and rebuilt.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->nge_if_flags & IFF_PROMISC)) {
				NGE_SETBIT(sc, NGE_RXFILT_CTL,
				    NGE_RXFILTCTL_ALLPHYS|
				    NGE_RXFILTCTL_ALLMULTI);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->nge_if_flags & IFF_PROMISC) {
				NGE_CLRBIT(sc, NGE_RXFILT_CTL,
				    NGE_RXFILTCTL_ALLPHYS);
				if (!(ifp->if_flags & IFF_ALLMULTI))
					NGE_CLRBIT(sc, NGE_RXFILT_CTL,
					    NGE_RXFILTCTL_ALLMULTI);
			} else {
				/* Any other change: full reinit. */
				ifp->if_flags &= ~IFF_RUNNING;
				nge_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nge_stop(sc);
		}
		/* Remember the flags for the next transition check. */
		sc->nge_if_flags = ifp->if_flags;
		error = 0;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		if (sc->nge_tbi) {
			error = ifmedia_ioctl(ifp, ifr, &sc->nge_ifmedia,
			    command);
		} else {
			mii = &sc->nge_mii;
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
			    command);
		}
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			nge_setmulti(sc);
		error = 0;
	}

	splx(s);
	return(error);
}

/*
 * Watchdog: the TX timeout set in nge_start() expired without
 * completion; count the error and fully reset/reinit the chip.
 */
void
nge_watchdog(struct ifnet *ifp)
{
	struct nge_softc	*sc;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dv.dv_xname);

	nge_stop(sc);
	nge_reset(sc);
	ifp->if_flags &= ~IFF_RUNNING;
	nge_init(sc);

	if (!ifq_empty(&ifp->if_snd))
		nge_start(ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
nge_stop(struct nge_softc *sc)
{
	int			i;
	struct ifnet		*ifp;
	struct mii_data		*mii;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;
	/* No MII in TBI mode; mii stays NULL and is never dereferenced. */
	if (sc->nge_tbi) {
		mii = NULL;
	} else {
		mii = &sc->nge_mii;
	}

	timeout_del(&sc->nge_timeout);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Mask and disable interrupts, halt both DMA engines. */
	CSR_WRITE_4(sc, NGE_IER, 0);
	CSR_WRITE_4(sc, NGE_IMR, 0);
	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
	DELAY(1000);
	CSR_WRITE_4(sc, NGE_TX_LISTPTR, 0);
	CSR_WRITE_4(sc, NGE_RX_LISTPTR, 0);

	if (!sc->nge_tbi)
		mii_down(mii);

	sc->nge_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
		if (sc->nge_ldata->nge_rx_list[i].nge_mbuf != NULL) {
			m_freem(sc->nge_ldata->nge_rx_list[i].nge_mbuf);
			sc->nge_ldata->nge_rx_list[i].nge_mbuf = NULL;
		}
	}
	bzero(&sc->nge_ldata->nge_rx_list,
	    sizeof(sc->nge_ldata->nge_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
		if (sc->nge_ldata->nge_tx_list[i].nge_mbuf != NULL) {
			m_freem(sc->nge_ldata->nge_tx_list[i].nge_mbuf);
			sc->nge_ldata->nge_tx_list[i].nge_mbuf = NULL;
		}
	}

	bzero(&sc->nge_ldata->nge_tx_list,
	    sizeof(sc->nge_ldata->nge_tx_list));
}

/* autoconf(9) glue: probe/attach entry points and device class. */
const struct cfattach nge_ca = {
	sizeof(struct nge_softc), nge_probe, nge_attach
};

struct cfdriver nge_cd = {
	NULL, "nge", DV_IFNET
};