if_re.c revision 262391
/*-
 * Copyright (c) 1997, 1998-2003
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/re/if_re.c 262391 2014-02-23 21:08:41Z marius $");

/*
 * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * This driver is designed to support RealTek's next generation of
 * 10/100 and 10/100/1000 PCI ethernet controllers.  There are currently
 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
 * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
 *
 * The 8139C+ is a 10/100 ethernet chip.  It is backwards compatible
 * with the older 8139 family, however it also supports a special
 * C+ mode of operation that provides several new performance enhancing
 * features.  These include:
 *
 *	o Descriptor based DMA mechanism.  Each descriptor represents
 *	  a single packet fragment.  Data buffers may be aligned on
 *	  any byte boundary.
 *
 *	o 64-bit DMA
 *
 *	o TCP/IP checksum offload for both RX and TX
 *
 *	o High and normal priority transmit DMA rings
 *
 *	o VLAN tag insertion and extraction
 *
 *	o TCP large send (segmentation offload)
 *
 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY.  The C+
 * programming API is fairly straightforward.  The RX filtering, EEPROM
 * access and PHY access is the same as it is on the older 8139 series
 * chips.
 *
 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC.  It has almost the
 * same programming API and feature set as the 8139C+ with the following
 * differences and additions:
 *
 *	o 1000Mbps mode
 *
 *	o Jumbo frames
 *
 *	o GMII and TBI ports/registers for interfacing with copper
 *	  or fiber PHYs
 *
 *	o RX and TX DMA rings can have up to 1024 descriptors
 *	  (the 8139C+ allows a maximum of 64)
 *
 *	o Slight differences in register layout from the 8139C+
 *
 * The TX start and timer interrupt registers are at different locations
 * on the 8169 than they are on the 8139C+.  Also, the status word in the
 * RX descriptor has a slightly different bit layout.  The 8169 does not
 * have a built-in PHY.  Most reference boards use a Marvell 88E1000 'Alaska'
 * copper gigE PHY.
 *
 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
 * (the 'S' stands for 'single-chip').  These devices have the same
 * programming API as the older 8169, but also have some vendor-specific
 * registers for the on-board PHY.  The 8110S is a LAN-on-motherboard
 * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
 *
 * This driver takes advantage of the RX and TX checksum offload and
 * VLAN tag insertion/extraction features.  It also implements TX
 * interrupt moderation using the timer interrupt registers, which
 * significantly reduces TX interrupt load.  There is also support
 * for jumbo frames, however the 8169/8169S/8110S can not transmit
 * jumbo frames larger than 7440, so the max MTU possible with this
 * driver is 7422 bytes.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <pci/if_rlreg.h>

MODULE_DEPEND(re, pci, 1, 1, 1);
MODULE_DEPEND(re, ether, 1, 1, 1);
MODULE_DEPEND(re, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/* Tunables. */
static int intr_filter = 0;
TUNABLE_INT("hw.re.intr_filter", &intr_filter);
static int msi_disable = 0;
TUNABLE_INT("hw.re.msi_disable", &msi_disable);
static int msix_disable = 0;
TUNABLE_INT("hw.re.msix_disable", &msix_disable);
static int prefer_iomap = 0;
TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap);

#define RE_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types and their names.
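 *
 * (Roughly: each re_devs[] entry pairs a PCI vendor/device ID with the
 * description string that re_probe() reports when it matches; the third
 * field is unused here and always 0.  The re_hwrevs[] table that follows
 * maps the hardware revision read from RL_TXCFG in re_attach() to a chip
 * type, a revision name and the largest MTU that revision supports.)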
 */
static const struct rl_type re_devs[] = {
	{ DLINK_VENDORID, DLINK_DEVICEID_528T, 0,
	    "D-Link DGE-528(T) Gigabit Ethernet Adapter" },
	{ DLINK_VENDORID, DLINK_DEVICEID_530T_REVC, 0,
	    "D-Link DGE-530(T) Gigabit Ethernet Adapter" },
	{ RT_VENDORID, RT_DEVICEID_8139, 0,
	    "RealTek 8139C+ 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8101E, 0,
	    "RealTek 810xE PCIe 10/100baseTX" },
	{ RT_VENDORID, RT_DEVICEID_8168, 0,
	    "RealTek 8168/8111 B/C/CP/D/DP/E/F/G PCIe Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8169, 0,
	    "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8169SC, 0,
	    "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
	{ COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0,
	    "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" },
	{ LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0,
	    "Linksys EG1032 (RTL8169S) Gigabit Ethernet" },
	{ USR_VENDORID, USR_DEVICEID_997902, 0,
	    "US Robotics 997902 (RTL8169S) Gigabit Ethernet" }
};

static const struct rl_hwrev re_hwrevs[] = {
	{ RL_HWREV_8139, RL_8139, "", RL_MTU },
	{ RL_HWREV_8139A, RL_8139, "A", RL_MTU },
	{ RL_HWREV_8139AG, RL_8139, "A-G", RL_MTU },
	{ RL_HWREV_8139B, RL_8139, "B", RL_MTU },
	{ RL_HWREV_8130, RL_8139, "8130", RL_MTU },
	{ RL_HWREV_8139C, RL_8139, "C", RL_MTU },
	{ RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C", RL_MTU },
	{ RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+", RL_MTU },
	{ RL_HWREV_8168B_SPIN1, RL_8169, "8168", RL_JUMBO_MTU },
	{ RL_HWREV_8169, RL_8169, "8169", RL_JUMBO_MTU },
	{ RL_HWREV_8169S, RL_8169, "8169S", RL_JUMBO_MTU },
	{ RL_HWREV_8110S, RL_8169, "8110S", RL_JUMBO_MTU },
	{ RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB", RL_JUMBO_MTU },
	{ RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
	{ RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL", RL_JUMBO_MTU },
	{ RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
	{ RL_HWREV_8100, RL_8139, "8100", RL_MTU },
	{ RL_HWREV_8101, RL_8139, "8101", RL_MTU },
	{ RL_HWREV_8100E, RL_8169, "8100E", RL_MTU },
	{ RL_HWREV_8101E, RL_8169, "8101E", RL_MTU },
	{ RL_HWREV_8102E, RL_8169, "8102E", RL_MTU },
	{ RL_HWREV_8102EL, RL_8169, "8102EL", RL_MTU },
	{ RL_HWREV_8102EL_SPIN1, RL_8169, "8102EL", RL_MTU },
	{ RL_HWREV_8103E, RL_8169, "8103E", RL_MTU },
	{ RL_HWREV_8401E, RL_8169, "8401E", RL_MTU },
	{ RL_HWREV_8402, RL_8169, "8402", RL_MTU },
	{ RL_HWREV_8105E, RL_8169, "8105E", RL_MTU },
	{ RL_HWREV_8105E_SPIN1, RL_8169, "8105E", RL_MTU },
	{ RL_HWREV_8106E, RL_8169, "8106E", RL_MTU },
	{ RL_HWREV_8168B_SPIN2, RL_8169, "8168", RL_JUMBO_MTU },
	{ RL_HWREV_8168B_SPIN3, RL_8169, "8168", RL_JUMBO_MTU },
	{ RL_HWREV_8168C, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
	{ RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
	{ RL_HWREV_8168CP, RL_8169, "8168CP/8111CP", RL_JUMBO_MTU_6K },
	{ RL_HWREV_8168D, RL_8169, "8168D/8111D", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168DP, RL_8169, "8168DP/8111DP", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168E, RL_8169, "8168E/8111E", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168E_VL, RL_8169, "8168E/8111E-VL", RL_JUMBO_MTU_6K },
	{ RL_HWREV_8168EP, RL_8169, "8168EP/8111EP", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168F, RL_8169, "8168F/8111F", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168G, RL_8169, "8168G/8111G", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168GU, RL_8169, "8168GU/8111GU", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8411, RL_8169,
"8411", RL_JUMBO_MTU_9K}, 241 { RL_HWREV_8411B, RL_8169, "8411B", RL_JUMBO_MTU_9K}, 242 { 0, 0, NULL, 0 } 243}; 244 245static int re_probe (device_t); 246static int re_attach (device_t); 247static int re_detach (device_t); 248 249static int re_encap (struct rl_softc *, struct mbuf **); 250 251static void re_dma_map_addr (void *, bus_dma_segment_t *, int, int); 252static int re_allocmem (device_t, struct rl_softc *); 253static __inline void re_discard_rxbuf 254 (struct rl_softc *, int); 255static int re_newbuf (struct rl_softc *, int); 256static int re_jumbo_newbuf (struct rl_softc *, int); 257static int re_rx_list_init (struct rl_softc *); 258static int re_jrx_list_init (struct rl_softc *); 259static int re_tx_list_init (struct rl_softc *); 260#ifdef RE_FIXUP_RX 261static __inline void re_fixup_rx 262 (struct mbuf *); 263#endif 264static int re_rxeof (struct rl_softc *, int *); 265static void re_txeof (struct rl_softc *); 266#ifdef DEVICE_POLLING 267static int re_poll (struct ifnet *, enum poll_cmd, int); 268static int re_poll_locked (struct ifnet *, enum poll_cmd, int); 269#endif 270static int re_intr (void *); 271static void re_intr_msi (void *); 272static void re_tick (void *); 273static void re_int_task (void *, int); 274static void re_start (struct ifnet *); 275static void re_start_locked (struct ifnet *); 276static int re_ioctl (struct ifnet *, u_long, caddr_t); 277static void re_init (void *); 278static void re_init_locked (struct rl_softc *); 279static void re_stop (struct rl_softc *); 280static void re_watchdog (struct rl_softc *); 281static int re_suspend (device_t); 282static int re_resume (device_t); 283static int re_shutdown (device_t); 284static int re_ifmedia_upd (struct ifnet *); 285static void re_ifmedia_sts (struct ifnet *, struct ifmediareq *); 286 287static void re_eeprom_putbyte (struct rl_softc *, int); 288static void re_eeprom_getword (struct rl_softc *, int, u_int16_t *); 289static void re_read_eeprom (struct rl_softc *, caddr_t, int, int); 290static int re_gmii_readreg (device_t, int, int); 291static int re_gmii_writereg (device_t, int, int, int); 292 293static int re_miibus_readreg (device_t, int, int); 294static int re_miibus_writereg (device_t, int, int, int); 295static void re_miibus_statchg (device_t); 296 297static void re_set_jumbo (struct rl_softc *, int); 298static void re_set_rxmode (struct rl_softc *); 299static void re_reset (struct rl_softc *); 300static void re_setwol (struct rl_softc *); 301static void re_clrwol (struct rl_softc *); 302static void re_set_linkspeed (struct rl_softc *); 303 304#ifdef DEV_NETMAP /* see ixgbe.c for details */ 305#include <dev/netmap/if_re_netmap.h> 306#endif /* !DEV_NETMAP */ 307 308#ifdef RE_DIAG 309static int re_diag (struct rl_softc *); 310#endif 311 312static void re_add_sysctls (struct rl_softc *); 313static int re_sysctl_stats (SYSCTL_HANDLER_ARGS); 314static int sysctl_int_range (SYSCTL_HANDLER_ARGS, int, int); 315static int sysctl_hw_re_int_mod (SYSCTL_HANDLER_ARGS); 316 317static device_method_t re_methods[] = { 318 /* Device interface */ 319 DEVMETHOD(device_probe, re_probe), 320 DEVMETHOD(device_attach, re_attach), 321 DEVMETHOD(device_detach, re_detach), 322 DEVMETHOD(device_suspend, re_suspend), 323 DEVMETHOD(device_resume, re_resume), 324 DEVMETHOD(device_shutdown, re_shutdown), 325 326 /* MII interface */ 327 DEVMETHOD(miibus_readreg, re_miibus_readreg), 328 DEVMETHOD(miibus_writereg, re_miibus_writereg), 329 DEVMETHOD(miibus_statchg, re_miibus_statchg), 330 331 DEVMETHOD_END 332}; 333 334static driver_t 
re_driver = { 335 "re", 336 re_methods, 337 sizeof(struct rl_softc) 338}; 339 340static devclass_t re_devclass; 341 342DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0); 343DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0); 344 345#define EE_SET(x) \ 346 CSR_WRITE_1(sc, RL_EECMD, \ 347 CSR_READ_1(sc, RL_EECMD) | x) 348 349#define EE_CLR(x) \ 350 CSR_WRITE_1(sc, RL_EECMD, \ 351 CSR_READ_1(sc, RL_EECMD) & ~x) 352 353/* 354 * Send a read command and address to the EEPROM, check for ACK. 355 */ 356static void 357re_eeprom_putbyte(struct rl_softc *sc, int addr) 358{ 359 int d, i; 360 361 d = addr | (RL_9346_READ << sc->rl_eewidth); 362 363 /* 364 * Feed in each bit and strobe the clock. 365 */ 366 367 for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) { 368 if (d & i) { 369 EE_SET(RL_EE_DATAIN); 370 } else { 371 EE_CLR(RL_EE_DATAIN); 372 } 373 DELAY(100); 374 EE_SET(RL_EE_CLK); 375 DELAY(150); 376 EE_CLR(RL_EE_CLK); 377 DELAY(100); 378 } 379} 380 381/* 382 * Read a word of data stored in the EEPROM at address 'addr.' 383 */ 384static void 385re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest) 386{ 387 int i; 388 u_int16_t word = 0; 389 390 /* 391 * Send address of word we want to read. 392 */ 393 re_eeprom_putbyte(sc, addr); 394 395 /* 396 * Start reading bits from EEPROM. 397 */ 398 for (i = 0x8000; i; i >>= 1) { 399 EE_SET(RL_EE_CLK); 400 DELAY(100); 401 if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) 402 word |= i; 403 EE_CLR(RL_EE_CLK); 404 DELAY(100); 405 } 406 407 *dest = word; 408} 409 410/* 411 * Read a sequence of words from the EEPROM. 412 */ 413static void 414re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt) 415{ 416 int i; 417 u_int16_t word = 0, *ptr; 418 419 CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); 420 421 DELAY(100); 422 423 for (i = 0; i < cnt; i++) { 424 CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL); 425 re_eeprom_getword(sc, off + i, &word); 426 CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL); 427 ptr = (u_int16_t *)(dest + (i * 2)); 428 *ptr = word; 429 } 430 431 CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); 432} 433 434static int 435re_gmii_readreg(device_t dev, int phy, int reg) 436{ 437 struct rl_softc *sc; 438 u_int32_t rval; 439 int i; 440 441 sc = device_get_softc(dev); 442 443 /* Let the rgephy driver read the GMEDIASTAT register */ 444 445 if (reg == RL_GMEDIASTAT) { 446 rval = CSR_READ_1(sc, RL_GMEDIASTAT); 447 return (rval); 448 } 449 450 CSR_WRITE_4(sc, RL_PHYAR, reg << 16); 451 452 for (i = 0; i < RL_PHY_TIMEOUT; i++) { 453 rval = CSR_READ_4(sc, RL_PHYAR); 454 if (rval & RL_PHYAR_BUSY) 455 break; 456 DELAY(25); 457 } 458 459 if (i == RL_PHY_TIMEOUT) { 460 device_printf(sc->rl_dev, "PHY read failed\n"); 461 return (0); 462 } 463 464 /* 465 * Controller requires a 20us delay to process next MDIO request. 466 */ 467 DELAY(20); 468 469 return (rval & RL_PHYAR_PHYDATA); 470} 471 472static int 473re_gmii_writereg(device_t dev, int phy, int reg, int data) 474{ 475 struct rl_softc *sc; 476 u_int32_t rval; 477 int i; 478 479 sc = device_get_softc(dev); 480 481 CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) | 482 (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY); 483 484 for (i = 0; i < RL_PHY_TIMEOUT; i++) { 485 rval = CSR_READ_4(sc, RL_PHYAR); 486 if (!(rval & RL_PHYAR_BUSY)) 487 break; 488 DELAY(25); 489 } 490 491 if (i == RL_PHY_TIMEOUT) { 492 device_printf(sc->rl_dev, "PHY write failed\n"); 493 return (0); 494 } 495 496 /* 497 * Controller requires a 20us delay to process next MDIO request. 
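 * (Note the polarity of the RL_PHYAR handshake: a write sets
 * RL_PHYAR_BUSY and polls until the chip clears it, whereas a read in
 * re_gmii_readreg() leaves the bit clear and polls until the chip sets
 * it to indicate that the data field is valid.)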
498 */ 499 DELAY(20); 500 501 return (0); 502} 503 504static int 505re_miibus_readreg(device_t dev, int phy, int reg) 506{ 507 struct rl_softc *sc; 508 u_int16_t rval = 0; 509 u_int16_t re8139_reg = 0; 510 511 sc = device_get_softc(dev); 512 513 if (sc->rl_type == RL_8169) { 514 rval = re_gmii_readreg(dev, phy, reg); 515 return (rval); 516 } 517 518 switch (reg) { 519 case MII_BMCR: 520 re8139_reg = RL_BMCR; 521 break; 522 case MII_BMSR: 523 re8139_reg = RL_BMSR; 524 break; 525 case MII_ANAR: 526 re8139_reg = RL_ANAR; 527 break; 528 case MII_ANER: 529 re8139_reg = RL_ANER; 530 break; 531 case MII_ANLPAR: 532 re8139_reg = RL_LPAR; 533 break; 534 case MII_PHYIDR1: 535 case MII_PHYIDR2: 536 return (0); 537 /* 538 * Allow the rlphy driver to read the media status 539 * register. If we have a link partner which does not 540 * support NWAY, this is the register which will tell 541 * us the results of parallel detection. 542 */ 543 case RL_MEDIASTAT: 544 rval = CSR_READ_1(sc, RL_MEDIASTAT); 545 return (rval); 546 default: 547 device_printf(sc->rl_dev, "bad phy register\n"); 548 return (0); 549 } 550 rval = CSR_READ_2(sc, re8139_reg); 551 if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) { 552 /* 8139C+ has different bit layout. */ 553 rval &= ~(BMCR_LOOP | BMCR_ISO); 554 } 555 return (rval); 556} 557 558static int 559re_miibus_writereg(device_t dev, int phy, int reg, int data) 560{ 561 struct rl_softc *sc; 562 u_int16_t re8139_reg = 0; 563 int rval = 0; 564 565 sc = device_get_softc(dev); 566 567 if (sc->rl_type == RL_8169) { 568 rval = re_gmii_writereg(dev, phy, reg, data); 569 return (rval); 570 } 571 572 switch (reg) { 573 case MII_BMCR: 574 re8139_reg = RL_BMCR; 575 if (sc->rl_type == RL_8139CPLUS) { 576 /* 8139C+ has different bit layout. */ 577 data &= ~(BMCR_LOOP | BMCR_ISO); 578 } 579 break; 580 case MII_BMSR: 581 re8139_reg = RL_BMSR; 582 break; 583 case MII_ANAR: 584 re8139_reg = RL_ANAR; 585 break; 586 case MII_ANER: 587 re8139_reg = RL_ANER; 588 break; 589 case MII_ANLPAR: 590 re8139_reg = RL_LPAR; 591 break; 592 case MII_PHYIDR1: 593 case MII_PHYIDR2: 594 return (0); 595 break; 596 default: 597 device_printf(sc->rl_dev, "bad phy register\n"); 598 return (0); 599 } 600 CSR_WRITE_2(sc, re8139_reg, data); 601 return (0); 602} 603 604static void 605re_miibus_statchg(device_t dev) 606{ 607 struct rl_softc *sc; 608 struct ifnet *ifp; 609 struct mii_data *mii; 610 611 sc = device_get_softc(dev); 612 mii = device_get_softc(sc->rl_miibus); 613 ifp = sc->rl_ifp; 614 if (mii == NULL || ifp == NULL || 615 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 616 return; 617 618 sc->rl_flags &= ~RL_FLAG_LINK; 619 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 620 (IFM_ACTIVE | IFM_AVALID)) { 621 switch (IFM_SUBTYPE(mii->mii_media_active)) { 622 case IFM_10_T: 623 case IFM_100_TX: 624 sc->rl_flags |= RL_FLAG_LINK; 625 break; 626 case IFM_1000_T: 627 if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0) 628 break; 629 sc->rl_flags |= RL_FLAG_LINK; 630 break; 631 default: 632 break; 633 } 634 } 635 /* 636 * RealTek controllers does not provide any interface to 637 * Tx/Rx MACs for resolved speed, duplex and flow-control 638 * parameters. 639 */ 640} 641 642/* 643 * Set the RX configuration and 64-bit multicast hash filter. 
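 *
 * As a rough sketch (equivalent to, but not literally, the code below),
 * each multicast address is folded into the 64-bit filter like this:
 *
 *	h = ether_crc32_be(lladdr, ETHER_ADDR_LEN) >> 26;
 *	hashes[h / 32] |= 1 << (h % 32);
 *
 * i.e. the top six bits of the big-endian CRC select one of 64 filter
 * bits split across the MAR0 (bits 0-31) and MAR4 (bits 32-63) registers.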
644 */ 645static void 646re_set_rxmode(struct rl_softc *sc) 647{ 648 struct ifnet *ifp; 649 struct ifmultiaddr *ifma; 650 uint32_t hashes[2] = { 0, 0 }; 651 uint32_t h, rxfilt; 652 653 RL_LOCK_ASSERT(sc); 654 655 ifp = sc->rl_ifp; 656 657 rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD; 658 if ((sc->rl_flags & RL_FLAG_EARLYOFF) != 0) 659 rxfilt |= RL_RXCFG_EARLYOFF; 660 else if ((sc->rl_flags & RL_FLAG_EARLYOFFV2) != 0) 661 rxfilt |= RL_RXCFG_EARLYOFFV2; 662 663 if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) { 664 if (ifp->if_flags & IFF_PROMISC) 665 rxfilt |= RL_RXCFG_RX_ALLPHYS; 666 /* 667 * Unlike other hardwares, we have to explicitly set 668 * RL_RXCFG_RX_MULTI to receive multicast frames in 669 * promiscuous mode. 670 */ 671 rxfilt |= RL_RXCFG_RX_MULTI; 672 hashes[0] = hashes[1] = 0xffffffff; 673 goto done; 674 } 675 676 if_maddr_rlock(ifp); 677 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 678 if (ifma->ifma_addr->sa_family != AF_LINK) 679 continue; 680 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 681 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 682 if (h < 32) 683 hashes[0] |= (1 << h); 684 else 685 hashes[1] |= (1 << (h - 32)); 686 } 687 if_maddr_runlock(ifp); 688 689 if (hashes[0] != 0 || hashes[1] != 0) { 690 /* 691 * For some unfathomable reason, RealTek decided to 692 * reverse the order of the multicast hash registers 693 * in the PCI Express parts. This means we have to 694 * write the hash pattern in reverse order for those 695 * devices. 696 */ 697 if ((sc->rl_flags & RL_FLAG_PCIE) != 0) { 698 h = bswap32(hashes[0]); 699 hashes[0] = bswap32(hashes[1]); 700 hashes[1] = h; 701 } 702 rxfilt |= RL_RXCFG_RX_MULTI; 703 } 704 705done: 706 CSR_WRITE_4(sc, RL_MAR0, hashes[0]); 707 CSR_WRITE_4(sc, RL_MAR4, hashes[1]); 708 CSR_WRITE_4(sc, RL_RXCFG, rxfilt); 709} 710 711static void 712re_reset(struct rl_softc *sc) 713{ 714 int i; 715 716 RL_LOCK_ASSERT(sc); 717 718 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); 719 720 for (i = 0; i < RL_TIMEOUT; i++) { 721 DELAY(10); 722 if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) 723 break; 724 } 725 if (i == RL_TIMEOUT) 726 device_printf(sc->rl_dev, "reset never completed!\n"); 727 728 if ((sc->rl_flags & RL_FLAG_MACRESET) != 0) 729 CSR_WRITE_1(sc, 0x82, 1); 730 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169S) 731 re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0); 732} 733 734#ifdef RE_DIAG 735 736/* 737 * The following routine is designed to test for a defect on some 738 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64# 739 * lines connected to the bus, however for a 32-bit only card, they 740 * should be pulled high. The result of this defect is that the 741 * NIC will not work right if you plug it into a 64-bit slot: DMA 742 * operations will be done with 64-bit transfers, which will fail 743 * because the 64-bit data lines aren't connected. 744 * 745 * There's no way to work around this (short of talking a soldering 746 * iron to the board), however we can detect it. The method we use 747 * here is to put the NIC into digital loopback mode, set the receiver 748 * to promiscuous mode, and then try to send a frame. We then compare 749 * the frame data we sent to what was received. If the data matches, 750 * then the NIC is working correctly, otherwise we know the user has 751 * a defective NIC which has been mistakenly plugged into a 64-bit PCI 752 * slot. In the latter case, there's no way the NIC can work correctly, 753 * so we print out a message on the console and abort the device attach. 
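 *
 * In outline, the test below is:
 *
 *	sc->rl_testmode = 1;
 *	re_init_locked(sc);			(loopback, promiscuous RX)
 *	IF_HANDOFF(&ifp->if_snd, m0, ifp);	(send one hand-built frame)
 *	... poll RL_ISR until RL_ISR_RX_OK is set ...
 *	bcmp() the received frame against the one we sent
 *
 * and any mismatch is reported as a defective 32-bit board.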
754 */ 755 756static int 757re_diag(struct rl_softc *sc) 758{ 759 struct ifnet *ifp = sc->rl_ifp; 760 struct mbuf *m0; 761 struct ether_header *eh; 762 struct rl_desc *cur_rx; 763 u_int16_t status; 764 u_int32_t rxstat; 765 int total_len, i, error = 0, phyaddr; 766 u_int8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' }; 767 u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' }; 768 769 /* Allocate a single mbuf */ 770 MGETHDR(m0, M_NOWAIT, MT_DATA); 771 if (m0 == NULL) 772 return (ENOBUFS); 773 774 RL_LOCK(sc); 775 776 /* 777 * Initialize the NIC in test mode. This sets the chip up 778 * so that it can send and receive frames, but performs the 779 * following special functions: 780 * - Puts receiver in promiscuous mode 781 * - Enables digital loopback mode 782 * - Leaves interrupts turned off 783 */ 784 785 ifp->if_flags |= IFF_PROMISC; 786 sc->rl_testmode = 1; 787 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 788 re_init_locked(sc); 789 sc->rl_flags |= RL_FLAG_LINK; 790 if (sc->rl_type == RL_8169) 791 phyaddr = 1; 792 else 793 phyaddr = 0; 794 795 re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET); 796 for (i = 0; i < RL_TIMEOUT; i++) { 797 status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR); 798 if (!(status & BMCR_RESET)) 799 break; 800 } 801 802 re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP); 803 CSR_WRITE_2(sc, RL_ISR, RL_INTRS); 804 805 DELAY(100000); 806 807 /* Put some data in the mbuf */ 808 809 eh = mtod(m0, struct ether_header *); 810 bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN); 811 bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN); 812 eh->ether_type = htons(ETHERTYPE_IP); 813 m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN; 814 815 /* 816 * Queue the packet, start transmission. 817 * Note: IF_HANDOFF() ultimately calls re_start() for us. 818 */ 819 820 CSR_WRITE_2(sc, RL_ISR, 0xFFFF); 821 RL_UNLOCK(sc); 822 /* XXX: re_diag must not be called when in ALTQ mode */ 823 IF_HANDOFF(&ifp->if_snd, m0, ifp); 824 RL_LOCK(sc); 825 m0 = NULL; 826 827 /* Wait for it to propagate through the chip */ 828 829 DELAY(100000); 830 for (i = 0; i < RL_TIMEOUT; i++) { 831 status = CSR_READ_2(sc, RL_ISR); 832 CSR_WRITE_2(sc, RL_ISR, status); 833 if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) == 834 (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) 835 break; 836 DELAY(10); 837 } 838 839 if (i == RL_TIMEOUT) { 840 device_printf(sc->rl_dev, 841 "diagnostic failed, failed to receive packet in" 842 " loopback mode\n"); 843 error = EIO; 844 goto done; 845 } 846 847 /* 848 * The packet should have been dumped into the first 849 * entry in the RX DMA ring. Grab it from there. 850 */ 851 852 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 853 sc->rl_ldata.rl_rx_list_map, 854 BUS_DMASYNC_POSTREAD); 855 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, 856 sc->rl_ldata.rl_rx_desc[0].rx_dmamap, 857 BUS_DMASYNC_POSTREAD); 858 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, 859 sc->rl_ldata.rl_rx_desc[0].rx_dmamap); 860 861 m0 = sc->rl_ldata.rl_rx_desc[0].rx_m; 862 sc->rl_ldata.rl_rx_desc[0].rx_m = NULL; 863 eh = mtod(m0, struct ether_header *); 864 865 cur_rx = &sc->rl_ldata.rl_rx_list[0]; 866 total_len = RL_RXBYTES(cur_rx); 867 rxstat = le32toh(cur_rx->rl_cmdstat); 868 869 if (total_len != ETHER_MIN_LEN) { 870 device_printf(sc->rl_dev, 871 "diagnostic failed, received short packet\n"); 872 error = EIO; 873 goto done; 874 } 875 876 /* Test that the received packet data matches what we sent. 
*/ 877 878 if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) || 879 bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) || 880 ntohs(eh->ether_type) != ETHERTYPE_IP) { 881 device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n"); 882 device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n", 883 dst, ":", src, ":", ETHERTYPE_IP); 884 device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n", 885 eh->ether_dhost, ":", eh->ether_shost, ":", 886 ntohs(eh->ether_type)); 887 device_printf(sc->rl_dev, "You may have a defective 32-bit " 888 "NIC plugged into a 64-bit PCI slot.\n"); 889 device_printf(sc->rl_dev, "Please re-install the NIC in a " 890 "32-bit slot for proper operation.\n"); 891 device_printf(sc->rl_dev, "Read the re(4) man page for more " 892 "details.\n"); 893 error = EIO; 894 } 895 896done: 897 /* Turn interface off, release resources */ 898 899 sc->rl_testmode = 0; 900 sc->rl_flags &= ~RL_FLAG_LINK; 901 ifp->if_flags &= ~IFF_PROMISC; 902 re_stop(sc); 903 if (m0 != NULL) 904 m_freem(m0); 905 906 RL_UNLOCK(sc); 907 908 return (error); 909} 910 911#endif 912 913/* 914 * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device 915 * IDs against our list and return a device name if we find a match. 916 */ 917static int 918re_probe(device_t dev) 919{ 920 const struct rl_type *t; 921 uint16_t devid, vendor; 922 uint16_t revid, sdevid; 923 int i; 924 925 vendor = pci_get_vendor(dev); 926 devid = pci_get_device(dev); 927 revid = pci_get_revid(dev); 928 sdevid = pci_get_subdevice(dev); 929 930 if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) { 931 if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) { 932 /* 933 * Only attach to rev. 3 of the Linksys EG1032 adapter. 934 * Rev. 2 is supported by sk(4). 935 */ 936 return (ENXIO); 937 } 938 } 939 940 if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) { 941 if (revid != 0x20) { 942 /* 8139, let rl(4) take care of this device. */ 943 return (ENXIO); 944 } 945 } 946 947 t = re_devs; 948 for (i = 0; i < sizeof(re_devs) / sizeof(re_devs[0]); i++, t++) { 949 if (vendor == t->rl_vid && devid == t->rl_did) { 950 device_set_desc(dev, t->rl_name); 951 return (BUS_PROBE_DEFAULT); 952 } 953 } 954 955 return (ENXIO); 956} 957 958/* 959 * Map a single buffer address. 960 */ 961 962static void 963re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 964{ 965 bus_addr_t *addr; 966 967 if (error) 968 return; 969 970 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 971 addr = arg; 972 *addr = segs->ds_addr; 973} 974 975static int 976re_allocmem(device_t dev, struct rl_softc *sc) 977{ 978 bus_addr_t lowaddr; 979 bus_size_t rx_list_size, tx_list_size; 980 int error; 981 int i; 982 983 rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc); 984 tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc); 985 986 /* 987 * Allocate the parent bus DMA tag appropriate for PCI. 988 * In order to use DAC, RL_CPLUSCMD_PCI_DAC bit of RL_CPLUS_CMD 989 * register should be set. However some RealTek chips are known 990 * to be buggy on DAC handling, therefore disable DAC by limiting 991 * DMA address space to 32bit. PCIe variants of RealTek chips 992 * may not have the limitation. 
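 *
 * (All of the DMA tags created below are children of rl_parent_tag, so
 * the 32-bit limit chosen here is inherited by the TX/RX mbuf tags, the
 * descriptor ring tags and the statistics tag as well.)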
993 */ 994 lowaddr = BUS_SPACE_MAXADDR; 995 if ((sc->rl_flags & RL_FLAG_PCIE) == 0) 996 lowaddr = BUS_SPACE_MAXADDR_32BIT; 997 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, 998 lowaddr, BUS_SPACE_MAXADDR, NULL, NULL, 999 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, 1000 NULL, NULL, &sc->rl_parent_tag); 1001 if (error) { 1002 device_printf(dev, "could not allocate parent DMA tag\n"); 1003 return (error); 1004 } 1005 1006 /* 1007 * Allocate map for TX mbufs. 1008 */ 1009 error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0, 1010 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1011 NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0, 1012 NULL, NULL, &sc->rl_ldata.rl_tx_mtag); 1013 if (error) { 1014 device_printf(dev, "could not allocate TX DMA tag\n"); 1015 return (error); 1016 } 1017 1018 /* 1019 * Allocate map for RX mbufs. 1020 */ 1021 1022 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { 1023 error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 1024 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1025 MJUM9BYTES, 1, MJUM9BYTES, 0, NULL, NULL, 1026 &sc->rl_ldata.rl_jrx_mtag); 1027 if (error) { 1028 device_printf(dev, 1029 "could not allocate jumbo RX DMA tag\n"); 1030 return (error); 1031 } 1032 } 1033 error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0, 1034 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1035 MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag); 1036 if (error) { 1037 device_printf(dev, "could not allocate RX DMA tag\n"); 1038 return (error); 1039 } 1040 1041 /* 1042 * Allocate map for TX descriptor list. 1043 */ 1044 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 1045 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1046 NULL, tx_list_size, 1, tx_list_size, 0, 1047 NULL, NULL, &sc->rl_ldata.rl_tx_list_tag); 1048 if (error) { 1049 device_printf(dev, "could not allocate TX DMA ring tag\n"); 1050 return (error); 1051 } 1052 1053 /* Allocate DMA'able memory for the TX ring */ 1054 1055 error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag, 1056 (void **)&sc->rl_ldata.rl_tx_list, 1057 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 1058 &sc->rl_ldata.rl_tx_list_map); 1059 if (error) { 1060 device_printf(dev, "could not allocate TX DMA ring\n"); 1061 return (error); 1062 } 1063 1064 /* Load the map for the TX ring. */ 1065 1066 sc->rl_ldata.rl_tx_list_addr = 0; 1067 error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag, 1068 sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list, 1069 tx_list_size, re_dma_map_addr, 1070 &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT); 1071 if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) { 1072 device_printf(dev, "could not load TX DMA ring\n"); 1073 return (ENOMEM); 1074 } 1075 1076 /* Create DMA maps for TX buffers */ 1077 1078 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { 1079 error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0, 1080 &sc->rl_ldata.rl_tx_desc[i].tx_dmamap); 1081 if (error) { 1082 device_printf(dev, "could not create DMA map for TX\n"); 1083 return (error); 1084 } 1085 } 1086 1087 /* 1088 * Allocate map for RX descriptor list. 
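	 * (This mirrors the TX ring setup above: create the ring tag,
	 * allocate coherent memory for the descriptors, then load the map
	 * so that re_dma_map_addr() records the ring's bus address in
	 * rl_rx_list_addr.)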
1089 */ 1090 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 1091 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1092 NULL, rx_list_size, 1, rx_list_size, 0, 1093 NULL, NULL, &sc->rl_ldata.rl_rx_list_tag); 1094 if (error) { 1095 device_printf(dev, "could not create RX DMA ring tag\n"); 1096 return (error); 1097 } 1098 1099 /* Allocate DMA'able memory for the RX ring */ 1100 1101 error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag, 1102 (void **)&sc->rl_ldata.rl_rx_list, 1103 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 1104 &sc->rl_ldata.rl_rx_list_map); 1105 if (error) { 1106 device_printf(dev, "could not allocate RX DMA ring\n"); 1107 return (error); 1108 } 1109 1110 /* Load the map for the RX ring. */ 1111 1112 sc->rl_ldata.rl_rx_list_addr = 0; 1113 error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag, 1114 sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list, 1115 rx_list_size, re_dma_map_addr, 1116 &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT); 1117 if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) { 1118 device_printf(dev, "could not load RX DMA ring\n"); 1119 return (ENOMEM); 1120 } 1121 1122 /* Create DMA maps for RX buffers */ 1123 1124 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { 1125 error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0, 1126 &sc->rl_ldata.rl_jrx_sparemap); 1127 if (error) { 1128 device_printf(dev, 1129 "could not create spare DMA map for jumbo RX\n"); 1130 return (error); 1131 } 1132 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 1133 error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0, 1134 &sc->rl_ldata.rl_jrx_desc[i].rx_dmamap); 1135 if (error) { 1136 device_printf(dev, 1137 "could not create DMA map for jumbo RX\n"); 1138 return (error); 1139 } 1140 } 1141 } 1142 error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0, 1143 &sc->rl_ldata.rl_rx_sparemap); 1144 if (error) { 1145 device_printf(dev, "could not create spare DMA map for RX\n"); 1146 return (error); 1147 } 1148 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 1149 error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0, 1150 &sc->rl_ldata.rl_rx_desc[i].rx_dmamap); 1151 if (error) { 1152 device_printf(dev, "could not create DMA map for RX\n"); 1153 return (error); 1154 } 1155 } 1156 1157 /* Create DMA map for statistics. */ 1158 error = bus_dma_tag_create(sc->rl_parent_tag, RL_DUMP_ALIGN, 0, 1159 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1160 sizeof(struct rl_stats), 1, sizeof(struct rl_stats), 0, NULL, NULL, 1161 &sc->rl_ldata.rl_stag); 1162 if (error) { 1163 device_printf(dev, "could not create statistics DMA tag\n"); 1164 return (error); 1165 } 1166 /* Allocate DMA'able memory for statistics. */ 1167 error = bus_dmamem_alloc(sc->rl_ldata.rl_stag, 1168 (void **)&sc->rl_ldata.rl_stats, 1169 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 1170 &sc->rl_ldata.rl_smap); 1171 if (error) { 1172 device_printf(dev, 1173 "could not allocate statistics DMA memory\n"); 1174 return (error); 1175 } 1176 /* Load the map for statistics. */ 1177 sc->rl_ldata.rl_stats_addr = 0; 1178 error = bus_dmamap_load(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap, 1179 sc->rl_ldata.rl_stats, sizeof(struct rl_stats), re_dma_map_addr, 1180 &sc->rl_ldata.rl_stats_addr, BUS_DMA_NOWAIT); 1181 if (error != 0 || sc->rl_ldata.rl_stats_addr == 0) { 1182 device_printf(dev, "could not load statistics DMA memory\n"); 1183 return (ENOMEM); 1184 } 1185 1186 return (0); 1187} 1188 1189/* 1190 * Attach the interface. Allocate softc structures, do ifmedia 1191 * setup and ethernet/BPF attach. 
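 *
 * Roughly, re_attach() proceeds as follows:
 *
 *	map the control/status BAR (memory or I/O space);
 *	set up MSI-X/MSI or INTx and disable ASPM;
 *	read RL_TXCFG to identify the hardware revision and set chip flags;
 *	read the station address (EEPROM or ID registers);
 *	allocate descriptor rings and DMA maps via re_allocmem();
 *	if_alloc(), mii_attach(), ether_ifattach();
 *	hook the interrupt handler last.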
1192 */ 1193static int 1194re_attach(device_t dev) 1195{ 1196 u_char eaddr[ETHER_ADDR_LEN]; 1197 u_int16_t as[ETHER_ADDR_LEN / 2]; 1198 struct rl_softc *sc; 1199 struct ifnet *ifp; 1200 const struct rl_hwrev *hw_rev; 1201 u_int32_t cap, ctl; 1202 int hwrev; 1203 u_int16_t devid, re_did = 0; 1204 int error = 0, i, phy, rid; 1205 int msic, msixc, reg; 1206 uint8_t cfg; 1207 1208 sc = device_get_softc(dev); 1209 sc->rl_dev = dev; 1210 1211 mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1212 MTX_DEF); 1213 callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0); 1214 1215 /* 1216 * Map control/status registers. 1217 */ 1218 pci_enable_busmaster(dev); 1219 1220 devid = pci_get_device(dev); 1221 /* 1222 * Prefer memory space register mapping over IO space. 1223 * Because RTL8169SC does not seem to work when memory mapping 1224 * is used always activate io mapping. 1225 */ 1226 if (devid == RT_DEVICEID_8169SC) 1227 prefer_iomap = 1; 1228 if (prefer_iomap == 0) { 1229 sc->rl_res_id = PCIR_BAR(1); 1230 sc->rl_res_type = SYS_RES_MEMORY; 1231 /* RTL8168/8101E seems to use different BARs. */ 1232 if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E) 1233 sc->rl_res_id = PCIR_BAR(2); 1234 } else { 1235 sc->rl_res_id = PCIR_BAR(0); 1236 sc->rl_res_type = SYS_RES_IOPORT; 1237 } 1238 sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type, 1239 &sc->rl_res_id, RF_ACTIVE); 1240 if (sc->rl_res == NULL && prefer_iomap == 0) { 1241 sc->rl_res_id = PCIR_BAR(0); 1242 sc->rl_res_type = SYS_RES_IOPORT; 1243 sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type, 1244 &sc->rl_res_id, RF_ACTIVE); 1245 } 1246 if (sc->rl_res == NULL) { 1247 device_printf(dev, "couldn't map ports/memory\n"); 1248 error = ENXIO; 1249 goto fail; 1250 } 1251 1252 sc->rl_btag = rman_get_bustag(sc->rl_res); 1253 sc->rl_bhandle = rman_get_bushandle(sc->rl_res); 1254 1255 msic = pci_msi_count(dev); 1256 msixc = pci_msix_count(dev); 1257 if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) { 1258 sc->rl_flags |= RL_FLAG_PCIE; 1259 sc->rl_expcap = reg; 1260 } 1261 if (bootverbose) { 1262 device_printf(dev, "MSI count : %d\n", msic); 1263 device_printf(dev, "MSI-X count : %d\n", msixc); 1264 } 1265 if (msix_disable > 0) 1266 msixc = 0; 1267 if (msi_disable > 0) 1268 msic = 0; 1269 /* Prefer MSI-X to MSI. */ 1270 if (msixc > 0) { 1271 msixc = RL_MSI_MESSAGES; 1272 rid = PCIR_BAR(4); 1273 sc->rl_res_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 1274 &rid, RF_ACTIVE); 1275 if (sc->rl_res_pba == NULL) { 1276 device_printf(sc->rl_dev, 1277 "could not allocate MSI-X PBA resource\n"); 1278 } 1279 if (sc->rl_res_pba != NULL && 1280 pci_alloc_msix(dev, &msixc) == 0) { 1281 if (msixc == RL_MSI_MESSAGES) { 1282 device_printf(dev, "Using %d MSI-X message\n", 1283 msixc); 1284 sc->rl_flags |= RL_FLAG_MSIX; 1285 } else 1286 pci_release_msi(dev); 1287 } 1288 if ((sc->rl_flags & RL_FLAG_MSIX) == 0) { 1289 if (sc->rl_res_pba != NULL) 1290 bus_release_resource(dev, SYS_RES_MEMORY, rid, 1291 sc->rl_res_pba); 1292 sc->rl_res_pba = NULL; 1293 msixc = 0; 1294 } 1295 } 1296 /* Prefer MSI to INTx. */ 1297 if (msixc == 0 && msic > 0) { 1298 msic = RL_MSI_MESSAGES; 1299 if (pci_alloc_msi(dev, &msic) == 0) { 1300 if (msic == RL_MSI_MESSAGES) { 1301 device_printf(dev, "Using %d MSI message\n", 1302 msic); 1303 sc->rl_flags |= RL_FLAG_MSI; 1304 /* Explicitly set MSI enable bit. 
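				 * The RL_CFG2 write is bracketed by
				 * RL_EE_MODE/RL_EEMODE_OFF writes to
				 * RL_EECMD, which unlock and then re-lock
				 * the configuration registers; the same
				 * sequence is used further down when the
				 * MSI bit has to be cleared again for INTx.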
*/ 1305 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 1306 cfg = CSR_READ_1(sc, RL_CFG2); 1307 cfg |= RL_CFG2_MSI; 1308 CSR_WRITE_1(sc, RL_CFG2, cfg); 1309 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 1310 } else 1311 pci_release_msi(dev); 1312 } 1313 if ((sc->rl_flags & RL_FLAG_MSI) == 0) 1314 msic = 0; 1315 } 1316 1317 /* Allocate interrupt */ 1318 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) { 1319 rid = 0; 1320 sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1321 RF_SHAREABLE | RF_ACTIVE); 1322 if (sc->rl_irq[0] == NULL) { 1323 device_printf(dev, "couldn't allocate IRQ resources\n"); 1324 error = ENXIO; 1325 goto fail; 1326 } 1327 } else { 1328 for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) { 1329 sc->rl_irq[i] = bus_alloc_resource_any(dev, 1330 SYS_RES_IRQ, &rid, RF_ACTIVE); 1331 if (sc->rl_irq[i] == NULL) { 1332 device_printf(dev, 1333 "couldn't allocate IRQ resources for " 1334 "message %d\n", rid); 1335 error = ENXIO; 1336 goto fail; 1337 } 1338 } 1339 } 1340 1341 if ((sc->rl_flags & RL_FLAG_MSI) == 0) { 1342 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 1343 cfg = CSR_READ_1(sc, RL_CFG2); 1344 if ((cfg & RL_CFG2_MSI) != 0) { 1345 device_printf(dev, "turning off MSI enable bit.\n"); 1346 cfg &= ~RL_CFG2_MSI; 1347 CSR_WRITE_1(sc, RL_CFG2, cfg); 1348 } 1349 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 1350 } 1351 1352 /* Disable ASPM L0S/L1. */ 1353 if (sc->rl_expcap != 0) { 1354 cap = pci_read_config(dev, sc->rl_expcap + 1355 PCIER_LINK_CAP, 2); 1356 if ((cap & PCIEM_LINK_CAP_ASPM) != 0) { 1357 ctl = pci_read_config(dev, sc->rl_expcap + 1358 PCIER_LINK_CTL, 2); 1359 if ((ctl & PCIEM_LINK_CTL_ASPMC) != 0) { 1360 ctl &= ~PCIEM_LINK_CTL_ASPMC; 1361 pci_write_config(dev, sc->rl_expcap + 1362 PCIER_LINK_CTL, ctl, 2); 1363 device_printf(dev, "ASPM disabled\n"); 1364 } 1365 } else 1366 device_printf(dev, "no ASPM capability\n"); 1367 } 1368 1369 hw_rev = re_hwrevs; 1370 hwrev = CSR_READ_4(sc, RL_TXCFG); 1371 switch (hwrev & 0x70000000) { 1372 case 0x00000000: 1373 case 0x10000000: 1374 device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000); 1375 hwrev &= (RL_TXCFG_HWREV | 0x80000000); 1376 break; 1377 default: 1378 device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000); 1379 sc->rl_macrev = hwrev & 0x00700000; 1380 hwrev &= RL_TXCFG_HWREV; 1381 break; 1382 } 1383 device_printf(dev, "MAC rev. 
0x%08x\n", sc->rl_macrev); 1384 while (hw_rev->rl_desc != NULL) { 1385 if (hw_rev->rl_rev == hwrev) { 1386 sc->rl_type = hw_rev->rl_type; 1387 sc->rl_hwrev = hw_rev; 1388 break; 1389 } 1390 hw_rev++; 1391 } 1392 if (hw_rev->rl_desc == NULL) { 1393 device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev); 1394 error = ENXIO; 1395 goto fail; 1396 } 1397 1398 switch (hw_rev->rl_rev) { 1399 case RL_HWREV_8139CPLUS: 1400 sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD; 1401 break; 1402 case RL_HWREV_8100E: 1403 case RL_HWREV_8101E: 1404 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER; 1405 break; 1406 case RL_HWREV_8102E: 1407 case RL_HWREV_8102EL: 1408 case RL_HWREV_8102EL_SPIN1: 1409 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | 1410 RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | 1411 RL_FLAG_AUTOPAD; 1412 break; 1413 case RL_HWREV_8103E: 1414 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | 1415 RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | 1416 RL_FLAG_AUTOPAD | RL_FLAG_MACSLEEP; 1417 break; 1418 case RL_HWREV_8401E: 1419 case RL_HWREV_8105E: 1420 case RL_HWREV_8105E_SPIN1: 1421 case RL_HWREV_8106E: 1422 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | 1423 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 1424 RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD; 1425 break; 1426 case RL_HWREV_8402: 1427 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | 1428 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 1429 RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | 1430 RL_FLAG_CMDSTOP_WAIT_TXQ; 1431 break; 1432 case RL_HWREV_8168B_SPIN1: 1433 case RL_HWREV_8168B_SPIN2: 1434 sc->rl_flags |= RL_FLAG_WOLRXENB; 1435 /* FALLTHROUGH */ 1436 case RL_HWREV_8168B_SPIN3: 1437 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT; 1438 break; 1439 case RL_HWREV_8168C_SPIN2: 1440 sc->rl_flags |= RL_FLAG_MACSLEEP; 1441 /* FALLTHROUGH */ 1442 case RL_HWREV_8168C: 1443 if (sc->rl_macrev == 0x00200000) 1444 sc->rl_flags |= RL_FLAG_MACSLEEP; 1445 /* FALLTHROUGH */ 1446 case RL_HWREV_8168CP: 1447 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 1448 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | 1449 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK; 1450 break; 1451 case RL_HWREV_8168D: 1452 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | 1453 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 1454 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | 1455 RL_FLAG_WOL_MANLINK; 1456 break; 1457 case RL_HWREV_8168DP: 1458 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 1459 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD | 1460 RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK; 1461 break; 1462 case RL_HWREV_8168E: 1463 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | 1464 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 1465 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | 1466 RL_FLAG_WOL_MANLINK; 1467 break; 1468 case RL_HWREV_8168E_VL: 1469 case RL_HWREV_8168F: 1470 sc->rl_flags |= RL_FLAG_EARLYOFF; 1471 /* FALLTHROUGH */ 1472 case RL_HWREV_8411: 1473 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 1474 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | 1475 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | 1476 RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK; 1477 break; 1478 case RL_HWREV_8168EP: 1479 case RL_HWREV_8168G: 1480 case RL_HWREV_8411B: 1481 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 1482 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | 1483 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | 1484 
RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK | 1485 RL_FLAG_EARLYOFFV2 | RL_FLAG_RXDV_GATED; 1486 break; 1487 case RL_HWREV_8168GU: 1488 if (pci_get_device(dev) == RT_DEVICEID_8101E) { 1489 /* RTL8106EUS */ 1490 sc->rl_flags |= RL_FLAG_FASTETHER; 1491 } else 1492 sc->rl_flags |= RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK; 1493 1494 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 1495 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | 1496 RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ | 1497 RL_FLAG_EARLYOFFV2 | RL_FLAG_RXDV_GATED; 1498 break; 1499 case RL_HWREV_8169_8110SB: 1500 case RL_HWREV_8169_8110SBL: 1501 case RL_HWREV_8169_8110SC: 1502 case RL_HWREV_8169_8110SCE: 1503 sc->rl_flags |= RL_FLAG_PHYWAKE; 1504 /* FALLTHROUGH */ 1505 case RL_HWREV_8169: 1506 case RL_HWREV_8169S: 1507 case RL_HWREV_8110S: 1508 sc->rl_flags |= RL_FLAG_MACRESET; 1509 break; 1510 default: 1511 break; 1512 } 1513 1514 if (sc->rl_hwrev->rl_rev == RL_HWREV_8139CPLUS) { 1515 sc->rl_cfg0 = RL_8139_CFG0; 1516 sc->rl_cfg1 = RL_8139_CFG1; 1517 sc->rl_cfg2 = 0; 1518 sc->rl_cfg3 = RL_8139_CFG3; 1519 sc->rl_cfg4 = RL_8139_CFG4; 1520 sc->rl_cfg5 = RL_8139_CFG5; 1521 } else { 1522 sc->rl_cfg0 = RL_CFG0; 1523 sc->rl_cfg1 = RL_CFG1; 1524 sc->rl_cfg2 = RL_CFG2; 1525 sc->rl_cfg3 = RL_CFG3; 1526 sc->rl_cfg4 = RL_CFG4; 1527 sc->rl_cfg5 = RL_CFG5; 1528 } 1529 1530 /* Reset the adapter. */ 1531 RL_LOCK(sc); 1532 re_reset(sc); 1533 RL_UNLOCK(sc); 1534 1535 /* Enable PME. */ 1536 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 1537 cfg = CSR_READ_1(sc, sc->rl_cfg1); 1538 cfg |= RL_CFG1_PME; 1539 CSR_WRITE_1(sc, sc->rl_cfg1, cfg); 1540 cfg = CSR_READ_1(sc, sc->rl_cfg5); 1541 cfg &= RL_CFG5_PME_STS; 1542 CSR_WRITE_1(sc, sc->rl_cfg5, cfg); 1543 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 1544 1545 if ((sc->rl_flags & RL_FLAG_PAR) != 0) { 1546 /* 1547 * XXX Should have a better way to extract station 1548 * address from EEPROM. 1549 */ 1550 for (i = 0; i < ETHER_ADDR_LEN; i++) 1551 eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i); 1552 } else { 1553 sc->rl_eewidth = RL_9356_ADDR_LEN; 1554 re_read_eeprom(sc, (caddr_t)&re_did, 0, 1); 1555 if (re_did != 0x8129) 1556 sc->rl_eewidth = RL_9346_ADDR_LEN; 1557 1558 /* 1559 * Get station address from the EEPROM. 1560 */ 1561 re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3); 1562 for (i = 0; i < ETHER_ADDR_LEN / 2; i++) 1563 as[i] = le16toh(as[i]); 1564 bcopy(as, eaddr, ETHER_ADDR_LEN); 1565 } 1566 1567 if (sc->rl_type == RL_8169) { 1568 /* Set RX length mask and number of descriptors. */ 1569 sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN; 1570 sc->rl_txstart = RL_GTXSTART; 1571 sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT; 1572 sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT; 1573 } else { 1574 /* Set RX length mask and number of descriptors. */ 1575 sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN; 1576 sc->rl_txstart = RL_TXSTART; 1577 sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT; 1578 sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT; 1579 } 1580 1581 error = re_allocmem(dev, sc); 1582 if (error) 1583 goto fail; 1584 re_add_sysctls(sc); 1585 1586 ifp = sc->rl_ifp = if_alloc(IFT_ETHER); 1587 if (ifp == NULL) { 1588 device_printf(dev, "can not if_alloc()\n"); 1589 error = ENOSPC; 1590 goto fail; 1591 } 1592 1593 /* Take controller out of deep sleep mode. 
*/ 1594 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { 1595 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) 1596 CSR_WRITE_1(sc, RL_GPIO, 1597 CSR_READ_1(sc, RL_GPIO) | 0x01); 1598 else 1599 CSR_WRITE_1(sc, RL_GPIO, 1600 CSR_READ_1(sc, RL_GPIO) & ~0x01); 1601 } 1602 1603 /* Take PHY out of power down mode. */ 1604 if ((sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) { 1605 CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80); 1606 if (hw_rev->rl_rev == RL_HWREV_8401E) 1607 CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08); 1608 } 1609 if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) { 1610 re_gmii_writereg(dev, 1, 0x1f, 0); 1611 re_gmii_writereg(dev, 1, 0x0e, 0); 1612 } 1613 1614 ifp->if_softc = sc; 1615 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1616 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1617 ifp->if_ioctl = re_ioctl; 1618 ifp->if_start = re_start; 1619 /* 1620 * RTL8168/8111C generates wrong IP checksummed frame if the 1621 * packet has IP options so disable TX IP checksum offloading. 1622 */ 1623 if (sc->rl_hwrev->rl_rev == RL_HWREV_8168C || 1624 sc->rl_hwrev->rl_rev == RL_HWREV_8168C_SPIN2 || 1625 sc->rl_hwrev->rl_rev == RL_HWREV_8168CP) 1626 ifp->if_hwassist = CSUM_TCP | CSUM_UDP; 1627 else 1628 ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP; 1629 ifp->if_hwassist |= CSUM_TSO; 1630 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4; 1631 ifp->if_capenable = ifp->if_capabilities; 1632 ifp->if_init = re_init; 1633 IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN); 1634 ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN; 1635 IFQ_SET_READY(&ifp->if_snd); 1636 1637 TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc); 1638 1639#define RE_PHYAD_INTERNAL 0 1640 1641 /* Do MII setup. */ 1642 phy = RE_PHYAD_INTERNAL; 1643 if (sc->rl_type == RL_8169) 1644 phy = 1; 1645 error = mii_attach(dev, &sc->rl_miibus, ifp, re_ifmedia_upd, 1646 re_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, MIIF_DOPAUSE); 1647 if (error != 0) { 1648 device_printf(dev, "attaching PHYs failed\n"); 1649 goto fail; 1650 } 1651 1652 /* 1653 * Call MI attach routine. 1654 */ 1655 ether_ifattach(ifp, eaddr); 1656 1657 /* VLAN capability setup */ 1658 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; 1659 if (ifp->if_capabilities & IFCAP_HWCSUM) 1660 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 1661 /* Enable WOL if PM is supported. */ 1662 if (pci_find_cap(sc->rl_dev, PCIY_PMG, ®) == 0) 1663 ifp->if_capabilities |= IFCAP_WOL; 1664 ifp->if_capenable = ifp->if_capabilities; 1665 ifp->if_capenable &= ~(IFCAP_WOL_UCAST | IFCAP_WOL_MCAST); 1666 /* 1667 * Don't enable TSO by default. It is known to generate 1668 * corrupted TCP segments(bad TCP options) under certain 1669 * circumstances. 1670 */ 1671 ifp->if_hwassist &= ~CSUM_TSO; 1672 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO); 1673#ifdef DEVICE_POLLING 1674 ifp->if_capabilities |= IFCAP_POLLING; 1675#endif 1676 /* 1677 * Tell the upper layer(s) we support long frames. 1678 * Must appear after the call to ether_ifattach() because 1679 * ether_ifattach() sets ifi_hdrlen to the default value. 1680 */ 1681 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1682 1683#ifdef DEV_NETMAP 1684 re_netmap_attach(sc); 1685#endif /* DEV_NETMAP */ 1686#ifdef RE_DIAG 1687 /* 1688 * Perform hardware diagnostic on the original RTL8169. 1689 * Some 32-bit cards were incorrectly wired and would 1690 * malfunction if plugged into a 64-bit slot. 
1691 */ 1692 1693 if (hwrev == RL_HWREV_8169) { 1694 error = re_diag(sc); 1695 if (error) { 1696 device_printf(dev, 1697 "attach aborted due to hardware diag failure\n"); 1698 ether_ifdetach(ifp); 1699 goto fail; 1700 } 1701 } 1702#endif 1703 1704#ifdef RE_TX_MODERATION 1705 intr_filter = 1; 1706#endif 1707 /* Hook interrupt last to avoid having to lock softc */ 1708 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 && 1709 intr_filter == 0) { 1710 error = bus_setup_intr(dev, sc->rl_irq[0], 1711 INTR_TYPE_NET | INTR_MPSAFE, NULL, re_intr_msi, sc, 1712 &sc->rl_intrhand[0]); 1713 } else { 1714 error = bus_setup_intr(dev, sc->rl_irq[0], 1715 INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc, 1716 &sc->rl_intrhand[0]); 1717 } 1718 if (error) { 1719 device_printf(dev, "couldn't set up irq\n"); 1720 ether_ifdetach(ifp); 1721 } 1722 1723fail: 1724 1725 if (error) 1726 re_detach(dev); 1727 1728 return (error); 1729} 1730 1731/* 1732 * Shutdown hardware and free up resources. This can be called any 1733 * time after the mutex has been initialized. It is called in both 1734 * the error case in attach and the normal detach case so it needs 1735 * to be careful about only freeing resources that have actually been 1736 * allocated. 1737 */ 1738static int 1739re_detach(device_t dev) 1740{ 1741 struct rl_softc *sc; 1742 struct ifnet *ifp; 1743 int i, rid; 1744 1745 sc = device_get_softc(dev); 1746 ifp = sc->rl_ifp; 1747 KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized")); 1748 1749 /* These should only be active if attach succeeded */ 1750 if (device_is_attached(dev)) { 1751#ifdef DEVICE_POLLING 1752 if (ifp->if_capenable & IFCAP_POLLING) 1753 ether_poll_deregister(ifp); 1754#endif 1755 RL_LOCK(sc); 1756#if 0 1757 sc->suspended = 1; 1758#endif 1759 re_stop(sc); 1760 RL_UNLOCK(sc); 1761 callout_drain(&sc->rl_stat_callout); 1762 taskqueue_drain(taskqueue_fast, &sc->rl_inttask); 1763 /* 1764 * Force off the IFF_UP flag here, in case someone 1765 * still had a BPF descriptor attached to this 1766 * interface. If they do, ether_ifdetach() will cause 1767 * the BPF code to try and clear the promisc mode 1768 * flag, which will bubble down to re_ioctl(), 1769 * which will try to call re_init() again. This will 1770 * turn the NIC back on and restart the MII ticker, 1771 * which will panic the system when the kernel tries 1772 * to invoke the re_tick() function that isn't there 1773 * anymore. 1774 */ 1775 ifp->if_flags &= ~IFF_UP; 1776 ether_ifdetach(ifp); 1777 } 1778 if (sc->rl_miibus) 1779 device_delete_child(dev, sc->rl_miibus); 1780 bus_generic_detach(dev); 1781 1782 /* 1783 * The rest is resource deallocation, so we should already be 1784 * stopped here. 
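	 *
	 * Teardown runs in roughly the reverse order of re_attach():
	 * interrupt handler, IRQ and MSI/MSI-X resources, the register
	 * BAR, then the RX/TX descriptor rings, the per-buffer DMA maps,
	 * the statistics block, the parent DMA tag and finally the mutex.
	 * Every release is guarded by a NULL/zero check so this is safe
	 * to call from the attach error path.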
1785 */ 1786 1787 if (sc->rl_intrhand[0] != NULL) { 1788 bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]); 1789 sc->rl_intrhand[0] = NULL; 1790 } 1791 if (ifp != NULL) { 1792#ifdef DEV_NETMAP 1793 netmap_detach(ifp); 1794#endif /* DEV_NETMAP */ 1795 if_free(ifp); 1796 } 1797 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) 1798 rid = 0; 1799 else 1800 rid = 1; 1801 if (sc->rl_irq[0] != NULL) { 1802 bus_release_resource(dev, SYS_RES_IRQ, rid, sc->rl_irq[0]); 1803 sc->rl_irq[0] = NULL; 1804 } 1805 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0) 1806 pci_release_msi(dev); 1807 if (sc->rl_res_pba) { 1808 rid = PCIR_BAR(4); 1809 bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba); 1810 } 1811 if (sc->rl_res) 1812 bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id, 1813 sc->rl_res); 1814 1815 /* Unload and free the RX DMA ring memory and map */ 1816 1817 if (sc->rl_ldata.rl_rx_list_tag) { 1818 if (sc->rl_ldata.rl_rx_list_map) 1819 bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag, 1820 sc->rl_ldata.rl_rx_list_map); 1821 if (sc->rl_ldata.rl_rx_list_map && sc->rl_ldata.rl_rx_list) 1822 bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag, 1823 sc->rl_ldata.rl_rx_list, 1824 sc->rl_ldata.rl_rx_list_map); 1825 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag); 1826 } 1827 1828 /* Unload and free the TX DMA ring memory and map */ 1829 1830 if (sc->rl_ldata.rl_tx_list_tag) { 1831 if (sc->rl_ldata.rl_tx_list_map) 1832 bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag, 1833 sc->rl_ldata.rl_tx_list_map); 1834 if (sc->rl_ldata.rl_tx_list_map && sc->rl_ldata.rl_tx_list) 1835 bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag, 1836 sc->rl_ldata.rl_tx_list, 1837 sc->rl_ldata.rl_tx_list_map); 1838 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag); 1839 } 1840 1841 /* Destroy all the RX and TX buffer maps */ 1842 1843 if (sc->rl_ldata.rl_tx_mtag) { 1844 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { 1845 if (sc->rl_ldata.rl_tx_desc[i].tx_dmamap) 1846 bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag, 1847 sc->rl_ldata.rl_tx_desc[i].tx_dmamap); 1848 } 1849 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag); 1850 } 1851 if (sc->rl_ldata.rl_rx_mtag) { 1852 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 1853 if (sc->rl_ldata.rl_rx_desc[i].rx_dmamap) 1854 bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag, 1855 sc->rl_ldata.rl_rx_desc[i].rx_dmamap); 1856 } 1857 if (sc->rl_ldata.rl_rx_sparemap) 1858 bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag, 1859 sc->rl_ldata.rl_rx_sparemap); 1860 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag); 1861 } 1862 if (sc->rl_ldata.rl_jrx_mtag) { 1863 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 1864 if (sc->rl_ldata.rl_jrx_desc[i].rx_dmamap) 1865 bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag, 1866 sc->rl_ldata.rl_jrx_desc[i].rx_dmamap); 1867 } 1868 if (sc->rl_ldata.rl_jrx_sparemap) 1869 bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag, 1870 sc->rl_ldata.rl_jrx_sparemap); 1871 bus_dma_tag_destroy(sc->rl_ldata.rl_jrx_mtag); 1872 } 1873 /* Unload and free the stats buffer and map */ 1874 1875 if (sc->rl_ldata.rl_stag) { 1876 if (sc->rl_ldata.rl_smap) 1877 bus_dmamap_unload(sc->rl_ldata.rl_stag, 1878 sc->rl_ldata.rl_smap); 1879 if (sc->rl_ldata.rl_smap && sc->rl_ldata.rl_stats) 1880 bus_dmamem_free(sc->rl_ldata.rl_stag, 1881 sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap); 1882 bus_dma_tag_destroy(sc->rl_ldata.rl_stag); 1883 } 1884 1885 if (sc->rl_parent_tag) 1886 bus_dma_tag_destroy(sc->rl_parent_tag); 1887 1888 mtx_destroy(&sc->rl_mtx); 1889 1890 return (0); 1891} 1892 1893static __inline void 
1894re_discard_rxbuf(struct rl_softc *sc, int idx) 1895{ 1896 struct rl_desc *desc; 1897 struct rl_rxdesc *rxd; 1898 uint32_t cmdstat; 1899 1900 if (sc->rl_ifp->if_mtu > RL_MTU && 1901 (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) 1902 rxd = &sc->rl_ldata.rl_jrx_desc[idx]; 1903 else 1904 rxd = &sc->rl_ldata.rl_rx_desc[idx]; 1905 desc = &sc->rl_ldata.rl_rx_list[idx]; 1906 desc->rl_vlanctl = 0; 1907 cmdstat = rxd->rx_size; 1908 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) 1909 cmdstat |= RL_RDESC_CMD_EOR; 1910 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); 1911} 1912 1913static int 1914re_newbuf(struct rl_softc *sc, int idx) 1915{ 1916 struct mbuf *m; 1917 struct rl_rxdesc *rxd; 1918 bus_dma_segment_t segs[1]; 1919 bus_dmamap_t map; 1920 struct rl_desc *desc; 1921 uint32_t cmdstat; 1922 int error, nsegs; 1923 1924 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1925 if (m == NULL) 1926 return (ENOBUFS); 1927 1928 m->m_len = m->m_pkthdr.len = MCLBYTES; 1929#ifdef RE_FIXUP_RX 1930 /* 1931 * This is part of an evil trick to deal with non-x86 platforms. 1932 * The RealTek chip requires RX buffers to be aligned on 64-bit 1933 * boundaries, but that will hose non-x86 machines. To get around 1934 * this, we leave some empty space at the start of each buffer 1935 * and for non-x86 hosts, we copy the buffer back six bytes 1936 * to achieve word alignment. This is slightly more efficient 1937 * than allocating a new buffer, copying the contents, and 1938 * discarding the old buffer. 1939 */ 1940 m_adj(m, RE_ETHER_ALIGN); 1941#endif 1942 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag, 1943 sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT); 1944 if (error != 0) { 1945 m_freem(m); 1946 return (ENOBUFS); 1947 } 1948 KASSERT(nsegs == 1, ("%s: %d segment returned!", __func__, nsegs)); 1949 1950 rxd = &sc->rl_ldata.rl_rx_desc[idx]; 1951 if (rxd->rx_m != NULL) { 1952 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, 1953 BUS_DMASYNC_POSTREAD); 1954 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap); 1955 } 1956 1957 rxd->rx_m = m; 1958 map = rxd->rx_dmamap; 1959 rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap; 1960 rxd->rx_size = segs[0].ds_len; 1961 sc->rl_ldata.rl_rx_sparemap = map; 1962 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, 1963 BUS_DMASYNC_PREREAD); 1964 1965 desc = &sc->rl_ldata.rl_rx_list[idx]; 1966 desc->rl_vlanctl = 0; 1967 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr)); 1968 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr)); 1969 cmdstat = segs[0].ds_len; 1970 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) 1971 cmdstat |= RL_RDESC_CMD_EOR; 1972 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); 1973 1974 return (0); 1975} 1976 1977static int 1978re_jumbo_newbuf(struct rl_softc *sc, int idx) 1979{ 1980 struct mbuf *m; 1981 struct rl_rxdesc *rxd; 1982 bus_dma_segment_t segs[1]; 1983 bus_dmamap_t map; 1984 struct rl_desc *desc; 1985 uint32_t cmdstat; 1986 int error, nsegs; 1987 1988 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); 1989 if (m == NULL) 1990 return (ENOBUFS); 1991 m->m_len = m->m_pkthdr.len = MJUM9BYTES; 1992#ifdef RE_FIXUP_RX 1993 m_adj(m, RE_ETHER_ALIGN); 1994#endif 1995 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_jrx_mtag, 1996 sc->rl_ldata.rl_jrx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT); 1997 if (error != 0) { 1998 m_freem(m); 1999 return (ENOBUFS); 2000 } 2001 KASSERT(nsegs == 1, ("%s: %d segment returned!", __func__, nsegs)); 2002 2003 rxd = &sc->rl_ldata.rl_jrx_desc[idx]; 2004 if (rxd->rx_m != NULL) { 
2005 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap, 2006 BUS_DMASYNC_POSTREAD); 2007 bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap); 2008 } 2009 2010 rxd->rx_m = m; 2011 map = rxd->rx_dmamap; 2012 rxd->rx_dmamap = sc->rl_ldata.rl_jrx_sparemap; 2013 rxd->rx_size = segs[0].ds_len; 2014 sc->rl_ldata.rl_jrx_sparemap = map; 2015 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap, 2016 BUS_DMASYNC_PREREAD); 2017 2018 desc = &sc->rl_ldata.rl_rx_list[idx]; 2019 desc->rl_vlanctl = 0; 2020 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr)); 2021 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr)); 2022 cmdstat = segs[0].ds_len; 2023 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) 2024 cmdstat |= RL_RDESC_CMD_EOR; 2025 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); 2026 2027 return (0); 2028} 2029 2030#ifdef RE_FIXUP_RX 2031static __inline void 2032re_fixup_rx(struct mbuf *m) 2033{ 2034 int i; 2035 uint16_t *src, *dst; 2036 2037 src = mtod(m, uint16_t *); 2038 dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src; 2039 2040 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 2041 *dst++ = *src++; 2042 2043 m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN; 2044} 2045#endif 2046 2047static int 2048re_tx_list_init(struct rl_softc *sc) 2049{ 2050 struct rl_desc *desc; 2051 int i; 2052 2053 RL_LOCK_ASSERT(sc); 2054 2055 bzero(sc->rl_ldata.rl_tx_list, 2056 sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc)); 2057 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) 2058 sc->rl_ldata.rl_tx_desc[i].tx_m = NULL; 2059#ifdef DEV_NETMAP 2060 re_netmap_tx_init(sc); 2061#endif /* DEV_NETMAP */ 2062 /* Set EOR. */ 2063 desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1]; 2064 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR); 2065 2066 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 2067 sc->rl_ldata.rl_tx_list_map, 2068 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2069 2070 sc->rl_ldata.rl_tx_prodidx = 0; 2071 sc->rl_ldata.rl_tx_considx = 0; 2072 sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt; 2073 2074 return (0); 2075} 2076 2077static int 2078re_rx_list_init(struct rl_softc *sc) 2079{ 2080 int error, i; 2081 2082 bzero(sc->rl_ldata.rl_rx_list, 2083 sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)); 2084 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 2085 sc->rl_ldata.rl_rx_desc[i].rx_m = NULL; 2086 if ((error = re_newbuf(sc, i)) != 0) 2087 return (error); 2088 } 2089#ifdef DEV_NETMAP 2090 re_netmap_rx_init(sc); 2091#endif /* DEV_NETMAP */ 2092 2093 /* Flush the RX descriptors */ 2094 2095 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 2096 sc->rl_ldata.rl_rx_list_map, 2097 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2098 2099 sc->rl_ldata.rl_rx_prodidx = 0; 2100 sc->rl_head = sc->rl_tail = NULL; 2101 sc->rl_int_rx_act = 0; 2102 2103 return (0); 2104} 2105 2106static int 2107re_jrx_list_init(struct rl_softc *sc) 2108{ 2109 int error, i; 2110 2111 bzero(sc->rl_ldata.rl_rx_list, 2112 sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)); 2113 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 2114 sc->rl_ldata.rl_jrx_desc[i].rx_m = NULL; 2115 if ((error = re_jumbo_newbuf(sc, i)) != 0) 2116 return (error); 2117 } 2118 2119 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 2120 sc->rl_ldata.rl_rx_list_map, 2121 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2122 2123 sc->rl_ldata.rl_rx_prodidx = 0; 2124 sc->rl_head = sc->rl_tail = NULL; 2125 sc->rl_int_rx_act = 0; 2126 2127 return (0); 2128} 2129 2130/* 2131 * RX handler for C+ and 8169. 
For the gigE chips, we support 2132 * the reception of jumbo frames that have been fragmented 2133 * across multiple 2K mbuf cluster buffers. 2134 */ 2135static int 2136re_rxeof(struct rl_softc *sc, int *rx_npktsp) 2137{ 2138 struct mbuf *m; 2139 struct ifnet *ifp; 2140 int i, rxerr, total_len; 2141 struct rl_desc *cur_rx; 2142 u_int32_t rxstat, rxvlan; 2143 int jumbo, maxpkt = 16, rx_npkts = 0; 2144 2145 RL_LOCK_ASSERT(sc); 2146 2147 ifp = sc->rl_ifp; 2148#ifdef DEV_NETMAP 2149 if (netmap_rx_irq(ifp, 0, &rx_npkts)) 2150 return 0; 2151#endif /* DEV_NETMAP */ 2152 if (ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) 2153 jumbo = 1; 2154 else 2155 jumbo = 0; 2156 2157 /* Invalidate the descriptor memory */ 2158 2159 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 2160 sc->rl_ldata.rl_rx_list_map, 2161 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2162 2163 for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0; 2164 i = RL_RX_DESC_NXT(sc, i)) { 2165 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2166 break; 2167 cur_rx = &sc->rl_ldata.rl_rx_list[i]; 2168 rxstat = le32toh(cur_rx->rl_cmdstat); 2169 if ((rxstat & RL_RDESC_STAT_OWN) != 0) 2170 break; 2171 total_len = rxstat & sc->rl_rxlenmask; 2172 rxvlan = le32toh(cur_rx->rl_vlanctl); 2173 if (jumbo != 0) 2174 m = sc->rl_ldata.rl_jrx_desc[i].rx_m; 2175 else 2176 m = sc->rl_ldata.rl_rx_desc[i].rx_m; 2177 2178 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && 2179 (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) != 2180 (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) { 2181 /* 2182 * RTL8168C or later controllers do not 2183 * support multi-fragment packet. 2184 */ 2185 re_discard_rxbuf(sc, i); 2186 continue; 2187 } else if ((rxstat & RL_RDESC_STAT_EOF) == 0) { 2188 if (re_newbuf(sc, i) != 0) { 2189 /* 2190 * If this is part of a multi-fragment packet, 2191 * discard all the pieces. 2192 */ 2193 if (sc->rl_head != NULL) { 2194 m_freem(sc->rl_head); 2195 sc->rl_head = sc->rl_tail = NULL; 2196 } 2197 re_discard_rxbuf(sc, i); 2198 continue; 2199 } 2200 m->m_len = RE_RX_DESC_BUFLEN; 2201 if (sc->rl_head == NULL) 2202 sc->rl_head = sc->rl_tail = m; 2203 else { 2204 m->m_flags &= ~M_PKTHDR; 2205 sc->rl_tail->m_next = m; 2206 sc->rl_tail = m; 2207 } 2208 continue; 2209 } 2210 2211 /* 2212 * NOTE: for the 8139C+, the frame length field 2213 * is always 12 bits in size, but for the gigE chips, 2214 * it is 13 bits (since the max RX frame length is 16K). 2215 * Unfortunately, all 32 bits in the status word 2216 * were already used, so to make room for the extra 2217 * length bit, RealTek took out the 'frame alignment 2218 * error' bit and shifted the other status bits 2219 * over one slot. The OWN, EOR, FS and LS bits are 2220 * still in the same places. We have already extracted 2221 * the frame length and checked the OWN bit, so rather 2222 * than using an alternate bit mapping, we shift the 2223 * status bits one space to the right so we can evaluate 2224 * them using the 8169 status as though it was in the 2225 * same format as that of the 8139C+. 2226 */ 2227 if (sc->rl_type == RL_8169) 2228 rxstat >>= 1; 2229 2230 /* 2231 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be 2232 * set, but if CRC is clear, it will still be a valid frame. 
2233 */ 2234 if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0) { 2235 rxerr = 1; 2236 if ((sc->rl_flags & RL_FLAG_JUMBOV2) == 0 && 2237 total_len > 8191 && 2238 (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT) 2239 rxerr = 0; 2240 if (rxerr != 0) { 2241 ifp->if_ierrors++; 2242 /* 2243 * If this is part of a multi-fragment packet, 2244 * discard all the pieces. 2245 */ 2246 if (sc->rl_head != NULL) { 2247 m_freem(sc->rl_head); 2248 sc->rl_head = sc->rl_tail = NULL; 2249 } 2250 re_discard_rxbuf(sc, i); 2251 continue; 2252 } 2253 } 2254 2255 /* 2256 * If allocating a replacement mbuf fails, 2257 * reload the current one. 2258 */ 2259 if (jumbo != 0) 2260 rxerr = re_jumbo_newbuf(sc, i); 2261 else 2262 rxerr = re_newbuf(sc, i); 2263 if (rxerr != 0) { 2264 ifp->if_iqdrops++; 2265 if (sc->rl_head != NULL) { 2266 m_freem(sc->rl_head); 2267 sc->rl_head = sc->rl_tail = NULL; 2268 } 2269 re_discard_rxbuf(sc, i); 2270 continue; 2271 } 2272 2273 if (sc->rl_head != NULL) { 2274 if (jumbo != 0) 2275 m->m_len = total_len; 2276 else { 2277 m->m_len = total_len % RE_RX_DESC_BUFLEN; 2278 if (m->m_len == 0) 2279 m->m_len = RE_RX_DESC_BUFLEN; 2280 } 2281 /* 2282 * Special case: if there's 4 bytes or less 2283 * in this buffer, the mbuf can be discarded: 2284 * the last 4 bytes is the CRC, which we don't 2285 * care about anyway. 2286 */ 2287 if (m->m_len <= ETHER_CRC_LEN) { 2288 sc->rl_tail->m_len -= 2289 (ETHER_CRC_LEN - m->m_len); 2290 m_freem(m); 2291 } else { 2292 m->m_len -= ETHER_CRC_LEN; 2293 m->m_flags &= ~M_PKTHDR; 2294 sc->rl_tail->m_next = m; 2295 } 2296 m = sc->rl_head; 2297 sc->rl_head = sc->rl_tail = NULL; 2298 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 2299 } else 2300 m->m_pkthdr.len = m->m_len = 2301 (total_len - ETHER_CRC_LEN); 2302 2303#ifdef RE_FIXUP_RX 2304 re_fixup_rx(m); 2305#endif 2306 ifp->if_ipackets++; 2307 m->m_pkthdr.rcvif = ifp; 2308 2309 /* Do RX checksumming if enabled */ 2310 2311 if (ifp->if_capenable & IFCAP_RXCSUM) { 2312 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) { 2313 /* Check IP header checksum */ 2314 if (rxstat & RL_RDESC_STAT_PROTOID) 2315 m->m_pkthdr.csum_flags |= 2316 CSUM_IP_CHECKED; 2317 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD)) 2318 m->m_pkthdr.csum_flags |= 2319 CSUM_IP_VALID; 2320 2321 /* Check TCP/UDP checksum */ 2322 if ((RL_TCPPKT(rxstat) && 2323 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || 2324 (RL_UDPPKT(rxstat) && 2325 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { 2326 m->m_pkthdr.csum_flags |= 2327 CSUM_DATA_VALID|CSUM_PSEUDO_HDR; 2328 m->m_pkthdr.csum_data = 0xffff; 2329 } 2330 } else { 2331 /* 2332 * RTL8168C/RTL816CP/RTL8111C/RTL8111CP 2333 */ 2334 if ((rxstat & RL_RDESC_STAT_PROTOID) && 2335 (rxvlan & RL_RDESC_IPV4)) 2336 m->m_pkthdr.csum_flags |= 2337 CSUM_IP_CHECKED; 2338 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) && 2339 (rxvlan & RL_RDESC_IPV4)) 2340 m->m_pkthdr.csum_flags |= 2341 CSUM_IP_VALID; 2342 if (((rxstat & RL_RDESC_STAT_TCP) && 2343 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || 2344 ((rxstat & RL_RDESC_STAT_UDP) && 2345 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { 2346 m->m_pkthdr.csum_flags |= 2347 CSUM_DATA_VALID|CSUM_PSEUDO_HDR; 2348 m->m_pkthdr.csum_data = 0xffff; 2349 } 2350 } 2351 } 2352 maxpkt--; 2353 if (rxvlan & RL_RDESC_VLANCTL_TAG) { 2354 m->m_pkthdr.ether_vtag = 2355 bswap16((rxvlan & RL_RDESC_VLANCTL_DATA)); 2356 m->m_flags |= M_VLANTAG; 2357 } 2358 RL_UNLOCK(sc); 2359 (*ifp->if_input)(ifp, m); 2360 RL_LOCK(sc); 2361 rx_npkts++; 2362 } 2363 2364 /* Flush the RX DMA ring */ 2365 2366 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 2367 
sc->rl_ldata.rl_rx_list_map, 2368 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2369 2370 sc->rl_ldata.rl_rx_prodidx = i; 2371 2372 if (rx_npktsp != NULL) 2373 *rx_npktsp = rx_npkts; 2374 if (maxpkt) 2375 return (EAGAIN); 2376 2377 return (0); 2378} 2379 2380static void 2381re_txeof(struct rl_softc *sc) 2382{ 2383 struct ifnet *ifp; 2384 struct rl_txdesc *txd; 2385 u_int32_t txstat; 2386 int cons; 2387 2388 cons = sc->rl_ldata.rl_tx_considx; 2389 if (cons == sc->rl_ldata.rl_tx_prodidx) 2390 return; 2391 2392 ifp = sc->rl_ifp; 2393#ifdef DEV_NETMAP 2394 if (netmap_tx_irq(ifp, 0)) 2395 return; 2396#endif /* DEV_NETMAP */ 2397 /* Invalidate the TX descriptor list */ 2398 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 2399 sc->rl_ldata.rl_tx_list_map, 2400 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2401 2402 for (; cons != sc->rl_ldata.rl_tx_prodidx; 2403 cons = RL_TX_DESC_NXT(sc, cons)) { 2404 txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat); 2405 if (txstat & RL_TDESC_STAT_OWN) 2406 break; 2407 /* 2408 * We only stash mbufs in the last descriptor 2409 * in a fragment chain, which also happens to 2410 * be the only place where the TX status bits 2411 * are valid. 2412 */ 2413 if (txstat & RL_TDESC_CMD_EOF) { 2414 txd = &sc->rl_ldata.rl_tx_desc[cons]; 2415 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, 2416 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2417 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, 2418 txd->tx_dmamap); 2419 KASSERT(txd->tx_m != NULL, 2420 ("%s: freeing NULL mbufs!", __func__)); 2421 m_freem(txd->tx_m); 2422 txd->tx_m = NULL; 2423 if (txstat & (RL_TDESC_STAT_EXCESSCOL| 2424 RL_TDESC_STAT_COLCNT)) 2425 ifp->if_collisions++; 2426 if (txstat & RL_TDESC_STAT_TXERRSUM) 2427 ifp->if_oerrors++; 2428 else 2429 ifp->if_opackets++; 2430 } 2431 sc->rl_ldata.rl_tx_free++; 2432 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2433 } 2434 sc->rl_ldata.rl_tx_considx = cons; 2435 2436 /* No changes made to the TX ring, so no flush needed */ 2437 2438 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) { 2439#ifdef RE_TX_MODERATION 2440 /* 2441 * If not all descriptors have been reaped yet, reload 2442 * the timer so that we will eventually get another 2443 * interrupt that will cause us to re-enter this routine. 2444 * This is done in case the transmitter has gone idle. 2445 */ 2446 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2447#endif 2448 } else 2449 sc->rl_watchdog_timer = 0; 2450} 2451 2452static void 2453re_tick(void *xsc) 2454{ 2455 struct rl_softc *sc; 2456 struct mii_data *mii; 2457 2458 sc = xsc; 2459 2460 RL_LOCK_ASSERT(sc); 2461 2462 mii = device_get_softc(sc->rl_miibus); 2463 mii_tick(mii); 2464 if ((sc->rl_flags & RL_FLAG_LINK) == 0) 2465 re_miibus_statchg(sc->rl_dev); 2466 /* 2467 * Reclaim transmitted frames here. Technically it is not 2468 * necessary to do it here, but it ensures periodic reclamation 2469 * regardless of Tx completion interrupts, which seem to be 2470 * lost on PCIe-based controllers under certain situations. 
2471 */ 2472 re_txeof(sc); 2473 re_watchdog(sc); 2474 callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); 2475} 2476 2477#ifdef DEVICE_POLLING 2478static int 2479re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 2480{ 2481 struct rl_softc *sc = ifp->if_softc; 2482 int rx_npkts = 0; 2483 2484 RL_LOCK(sc); 2485 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2486 rx_npkts = re_poll_locked(ifp, cmd, count); 2487 RL_UNLOCK(sc); 2488 return (rx_npkts); 2489} 2490 2491static int 2492re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) 2493{ 2494 struct rl_softc *sc = ifp->if_softc; 2495 int rx_npkts; 2496 2497 RL_LOCK_ASSERT(sc); 2498 2499 sc->rxcycles = count; 2500 re_rxeof(sc, &rx_npkts); 2501 re_txeof(sc); 2502 2503 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2504 re_start_locked(ifp); 2505 2506 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 2507 u_int16_t status; 2508 2509 status = CSR_READ_2(sc, RL_ISR); 2510 if (status == 0xffff) 2511 return (rx_npkts); 2512 if (status) 2513 CSR_WRITE_2(sc, RL_ISR, status); 2514 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && 2515 (sc->rl_flags & RL_FLAG_PCIE)) 2516 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2517 2518 /* 2519 * XXX check behaviour on receiver stalls. 2520 */ 2521 2522 if (status & RL_ISR_SYSTEM_ERR) { 2523 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2524 re_init_locked(sc); 2525 } 2526 } 2527 return (rx_npkts); 2528} 2529#endif /* DEVICE_POLLING */ 2530 2531static int 2532re_intr(void *arg) 2533{ 2534 struct rl_softc *sc; 2535 uint16_t status; 2536 2537 sc = arg; 2538 2539 status = CSR_READ_2(sc, RL_ISR); 2540 if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0) 2541 return (FILTER_STRAY); 2542 CSR_WRITE_2(sc, RL_IMR, 0); 2543 2544 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask); 2545 2546 return (FILTER_HANDLED); 2547} 2548 2549static void 2550re_int_task(void *arg, int npending) 2551{ 2552 struct rl_softc *sc; 2553 struct ifnet *ifp; 2554 u_int16_t status; 2555 int rval = 0; 2556 2557 sc = arg; 2558 ifp = sc->rl_ifp; 2559 2560 RL_LOCK(sc); 2561 2562 status = CSR_READ_2(sc, RL_ISR); 2563 CSR_WRITE_2(sc, RL_ISR, status); 2564 2565 if (sc->suspended || 2566 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2567 RL_UNLOCK(sc); 2568 return; 2569 } 2570 2571#ifdef DEVICE_POLLING 2572 if (ifp->if_capenable & IFCAP_POLLING) { 2573 RL_UNLOCK(sc); 2574 return; 2575 } 2576#endif 2577 2578 if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW)) 2579 rval = re_rxeof(sc, NULL); 2580 2581 /* 2582 * Some chips will ignore a second TX request issued 2583 * while an existing transmission is in progress. If 2584 * the transmitter goes idle but there are still 2585 * packets waiting to be sent, we need to restart the 2586 * channel here to flush them out. This only seems to 2587 * be required with the PCIe devices. 
2588 */ 2589 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && 2590 (sc->rl_flags & RL_FLAG_PCIE)) 2591 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2592 if (status & ( 2593#ifdef RE_TX_MODERATION 2594 RL_ISR_TIMEOUT_EXPIRED| 2595#else 2596 RL_ISR_TX_OK| 2597#endif 2598 RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL)) 2599 re_txeof(sc); 2600 2601 if (status & RL_ISR_SYSTEM_ERR) { 2602 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2603 re_init_locked(sc); 2604 } 2605 2606 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2607 re_start_locked(ifp); 2608 2609 RL_UNLOCK(sc); 2610 2611 if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) { 2612 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask); 2613 return; 2614 } 2615 2616 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 2617} 2618 2619static void 2620re_intr_msi(void *xsc) 2621{ 2622 struct rl_softc *sc; 2623 struct ifnet *ifp; 2624 uint16_t intrs, status; 2625 2626 sc = xsc; 2627 RL_LOCK(sc); 2628 2629 ifp = sc->rl_ifp; 2630#ifdef DEVICE_POLLING 2631 if (ifp->if_capenable & IFCAP_POLLING) { 2632 RL_UNLOCK(sc); 2633 return; 2634 } 2635#endif 2636 /* Disable interrupts. */ 2637 CSR_WRITE_2(sc, RL_IMR, 0); 2638 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2639 RL_UNLOCK(sc); 2640 return; 2641 } 2642 2643 intrs = RL_INTRS_CPLUS; 2644 status = CSR_READ_2(sc, RL_ISR); 2645 CSR_WRITE_2(sc, RL_ISR, status); 2646 if (sc->rl_int_rx_act > 0) { 2647 intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | 2648 RL_ISR_RX_OVERRUN); 2649 status &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | 2650 RL_ISR_RX_OVERRUN); 2651 } 2652 2653 if (status & (RL_ISR_TIMEOUT_EXPIRED | RL_ISR_RX_OK | RL_ISR_RX_ERR | 2654 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) { 2655 re_rxeof(sc, NULL); 2656 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2657 if (sc->rl_int_rx_mod != 0 && 2658 (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR | 2659 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) != 0) { 2660 /* Rearm one-shot timer. */ 2661 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2662 intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | 2663 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN); 2664 sc->rl_int_rx_act = 1; 2665 } else { 2666 intrs |= RL_ISR_RX_OK | RL_ISR_RX_ERR | 2667 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN; 2668 sc->rl_int_rx_act = 0; 2669 } 2670 } 2671 } 2672 2673 /* 2674 * Some chips will ignore a second TX request issued 2675 * while an existing transmission is in progress. If 2676 * the transmitter goes idle but there are still 2677 * packets waiting to be sent, we need to restart the 2678 * channel here to flush them out. This only seems to 2679 * be required with the PCIe devices. 
2680 */ 2681 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && 2682 (sc->rl_flags & RL_FLAG_PCIE)) 2683 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2684 if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR | RL_ISR_TX_DESC_UNAVAIL)) 2685 re_txeof(sc); 2686 2687 if (status & RL_ISR_SYSTEM_ERR) { 2688 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2689 re_init_locked(sc); 2690 } 2691 2692 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2693 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2694 re_start_locked(ifp); 2695 CSR_WRITE_2(sc, RL_IMR, intrs); 2696 } 2697 RL_UNLOCK(sc); 2698} 2699 2700static int 2701re_encap(struct rl_softc *sc, struct mbuf **m_head) 2702{ 2703 struct rl_txdesc *txd, *txd_last; 2704 bus_dma_segment_t segs[RL_NTXSEGS]; 2705 bus_dmamap_t map; 2706 struct mbuf *m_new; 2707 struct rl_desc *desc; 2708 int nsegs, prod; 2709 int i, error, ei, si; 2710 int padlen; 2711 uint32_t cmdstat, csum_flags, vlanctl; 2712 2713 RL_LOCK_ASSERT(sc); 2714 M_ASSERTPKTHDR((*m_head)); 2715 2716 /* 2717 * With some of the RealTek chips, using the checksum offload 2718 * support in conjunction with the autopadding feature results 2719 * in the transmission of corrupt frames. For example, if we 2720 * need to send a really small IP fragment that's less than 60 2721 * bytes in size, and IP header checksumming is enabled, the 2722 * resulting ethernet frame that appears on the wire will 2723 * have garbled payload. To work around this, if TX IP checksum 2724 * offload is enabled, we always manually pad short frames out 2725 * to the minimum ethernet frame size. 2726 */ 2727 if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 && 2728 (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN && 2729 ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) { 2730 padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len; 2731 if (M_WRITABLE(*m_head) == 0) { 2732 /* Get a writable copy. */ 2733 m_new = m_dup(*m_head, M_NOWAIT); 2734 m_freem(*m_head); 2735 if (m_new == NULL) { 2736 *m_head = NULL; 2737 return (ENOBUFS); 2738 } 2739 *m_head = m_new; 2740 } 2741 if ((*m_head)->m_next != NULL || 2742 M_TRAILINGSPACE(*m_head) < padlen) { 2743 m_new = m_defrag(*m_head, M_NOWAIT); 2744 if (m_new == NULL) { 2745 m_freem(*m_head); 2746 *m_head = NULL; 2747 return (ENOBUFS); 2748 } 2749 } else 2750 m_new = *m_head; 2751 2752 /* 2753 * Manually pad short frames, and zero the pad space 2754 * to avoid leaking data. 2755 */ 2756 bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen); 2757 m_new->m_pkthdr.len += padlen; 2758 m_new->m_len = m_new->m_pkthdr.len; 2759 *m_head = m_new; 2760 } 2761 2762 prod = sc->rl_ldata.rl_tx_prodidx; 2763 txd = &sc->rl_ldata.rl_tx_desc[prod]; 2764 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, 2765 *m_head, segs, &nsegs, BUS_DMA_NOWAIT); 2766 if (error == EFBIG) { 2767 m_new = m_collapse(*m_head, M_NOWAIT, RL_NTXSEGS); 2768 if (m_new == NULL) { 2769 m_freem(*m_head); 2770 *m_head = NULL; 2771 return (ENOBUFS); 2772 } 2773 *m_head = m_new; 2774 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, 2775 txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT); 2776 if (error != 0) { 2777 m_freem(*m_head); 2778 *m_head = NULL; 2779 return (error); 2780 } 2781 } else if (error != 0) 2782 return (error); 2783 if (nsegs == 0) { 2784 m_freem(*m_head); 2785 *m_head = NULL; 2786 return (EIO); 2787 } 2788 2789 /* Check for number of available descriptors. 
*/ 2790 if (sc->rl_ldata.rl_tx_free - nsegs <= 1) { 2791 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap); 2792 return (ENOBUFS); 2793 } 2794 2795 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, 2796 BUS_DMASYNC_PREWRITE); 2797 2798 /* 2799 * Set up checksum offload. Note: checksum offload bits must 2800 * appear in all descriptors of a multi-descriptor transmit 2801 * attempt. This is a requirement, according to testing done 2802 * with an 8169 chip. 2803 */ 2804 vlanctl = 0; 2805 csum_flags = 0; 2806 if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2807 if ((sc->rl_flags & RL_FLAG_DESCV2) != 0) { 2808 csum_flags |= RL_TDESC_CMD_LGSEND; 2809 vlanctl |= ((uint32_t)(*m_head)->m_pkthdr.tso_segsz << 2810 RL_TDESC_CMD_MSSVALV2_SHIFT); 2811 } else { 2812 csum_flags |= RL_TDESC_CMD_LGSEND | 2813 ((uint32_t)(*m_head)->m_pkthdr.tso_segsz << 2814 RL_TDESC_CMD_MSSVAL_SHIFT); 2815 } 2816 } else { 2817 /* 2818 * Unconditionally enable IP checksum if TCP or UDP 2819 * checksum is required. Otherwise, the TCP/UDP checksum 2820 * setting has no effect. 2821 */ 2822 if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) { 2823 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) { 2824 csum_flags |= RL_TDESC_CMD_IPCSUM; 2825 if (((*m_head)->m_pkthdr.csum_flags & 2826 CSUM_TCP) != 0) 2827 csum_flags |= RL_TDESC_CMD_TCPCSUM; 2828 if (((*m_head)->m_pkthdr.csum_flags & 2829 CSUM_UDP) != 0) 2830 csum_flags |= RL_TDESC_CMD_UDPCSUM; 2831 } else { 2832 vlanctl |= RL_TDESC_CMD_IPCSUMV2; 2833 if (((*m_head)->m_pkthdr.csum_flags & 2834 CSUM_TCP) != 0) 2835 vlanctl |= RL_TDESC_CMD_TCPCSUMV2; 2836 if (((*m_head)->m_pkthdr.csum_flags & 2837 CSUM_UDP) != 0) 2838 vlanctl |= RL_TDESC_CMD_UDPCSUMV2; 2839 } 2840 } 2841 } 2842 2843 /* 2844 * Set up hardware VLAN tagging. Note: VLAN tag info must 2845 * appear in all descriptors of a multi-descriptor 2846 * transmission attempt. 2847 */ 2848 if ((*m_head)->m_flags & M_VLANTAG) 2849 vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) | 2850 RL_TDESC_VLANCTL_TAG; 2851 2852 si = prod; 2853 for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) { 2854 desc = &sc->rl_ldata.rl_tx_list[prod]; 2855 desc->rl_vlanctl = htole32(vlanctl); 2856 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr)); 2857 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr)); 2858 cmdstat = segs[i].ds_len; 2859 if (i != 0) 2860 cmdstat |= RL_TDESC_CMD_OWN; 2861 if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1) 2862 cmdstat |= RL_TDESC_CMD_EOR; 2863 desc->rl_cmdstat = htole32(cmdstat | csum_flags); 2864 sc->rl_ldata.rl_tx_free--; 2865 } 2866 /* Update producer index. */ 2867 sc->rl_ldata.rl_tx_prodidx = prod; 2868 2869 /* Set EOF on the last descriptor. */ 2870 ei = RL_TX_DESC_PRV(sc, prod); 2871 desc = &sc->rl_ldata.rl_tx_list[ei]; 2872 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF); 2873 2874 desc = &sc->rl_ldata.rl_tx_list[si]; 2875 /* Set SOF and transfer ownership of packet to the chip. */ 2876 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF); 2877 2878 /* 2879 * Ensure that the map for this transmission 2880 * is placed at the array index of the last descriptor 2881 * in this chain. (Swap last and first dmamaps.) 
2882 */ 2883 txd_last = &sc->rl_ldata.rl_tx_desc[ei]; 2884 map = txd->tx_dmamap; 2885 txd->tx_dmamap = txd_last->tx_dmamap; 2886 txd_last->tx_dmamap = map; 2887 txd_last->tx_m = *m_head; 2888 2889 return (0); 2890} 2891 2892static void 2893re_start(struct ifnet *ifp) 2894{ 2895 struct rl_softc *sc; 2896 2897 sc = ifp->if_softc; 2898 RL_LOCK(sc); 2899 re_start_locked(ifp); 2900 RL_UNLOCK(sc); 2901} 2902 2903/* 2904 * Main transmit routine for C+ and gigE NICs. 2905 */ 2906static void 2907re_start_locked(struct ifnet *ifp) 2908{ 2909 struct rl_softc *sc; 2910 struct mbuf *m_head; 2911 int queued; 2912 2913 sc = ifp->if_softc; 2914 2915#ifdef DEV_NETMAP 2916 /* XXX is this necessary ? */ 2917 if (ifp->if_capenable & IFCAP_NETMAP) { 2918 struct netmap_kring *kring = &NA(ifp)->tx_rings[0]; 2919 if (sc->rl_ldata.rl_tx_prodidx != kring->nr_hwcur) { 2920 /* kick the tx unit */ 2921 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2922#ifdef RE_TX_MODERATION 2923 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2924#endif 2925 sc->rl_watchdog_timer = 5; 2926 } 2927 return; 2928 } 2929#endif /* DEV_NETMAP */ 2930 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2931 IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0) 2932 return; 2933 2934 for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 2935 sc->rl_ldata.rl_tx_free > 1;) { 2936 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2937 if (m_head == NULL) 2938 break; 2939 2940 if (re_encap(sc, &m_head) != 0) { 2941 if (m_head == NULL) 2942 break; 2943 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2944 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2945 break; 2946 } 2947 2948 /* 2949 * If there's a BPF listener, bounce a copy of this frame 2950 * to him. 2951 */ 2952 ETHER_BPF_MTAP(ifp, m_head); 2953 2954 queued++; 2955 } 2956 2957 if (queued == 0) { 2958#ifdef RE_TX_MODERATION 2959 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) 2960 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2961#endif 2962 return; 2963 } 2964 2965 /* Flush the TX descriptors */ 2966 2967 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 2968 sc->rl_ldata.rl_tx_list_map, 2969 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2970 2971 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2972 2973#ifdef RE_TX_MODERATION 2974 /* 2975 * Use the countdown timer for interrupt moderation. 2976 * 'TX done' interrupts are disabled. Instead, we reset the 2977 * countdown timer, which will begin counting until it hits 2978 * the value in the TIMERINT register, and then trigger an 2979 * interrupt. Each time we write to the TIMERCNT register, 2980 * the timer count is reset to 0. 2981 */ 2982 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2983#endif 2984 2985 /* 2986 * Set a timeout in case the chip goes out to lunch. 
2987 */ 2988 sc->rl_watchdog_timer = 5; 2989} 2990 2991static void 2992re_set_jumbo(struct rl_softc *sc, int jumbo) 2993{ 2994 2995 if (sc->rl_hwrev->rl_rev == RL_HWREV_8168E_VL) { 2996 pci_set_max_read_req(sc->rl_dev, 4096); 2997 return; 2998 } 2999 3000 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 3001 if (jumbo != 0) { 3002 CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) | 3003 RL_CFG3_JUMBO_EN0); 3004 switch (sc->rl_hwrev->rl_rev) { 3005 case RL_HWREV_8168DP: 3006 break; 3007 case RL_HWREV_8168E: 3008 CSR_WRITE_1(sc, sc->rl_cfg4, 3009 CSR_READ_1(sc, sc->rl_cfg4) | 0x01); 3010 break; 3011 default: 3012 CSR_WRITE_1(sc, sc->rl_cfg4, 3013 CSR_READ_1(sc, sc->rl_cfg4) | RL_CFG4_JUMBO_EN1); 3014 } 3015 } else { 3016 CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) & 3017 ~RL_CFG3_JUMBO_EN0); 3018 switch (sc->rl_hwrev->rl_rev) { 3019 case RL_HWREV_8168DP: 3020 break; 3021 case RL_HWREV_8168E: 3022 CSR_WRITE_1(sc, sc->rl_cfg4, 3023 CSR_READ_1(sc, sc->rl_cfg4) & ~0x01); 3024 break; 3025 default: 3026 CSR_WRITE_1(sc, sc->rl_cfg4, 3027 CSR_READ_1(sc, sc->rl_cfg4) & ~RL_CFG4_JUMBO_EN1); 3028 } 3029 } 3030 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 3031 3032 switch (sc->rl_hwrev->rl_rev) { 3033 case RL_HWREV_8168DP: 3034 pci_set_max_read_req(sc->rl_dev, 4096); 3035 break; 3036 default: 3037 if (jumbo != 0) 3038 pci_set_max_read_req(sc->rl_dev, 512); 3039 else 3040 pci_set_max_read_req(sc->rl_dev, 4096); 3041 } 3042} 3043 3044static void 3045re_init(void *xsc) 3046{ 3047 struct rl_softc *sc = xsc; 3048 3049 RL_LOCK(sc); 3050 re_init_locked(sc); 3051 RL_UNLOCK(sc); 3052} 3053 3054static void 3055re_init_locked(struct rl_softc *sc) 3056{ 3057 struct ifnet *ifp = sc->rl_ifp; 3058 struct mii_data *mii; 3059 uint32_t reg; 3060 uint16_t cfg; 3061 union { 3062 uint32_t align_dummy; 3063 u_char eaddr[ETHER_ADDR_LEN]; 3064 } eaddr; 3065 3066 RL_LOCK_ASSERT(sc); 3067 3068 mii = device_get_softc(sc->rl_miibus); 3069 3070 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3071 return; 3072 3073 /* 3074 * Cancel pending I/O and free all RX/TX buffers. 3075 */ 3076 re_stop(sc); 3077 3078 /* Put controller into known state. */ 3079 re_reset(sc); 3080 3081 /* 3082 * For C+ mode, initialize the RX descriptors and mbufs. 3083 */ 3084 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { 3085 if (ifp->if_mtu > RL_MTU) { 3086 if (re_jrx_list_init(sc) != 0) { 3087 device_printf(sc->rl_dev, 3088 "no memory for jumbo RX buffers\n"); 3089 re_stop(sc); 3090 return; 3091 } 3092 /* Disable checksum offloading for jumbo frames. */ 3093 ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_TSO4); 3094 ifp->if_hwassist &= ~(RE_CSUM_FEATURES | CSUM_TSO); 3095 } else { 3096 if (re_rx_list_init(sc) != 0) { 3097 device_printf(sc->rl_dev, 3098 "no memory for RX buffers\n"); 3099 re_stop(sc); 3100 return; 3101 } 3102 } 3103 re_set_jumbo(sc, ifp->if_mtu > RL_MTU); 3104 } else { 3105 if (re_rx_list_init(sc) != 0) { 3106 device_printf(sc->rl_dev, "no memory for RX buffers\n"); 3107 re_stop(sc); 3108 return; 3109 } 3110 if ((sc->rl_flags & RL_FLAG_PCIE) != 0 && 3111 pci_get_device(sc->rl_dev) != RT_DEVICEID_8101E) { 3112 if (ifp->if_mtu > RL_MTU) 3113 pci_set_max_read_req(sc->rl_dev, 512); 3114 else 3115 pci_set_max_read_req(sc->rl_dev, 4096); 3116 } 3117 } 3118 re_tx_list_init(sc); 3119 3120 /* 3121 * Enable C+ RX and TX mode, as well as VLAN stripping and 3122 * RX checksum offload. We must configure the C+ register 3123 * before all others. 
3124 */ 3125 cfg = RL_CPLUSCMD_PCI_MRW; 3126 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 3127 cfg |= RL_CPLUSCMD_RXCSUM_ENB; 3128 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 3129 cfg |= RL_CPLUSCMD_VLANSTRIP; 3130 if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) { 3131 cfg |= RL_CPLUSCMD_MACSTAT_DIS; 3132 /* XXX magic. */ 3133 cfg |= 0x0001; 3134 } else 3135 cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB; 3136 CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg); 3137 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SC || 3138 sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) { 3139 reg = 0x000fff00; 3140 if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_PCI66MHZ) != 0) 3141 reg |= 0x000000ff; 3142 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) 3143 reg |= 0x00f00000; 3144 CSR_WRITE_4(sc, 0x7c, reg); 3145 /* Disable interrupt mitigation. */ 3146 CSR_WRITE_2(sc, 0xe2, 0); 3147 } 3148 /* 3149 * Disable TSO if interface MTU size is greater than MSS 3150 * allowed in controller. 3151 */ 3152 if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) { 3153 ifp->if_capenable &= ~IFCAP_TSO4; 3154 ifp->if_hwassist &= ~CSUM_TSO; 3155 } 3156 3157 /* 3158 * Init our MAC address. Even though the chipset 3159 * documentation doesn't mention it, we need to enter "Config 3160 * register write enable" mode to modify the ID registers. 3161 */ 3162 /* Copy MAC address on stack to align. */ 3163 bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN); 3164 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 3165 CSR_WRITE_4(sc, RL_IDR0, 3166 htole32(*(u_int32_t *)(&eaddr.eaddr[0]))); 3167 CSR_WRITE_4(sc, RL_IDR4, 3168 htole32(*(u_int32_t *)(&eaddr.eaddr[4]))); 3169 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 3170 3171 /* 3172 * Load the addresses of the RX and TX lists into the chip. 3173 */ 3174 3175 CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, 3176 RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr)); 3177 CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, 3178 RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr)); 3179 3180 CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, 3181 RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr)); 3182 CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, 3183 RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr)); 3184 3185 if ((sc->rl_flags & RL_FLAG_RXDV_GATED) != 0) 3186 CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) & 3187 ~0x00080000); 3188 3189 /* 3190 * Enable transmit and receive. 3191 */ 3192 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 3193 3194 /* 3195 * Set the initial TX configuration. 3196 */ 3197 if (sc->rl_testmode) { 3198 if (sc->rl_type == RL_8169) 3199 CSR_WRITE_4(sc, RL_TXCFG, 3200 RL_TXCFG_CONFIG|RL_LOOPTEST_ON); 3201 else 3202 CSR_WRITE_4(sc, RL_TXCFG, 3203 RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS); 3204 } else 3205 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); 3206 3207 CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16); 3208 3209 /* 3210 * Set the initial RX configuration. 3211 */ 3212 re_set_rxmode(sc); 3213 3214 /* Configure interrupt moderation. */ 3215 if (sc->rl_type == RL_8169) { 3216 /* Magic from vendor. */ 3217 CSR_WRITE_2(sc, RL_INTRMOD, 0x5100); 3218 } 3219 3220#ifdef DEVICE_POLLING 3221 /* 3222 * Disable interrupts if we are polling. 3223 */ 3224 if (ifp->if_capenable & IFCAP_POLLING) 3225 CSR_WRITE_2(sc, RL_IMR, 0); 3226 else /* otherwise ... */ 3227#endif 3228 3229 /* 3230 * Enable interrupts. 
3231 */ 3232 if (sc->rl_testmode) 3233 CSR_WRITE_2(sc, RL_IMR, 0); 3234 else 3235 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 3236 CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS); 3237 3238 /* Set initial TX threshold */ 3239 sc->rl_txthresh = RL_TX_THRESH_INIT; 3240 3241 /* Start RX/TX process. */ 3242 CSR_WRITE_4(sc, RL_MISSEDPKT, 0); 3243#ifdef notdef 3244 /* Enable receiver and transmitter. */ 3245 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 3246#endif 3247 3248 /* 3249 * Initialize the timer interrupt register so that 3250 * a timer interrupt will be generated once the timer 3251 * reaches a certain number of ticks. The timer is 3252 * reloaded on each transmit. 3253 */ 3254#ifdef RE_TX_MODERATION 3255 /* 3256 * Use the timer interrupt register for TX interrupt 3257 * moderation, which dramatically improves the TX frame rate. 3258 */ 3259 if (sc->rl_type == RL_8169) 3260 CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800); 3261 else 3262 CSR_WRITE_4(sc, RL_TIMERINT, 0x400); 3263#else 3264 /* 3265 * Use the timer interrupt register for RX interrupt 3266 * moderation. 3267 */ 3268 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 && 3269 intr_filter == 0) { 3270 if (sc->rl_type == RL_8169) 3271 CSR_WRITE_4(sc, RL_TIMERINT_8169, 3272 RL_USECS(sc->rl_int_rx_mod)); 3273 } else { 3274 if (sc->rl_type == RL_8169) 3275 CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(0)); 3276 } 3277#endif 3278 3279 /* 3280 * For 8169 gigE NICs, set the max allowed RX packet 3281 * size so we can receive jumbo frames. 3282 */ 3283 if (sc->rl_type == RL_8169) { 3284 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { 3285 /* 3286 * For controllers that use the new jumbo frame scheme, 3287 * set the maximum jumbo frame size depending on the 3288 * controller revision. 3289 */ 3290 if (ifp->if_mtu > RL_MTU) 3291 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 3292 sc->rl_hwrev->rl_max_mtu + 3293 ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN + 3294 ETHER_CRC_LEN); 3295 else 3296 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 3297 RE_RX_DESC_BUFLEN); 3298 } else if ((sc->rl_flags & RL_FLAG_PCIE) != 0 && 3299 sc->rl_hwrev->rl_max_mtu == RL_MTU) { 3300 /* RTL810x has no jumbo frame support. */ 3301 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN); 3302 } else 3303 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383); 3304 } 3305 3306 if (sc->rl_testmode) 3307 return; 3308 3309 CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) | 3310 RL_CFG1_DRVLOAD); 3311 3312 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3313 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3314 3315 sc->rl_flags &= ~RL_FLAG_LINK; 3316 mii_mediachg(mii); 3317 3318 sc->rl_watchdog_timer = 0; 3319 callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); 3320} 3321 3322/* 3323 * Set media options. 3324 */ 3325static int 3326re_ifmedia_upd(struct ifnet *ifp) 3327{ 3328 struct rl_softc *sc; 3329 struct mii_data *mii; 3330 int error; 3331 3332 sc = ifp->if_softc; 3333 mii = device_get_softc(sc->rl_miibus); 3334 RL_LOCK(sc); 3335 error = mii_mediachg(mii); 3336 RL_UNLOCK(sc); 3337 3338 return (error); 3339} 3340 3341/* 3342 * Report current media status. 
3343 */ 3344static void 3345re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 3346{ 3347 struct rl_softc *sc; 3348 struct mii_data *mii; 3349 3350 sc = ifp->if_softc; 3351 mii = device_get_softc(sc->rl_miibus); 3352 3353 RL_LOCK(sc); 3354 mii_pollstat(mii); 3355 ifmr->ifm_active = mii->mii_media_active; 3356 ifmr->ifm_status = mii->mii_media_status; 3357 RL_UNLOCK(sc); 3358} 3359 3360static int 3361re_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 3362{ 3363 struct rl_softc *sc = ifp->if_softc; 3364 struct ifreq *ifr = (struct ifreq *) data; 3365 struct mii_data *mii; 3366 uint32_t rev; 3367 int error = 0; 3368 3369 switch (command) { 3370 case SIOCSIFMTU: 3371 if (ifr->ifr_mtu < ETHERMIN || 3372 ifr->ifr_mtu > sc->rl_hwrev->rl_max_mtu || 3373 ((sc->rl_flags & RL_FLAG_FASTETHER) != 0 && 3374 ifr->ifr_mtu > RL_MTU)) { 3375 error = EINVAL; 3376 break; 3377 } 3378 RL_LOCK(sc); 3379 if (ifp->if_mtu != ifr->ifr_mtu) { 3380 ifp->if_mtu = ifr->ifr_mtu; 3381 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && 3382 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 3383 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3384 re_init_locked(sc); 3385 } 3386 if (ifp->if_mtu > RL_TSO_MTU && 3387 (ifp->if_capenable & IFCAP_TSO4) != 0) { 3388 ifp->if_capenable &= ~(IFCAP_TSO4 | 3389 IFCAP_VLAN_HWTSO); 3390 ifp->if_hwassist &= ~CSUM_TSO; 3391 } 3392 VLAN_CAPABILITIES(ifp); 3393 } 3394 RL_UNLOCK(sc); 3395 break; 3396 case SIOCSIFFLAGS: 3397 RL_LOCK(sc); 3398 if ((ifp->if_flags & IFF_UP) != 0) { 3399 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 3400 if (((ifp->if_flags ^ sc->rl_if_flags) 3401 & (IFF_PROMISC | IFF_ALLMULTI)) != 0) 3402 re_set_rxmode(sc); 3403 } else 3404 re_init_locked(sc); 3405 } else { 3406 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3407 re_stop(sc); 3408 } 3409 sc->rl_if_flags = ifp->if_flags; 3410 RL_UNLOCK(sc); 3411 break; 3412 case SIOCADDMULTI: 3413 case SIOCDELMULTI: 3414 RL_LOCK(sc); 3415 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3416 re_set_rxmode(sc); 3417 RL_UNLOCK(sc); 3418 break; 3419 case SIOCGIFMEDIA: 3420 case SIOCSIFMEDIA: 3421 mii = device_get_softc(sc->rl_miibus); 3422 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 3423 break; 3424 case SIOCSIFCAP: 3425 { 3426 int mask, reinit; 3427 3428 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3429 reinit = 0; 3430#ifdef DEVICE_POLLING 3431 if (mask & IFCAP_POLLING) { 3432 if (ifr->ifr_reqcap & IFCAP_POLLING) { 3433 error = ether_poll_register(re_poll, ifp); 3434 if (error) 3435 return (error); 3436 RL_LOCK(sc); 3437 /* Disable interrupts */ 3438 CSR_WRITE_2(sc, RL_IMR, 0x0000); 3439 ifp->if_capenable |= IFCAP_POLLING; 3440 RL_UNLOCK(sc); 3441 } else { 3442 error = ether_poll_deregister(ifp); 3443 /* Enable interrupts. 
*/ 3444 RL_LOCK(sc); 3445 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 3446 ifp->if_capenable &= ~IFCAP_POLLING; 3447 RL_UNLOCK(sc); 3448 } 3449 } 3450#endif /* DEVICE_POLLING */ 3451 RL_LOCK(sc); 3452 if ((mask & IFCAP_TXCSUM) != 0 && 3453 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 3454 ifp->if_capenable ^= IFCAP_TXCSUM; 3455 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) { 3456 rev = sc->rl_hwrev->rl_rev; 3457 if (rev == RL_HWREV_8168C || 3458 rev == RL_HWREV_8168C_SPIN2 || 3459 rev == RL_HWREV_8168CP) 3460 ifp->if_hwassist |= CSUM_TCP | CSUM_UDP; 3461 else 3462 ifp->if_hwassist |= RE_CSUM_FEATURES; 3463 } else 3464 ifp->if_hwassist &= ~RE_CSUM_FEATURES; 3465 reinit = 1; 3466 } 3467 if ((mask & IFCAP_RXCSUM) != 0 && 3468 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) { 3469 ifp->if_capenable ^= IFCAP_RXCSUM; 3470 reinit = 1; 3471 } 3472 if ((mask & IFCAP_TSO4) != 0 && 3473 (ifp->if_capabilities & IFCAP_TSO4) != 0) { 3474 ifp->if_capenable ^= IFCAP_TSO4; 3475 if ((IFCAP_TSO4 & ifp->if_capenable) != 0) 3476 ifp->if_hwassist |= CSUM_TSO; 3477 else 3478 ifp->if_hwassist &= ~CSUM_TSO; 3479 if (ifp->if_mtu > RL_TSO_MTU && 3480 (ifp->if_capenable & IFCAP_TSO4) != 0) { 3481 ifp->if_capenable &= ~IFCAP_TSO4; 3482 ifp->if_hwassist &= ~CSUM_TSO; 3483 } 3484 } 3485 if ((mask & IFCAP_VLAN_HWTSO) != 0 && 3486 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) 3487 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 3488 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 3489 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { 3490 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 3491 /* TSO over VLAN requires VLAN hardware tagging. */ 3492 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 3493 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO; 3494 reinit = 1; 3495 } 3496 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && 3497 (mask & (IFCAP_HWCSUM | IFCAP_TSO4 | 3498 IFCAP_VLAN_HWTSO)) != 0) 3499 reinit = 1; 3500 if ((mask & IFCAP_WOL) != 0 && 3501 (ifp->if_capabilities & IFCAP_WOL) != 0) { 3502 if ((mask & IFCAP_WOL_UCAST) != 0) 3503 ifp->if_capenable ^= IFCAP_WOL_UCAST; 3504 if ((mask & IFCAP_WOL_MCAST) != 0) 3505 ifp->if_capenable ^= IFCAP_WOL_MCAST; 3506 if ((mask & IFCAP_WOL_MAGIC) != 0) 3507 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 3508 } 3509 if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING) { 3510 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3511 re_init_locked(sc); 3512 } 3513 RL_UNLOCK(sc); 3514 VLAN_CAPABILITIES(ifp); 3515 } 3516 break; 3517 default: 3518 error = ether_ioctl(ifp, command, data); 3519 break; 3520 } 3521 3522 return (error); 3523} 3524 3525static void 3526re_watchdog(struct rl_softc *sc) 3527{ 3528 struct ifnet *ifp; 3529 3530 RL_LOCK_ASSERT(sc); 3531 3532 if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0) 3533 return; 3534 3535 ifp = sc->rl_ifp; 3536 re_txeof(sc); 3537 if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) { 3538 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 3539 "-- recovering\n"); 3540 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3541 re_start_locked(ifp); 3542 return; 3543 } 3544 3545 if_printf(ifp, "watchdog timeout\n"); 3546 ifp->if_oerrors++; 3547 3548 re_rxeof(sc, NULL); 3549 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3550 re_init_locked(sc); 3551 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3552 re_start_locked(ifp); 3553} 3554 3555/* 3556 * Stop the adapter and free any mbufs allocated to the 3557 * RX and TX lists. 
3558 */ 3559static void 3560re_stop(struct rl_softc *sc) 3561{ 3562 int i; 3563 struct ifnet *ifp; 3564 struct rl_txdesc *txd; 3565 struct rl_rxdesc *rxd; 3566 3567 RL_LOCK_ASSERT(sc); 3568 3569 ifp = sc->rl_ifp; 3570 3571 sc->rl_watchdog_timer = 0; 3572 callout_stop(&sc->rl_stat_callout); 3573 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 3574 3575 /* 3576 * Disable accepting frames to put RX MAC into idle state. 3577 * Otherwise it's possible to get frames while stop command 3578 * execution is in progress and controller can DMA the frame 3579 * to already freed RX buffer during that period. 3580 */ 3581 CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) & 3582 ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI | 3583 RL_RXCFG_RX_BROAD)); 3584 3585 if ((sc->rl_flags & RL_FLAG_WAIT_TXPOLL) != 0) { 3586 for (i = RL_TIMEOUT; i > 0; i--) { 3587 if ((CSR_READ_1(sc, sc->rl_txstart) & 3588 RL_TXSTART_START) == 0) 3589 break; 3590 DELAY(20); 3591 } 3592 if (i == 0) 3593 device_printf(sc->rl_dev, 3594 "stopping TX poll timed out!\n"); 3595 CSR_WRITE_1(sc, RL_COMMAND, 0x00); 3596 } else if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0) { 3597 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB | 3598 RL_CMD_RX_ENB); 3599 if ((sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) != 0) { 3600 for (i = RL_TIMEOUT; i > 0; i--) { 3601 if ((CSR_READ_4(sc, RL_TXCFG) & 3602 RL_TXCFG_QUEUE_EMPTY) != 0) 3603 break; 3604 DELAY(100); 3605 } 3606 if (i == 0) 3607 device_printf(sc->rl_dev, 3608 "stopping TXQ timed out!\n"); 3609 } 3610 } else 3611 CSR_WRITE_1(sc, RL_COMMAND, 0x00); 3612 DELAY(1000); 3613 CSR_WRITE_2(sc, RL_IMR, 0x0000); 3614 CSR_WRITE_2(sc, RL_ISR, 0xFFFF); 3615 3616 if (sc->rl_head != NULL) { 3617 m_freem(sc->rl_head); 3618 sc->rl_head = sc->rl_tail = NULL; 3619 } 3620 3621 /* Free the TX list buffers. */ 3622 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { 3623 txd = &sc->rl_ldata.rl_tx_desc[i]; 3624 if (txd->tx_m != NULL) { 3625 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, 3626 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 3627 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, 3628 txd->tx_dmamap); 3629 m_freem(txd->tx_m); 3630 txd->tx_m = NULL; 3631 } 3632 } 3633 3634 /* Free the RX list buffers. */ 3635 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 3636 rxd = &sc->rl_ldata.rl_rx_desc[i]; 3637 if (rxd->rx_m != NULL) { 3638 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, 3639 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 3640 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, 3641 rxd->rx_dmamap); 3642 m_freem(rxd->rx_m); 3643 rxd->rx_m = NULL; 3644 } 3645 } 3646 3647 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { 3648 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 3649 rxd = &sc->rl_ldata.rl_jrx_desc[i]; 3650 if (rxd->rx_m != NULL) { 3651 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, 3652 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 3653 bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, 3654 rxd->rx_dmamap); 3655 m_freem(rxd->rx_m); 3656 rxd->rx_m = NULL; 3657 } 3658 } 3659 } 3660} 3661 3662/* 3663 * Device suspend routine. Stop the interface and save some PCI 3664 * settings in case the BIOS doesn't restore them properly on 3665 * resume. 3666 */ 3667static int 3668re_suspend(device_t dev) 3669{ 3670 struct rl_softc *sc; 3671 3672 sc = device_get_softc(dev); 3673 3674 RL_LOCK(sc); 3675 re_stop(sc); 3676 re_setwol(sc); 3677 sc->suspended = 1; 3678 RL_UNLOCK(sc); 3679 3680 return (0); 3681} 3682 3683/* 3684 * Device resume routine. 
Restore some PCI settings in case the BIOS 3685 * doesn't, re-enable busmastering, and restart the interface if 3686 * appropriate. 3687 */ 3688static int 3689re_resume(device_t dev) 3690{ 3691 struct rl_softc *sc; 3692 struct ifnet *ifp; 3693 3694 sc = device_get_softc(dev); 3695 3696 RL_LOCK(sc); 3697 3698 ifp = sc->rl_ifp; 3699 /* Take controller out of sleep mode. */ 3700 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { 3701 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) 3702 CSR_WRITE_1(sc, RL_GPIO, 3703 CSR_READ_1(sc, RL_GPIO) | 0x01); 3704 } 3705 3706 /* 3707 * Clear WOL matching such that normal Rx filtering 3708 * wouldn't interfere with WOL patterns. 3709 */ 3710 re_clrwol(sc); 3711 3712 /* reinitialize interface if necessary */ 3713 if (ifp->if_flags & IFF_UP) 3714 re_init_locked(sc); 3715 3716 sc->suspended = 0; 3717 RL_UNLOCK(sc); 3718 3719 return (0); 3720} 3721 3722/* 3723 * Stop all chip I/O so that the kernel's probe routines don't 3724 * get confused by errant DMAs when rebooting. 3725 */ 3726static int 3727re_shutdown(device_t dev) 3728{ 3729 struct rl_softc *sc; 3730 3731 sc = device_get_softc(dev); 3732 3733 RL_LOCK(sc); 3734 re_stop(sc); 3735 /* 3736 * Mark interface as down since otherwise we will panic if 3737 * interrupt comes in later on, which can happen in some 3738 * cases. 3739 */ 3740 sc->rl_ifp->if_flags &= ~IFF_UP; 3741 re_setwol(sc); 3742 RL_UNLOCK(sc); 3743 3744 return (0); 3745} 3746 3747static void 3748re_set_linkspeed(struct rl_softc *sc) 3749{ 3750 struct mii_softc *miisc; 3751 struct mii_data *mii; 3752 int aneg, i, phyno; 3753 3754 RL_LOCK_ASSERT(sc); 3755 3756 mii = device_get_softc(sc->rl_miibus); 3757 mii_pollstat(mii); 3758 aneg = 0; 3759 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 3760 (IFM_ACTIVE | IFM_AVALID)) { 3761 switch IFM_SUBTYPE(mii->mii_media_active) { 3762 case IFM_10_T: 3763 case IFM_100_TX: 3764 return; 3765 case IFM_1000_T: 3766 aneg++; 3767 break; 3768 default: 3769 break; 3770 } 3771 } 3772 miisc = LIST_FIRST(&mii->mii_phys); 3773 phyno = miisc->mii_phy; 3774 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 3775 PHY_RESET(miisc); 3776 re_miibus_writereg(sc->rl_dev, phyno, MII_100T2CR, 0); 3777 re_miibus_writereg(sc->rl_dev, phyno, 3778 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 3779 re_miibus_writereg(sc->rl_dev, phyno, 3780 MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG); 3781 DELAY(1000); 3782 if (aneg != 0) { 3783 /* 3784 * Poll link state until re(4) get a 10/100Mbps link. 3785 */ 3786 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 3787 mii_pollstat(mii); 3788 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) 3789 == (IFM_ACTIVE | IFM_AVALID)) { 3790 switch (IFM_SUBTYPE(mii->mii_media_active)) { 3791 case IFM_10_T: 3792 case IFM_100_TX: 3793 return; 3794 default: 3795 break; 3796 } 3797 } 3798 RL_UNLOCK(sc); 3799 pause("relnk", hz); 3800 RL_LOCK(sc); 3801 } 3802 if (i == MII_ANEGTICKS_GIGE) 3803 device_printf(sc->rl_dev, 3804 "establishing a link failed, WOL may not work!"); 3805 } 3806 /* 3807 * No link, force MAC to have 100Mbps, full-duplex link. 3808 * MAC does not require reprogramming on resolved speed/duplex, 3809 * so this is just for completeness. 
3810 */ 3811 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 3812 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 3813} 3814 3815static void 3816re_setwol(struct rl_softc *sc) 3817{ 3818 struct ifnet *ifp; 3819 int pmc; 3820 uint16_t pmstat; 3821 uint8_t v; 3822 3823 RL_LOCK_ASSERT(sc); 3824 3825 if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0) 3826 return; 3827 3828 ifp = sc->rl_ifp; 3829 /* Put controller into sleep mode. */ 3830 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { 3831 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) 3832 CSR_WRITE_1(sc, RL_GPIO, 3833 CSR_READ_1(sc, RL_GPIO) & ~0x01); 3834 } 3835 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 3836 re_set_rxmode(sc); 3837 if ((sc->rl_flags & RL_FLAG_WOL_MANLINK) != 0) 3838 re_set_linkspeed(sc); 3839 if ((sc->rl_flags & RL_FLAG_WOLRXENB) != 0) 3840 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB); 3841 } 3842 /* Enable config register write. */ 3843 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 3844 3845 /* Enable PME. */ 3846 v = CSR_READ_1(sc, sc->rl_cfg1); 3847 v &= ~RL_CFG1_PME; 3848 if ((ifp->if_capenable & IFCAP_WOL) != 0) 3849 v |= RL_CFG1_PME; 3850 CSR_WRITE_1(sc, sc->rl_cfg1, v); 3851 3852 v = CSR_READ_1(sc, sc->rl_cfg3); 3853 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); 3854 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 3855 v |= RL_CFG3_WOL_MAGIC; 3856 CSR_WRITE_1(sc, sc->rl_cfg3, v); 3857 3858 v = CSR_READ_1(sc, sc->rl_cfg5); 3859 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST | 3860 RL_CFG5_WOL_LANWAKE); 3861 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) 3862 v |= RL_CFG5_WOL_UCAST; 3863 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 3864 v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST; 3865 if ((ifp->if_capenable & IFCAP_WOL) != 0) 3866 v |= RL_CFG5_WOL_LANWAKE; 3867 CSR_WRITE_1(sc, sc->rl_cfg5, v); 3868 3869 /* Config register write done. */ 3870 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 3871 3872 if ((ifp->if_capenable & IFCAP_WOL) == 0 && 3873 (sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) 3874 CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) & ~0x80); 3875 /* 3876 * It seems that hardware resets its link speed to 100Mbps in 3877 * power down mode so switching to 100Mbps in driver is not 3878 * needed. 3879 */ 3880 3881 /* Request PME if WOL is requested. */ 3882 pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2); 3883 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 3884 if ((ifp->if_capenable & IFCAP_WOL) != 0) 3885 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 3886 pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 3887} 3888 3889static void 3890re_clrwol(struct rl_softc *sc) 3891{ 3892 int pmc; 3893 uint8_t v; 3894 3895 RL_LOCK_ASSERT(sc); 3896 3897 if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0) 3898 return; 3899 3900 /* Enable config register write. */ 3901 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 3902 3903 v = CSR_READ_1(sc, sc->rl_cfg3); 3904 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); 3905 CSR_WRITE_1(sc, sc->rl_cfg3, v); 3906 3907 /* Config register write done. 
static void
re_add_sysctls(struct rl_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	int error;

	ctx = device_get_sysctl_ctx(sc->rl_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, re_sysctl_stats, "I",
	    "Statistics Information");
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
		return;

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "int_rx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->rl_int_rx_mod, 0,
	    sysctl_hw_re_int_mod, "I", "re RX interrupt moderation");
	/* Pull in device tunables. */
	sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->rl_dev),
	    device_get_unit(sc->rl_dev), "int_rx_mod", &sc->rl_int_rx_mod);
	if (error == 0) {
		if (sc->rl_int_rx_mod < RL_TIMER_MIN ||
		    sc->rl_int_rx_mod > RL_TIMER_MAX) {
			device_printf(sc->rl_dev, "int_rx_mod value out of "
			    "range; using default: %d\n",
			    RL_TIMER_DEFAULT);
			sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
		}
	}
}

static int
re_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	struct rl_softc *sc;
	struct rl_stats *stats;
	int error, i, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || req->newptr == NULL)
		return (error);

	if (result == 1) {
		sc = (struct rl_softc *)arg1;
		RL_LOCK(sc);
		if ((sc->rl_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
			RL_UNLOCK(sc);
			goto done;
		}
		bus_dmamap_sync(sc->rl_ldata.rl_stag,
		    sc->rl_ldata.rl_smap, BUS_DMASYNC_PREREAD);
		CSR_WRITE_4(sc, RL_DUMPSTATS_HI,
		    RL_ADDR_HI(sc->rl_ldata.rl_stats_addr));
		CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
		    RL_ADDR_LO(sc->rl_ldata.rl_stats_addr));
		CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
		    RL_ADDR_LO(sc->rl_ldata.rl_stats_addr |
		    RL_DUMPSTATS_START));
		for (i = RL_TIMEOUT; i > 0; i--) {
			if ((CSR_READ_4(sc, RL_DUMPSTATS_LO) &
			    RL_DUMPSTATS_START) == 0)
				break;
			DELAY(1000);
		}
		bus_dmamap_sync(sc->rl_ldata.rl_stag,
		    sc->rl_ldata.rl_smap, BUS_DMASYNC_POSTREAD);
		RL_UNLOCK(sc);
		if (i == 0) {
			device_printf(sc->rl_dev,
			    "DUMP statistics request timed out\n");
			return (ETIMEDOUT);
		}
done:
		stats = sc->rl_ldata.rl_stats;
		printf("%s statistics:\n", device_get_nameunit(sc->rl_dev));
		printf("Tx frames : %ju\n",
		    (uintmax_t)le64toh(stats->rl_tx_pkts));
		printf("Rx frames : %ju\n",
		    (uintmax_t)le64toh(stats->rl_rx_pkts));
		printf("Tx errors : %ju\n",
		    (uintmax_t)le64toh(stats->rl_tx_errs));
		printf("Rx errors : %u\n",
		    le32toh(stats->rl_rx_errs));
		printf("Rx missed frames : %u\n",
		    (uint32_t)le16toh(stats->rl_missed_pkts));
		printf("Rx frame alignment errs : %u\n",
		    (uint32_t)le16toh(stats->rl_rx_framealign_errs));
		printf("Tx single collisions : %u\n",
		    le32toh(stats->rl_tx_onecoll));
		printf("Tx multiple collisions : %u\n",
		    le32toh(stats->rl_tx_multicolls));
		printf("Rx unicast frames : %ju\n",
		    (uintmax_t)le64toh(stats->rl_rx_ucasts));
		printf("Rx broadcast frames : %ju\n",
		    (uintmax_t)le64toh(stats->rl_rx_bcasts));
		printf("Rx multicast frames : %u\n",
		    le32toh(stats->rl_rx_mcasts));
		printf("Tx aborts : %u\n",
		    (uint32_t)le16toh(stats->rl_tx_aborts));
		printf("Tx underruns : %u\n",
		    (uint32_t)le16toh(stats->rl_rx_underruns));
	}

	return (error);
}

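/*
 * Generic bounds-checking handler for integer sysctls: the new value
 * is accepted only if it lies within [low, high]; otherwise EINVAL is
 * returned and the stored value is left untouched.
 */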
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_re_int_mod(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, RL_TIMER_MIN,
	    RL_TIMER_MAX));
}
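
/*
 * Example usage of the sysctl nodes registered above (a sketch; the
 * unit number and the moderation value are illustrative, and the
 * "int_rx_mod" node is only present when the controller runs with
 * MSI or MSI-X interrupts):
 *
 *   # dump the hardware statistics block for re0 to the console
 *   sysctl dev.re.0.stats=1
 *
 *   # adjust RX interrupt moderation (valid range is
 *   # RL_TIMER_MIN..RL_TIMER_MAX)
 *   sysctl dev.re.0.int_rx_mod=65
 *
 *   # or seed it from /boot/device.hints before the driver attaches
 *   hint.re.0.int_rx_mod="65"
 */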