if_re.c revision 292780
1/*- 2 * Copyright (c) 1997, 1998-2003 3 * Bill Paul <wpaul@windriver.com>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33#include <sys/cdefs.h> 34__FBSDID("$FreeBSD: stable/10/sys/dev/re/if_re.c 292780 2015-12-27 17:12:54Z marius $"); 35 36/* 37 * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver 38 * 39 * Written by Bill Paul <wpaul@windriver.com> 40 * Senior Networking Software Engineer 41 * Wind River Systems 42 */ 43 44/* 45 * This driver is designed to support RealTek's next generation of 46 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently 47 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S, 48 * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E. 49 * 50 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible 51 * with the older 8139 family, however it also supports a special 52 * C+ mode of operation that provides several new performance enhancing 53 * features. These include: 54 * 55 * o Descriptor based DMA mechanism. Each descriptor represents 56 * a single packet fragment. Data buffers may be aligned on 57 * any byte boundary. 58 * 59 * o 64-bit DMA 60 * 61 * o TCP/IP checksum offload for both RX and TX 62 * 63 * o High and normal priority transmit DMA rings 64 * 65 * o VLAN tag insertion and extraction 66 * 67 * o TCP large send (segmentation offload) 68 * 69 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+ 70 * programming API is fairly straightforward. The RX filtering, EEPROM 71 * access and PHY access is the same as it is on the older 8139 series 72 * chips. 73 * 74 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. 
It has almost the 75 * same programming API and feature set as the 8139C+ with the following 76 * differences and additions: 77 * 78 * o 1000Mbps mode 79 * 80 * o Jumbo frames 81 * 82 * o GMII and TBI ports/registers for interfacing with copper 83 * or fiber PHYs 84 * 85 * o RX and TX DMA rings can have up to 1024 descriptors 86 * (the 8139C+ allows a maximum of 64) 87 * 88 * o Slight differences in register layout from the 8139C+ 89 * 90 * The TX start and timer interrupt registers are at different locations 91 * on the 8169 than they are on the 8139C+. Also, the status word in the 92 * RX descriptor has a slightly different bit layout. The 8169 does not 93 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska' 94 * copper gigE PHY. 95 * 96 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs 97 * (the 'S' stands for 'single-chip'). These devices have the same 98 * programming API as the older 8169, but also have some vendor-specific 99 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard 100 * part designed to be pin-compatible with the RealTek 8100 10/100 chip. 101 * 102 * This driver takes advantage of the RX and TX checksum offload and 103 * VLAN tag insertion/extraction features. It also implements TX 104 * interrupt moderation using the timer interrupt registers, which 105 * significantly reduces TX interrupt load. There is also support 106 * for jumbo frames, however the 8169/8169S/8110S can not transmit 107 * jumbo frames larger than 7440, so the max MTU possible with this 108 * driver is 7422 bytes. 109 */ 110 111#ifdef HAVE_KERNEL_OPTION_HEADERS 112#include "opt_device_polling.h" 113#endif 114 115#include <sys/param.h> 116#include <sys/endian.h> 117#include <sys/systm.h> 118#include <sys/sockio.h> 119#include <sys/mbuf.h> 120#include <sys/malloc.h> 121#include <sys/module.h> 122#include <sys/kernel.h> 123#include <sys/socket.h> 124#include <sys/lock.h> 125#include <sys/mutex.h> 126#include <sys/sysctl.h> 127#include <sys/taskqueue.h> 128 129#include <net/if.h> 130#include <net/if_arp.h> 131#include <net/ethernet.h> 132#include <net/if_dl.h> 133#include <net/if_media.h> 134#include <net/if_types.h> 135#include <net/if_vlan_var.h> 136 137#include <net/bpf.h> 138 139#include <machine/bus.h> 140#include <machine/resource.h> 141#include <sys/bus.h> 142#include <sys/rman.h> 143 144#include <dev/mii/mii.h> 145#include <dev/mii/miivar.h> 146 147#include <dev/pci/pcireg.h> 148#include <dev/pci/pcivar.h> 149 150#include <dev/rl/if_rlreg.h> 151 152MODULE_DEPEND(re, pci, 1, 1, 1); 153MODULE_DEPEND(re, ether, 1, 1, 1); 154MODULE_DEPEND(re, miibus, 1, 1, 1); 155 156/* "device miibus" required. See GENERIC if you get errors here. */ 157#include "miibus_if.h" 158 159/* Tunables. */ 160static int intr_filter = 0; 161TUNABLE_INT("hw.re.intr_filter", &intr_filter); 162static int msi_disable = 0; 163TUNABLE_INT("hw.re.msi_disable", &msi_disable); 164static int msix_disable = 0; 165TUNABLE_INT("hw.re.msix_disable", &msix_disable); 166static int prefer_iomap = 0; 167TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap); 168 169#define RE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 170 171/* 172 * Various supported device vendors/types and their names. 
173 */ 174static const struct rl_type re_devs[] = { 175 { DLINK_VENDORID, DLINK_DEVICEID_528T, 0, 176 "D-Link DGE-528(T) Gigabit Ethernet Adapter" }, 177 { DLINK_VENDORID, DLINK_DEVICEID_530T_REVC, 0, 178 "D-Link DGE-530(T) Gigabit Ethernet Adapter" }, 179 { RT_VENDORID, RT_DEVICEID_8139, 0, 180 "RealTek 8139C+ 10/100BaseTX" }, 181 { RT_VENDORID, RT_DEVICEID_8101E, 0, 182 "RealTek 810xE PCIe 10/100baseTX" }, 183 { RT_VENDORID, RT_DEVICEID_8168, 0, 184 "RealTek 8168/8111 B/C/CP/D/DP/E/F/G PCIe Gigabit Ethernet" }, 185 { RT_VENDORID, RT_DEVICEID_8169, 0, 186 "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" }, 187 { RT_VENDORID, RT_DEVICEID_8169SC, 0, 188 "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" }, 189 { COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0, 190 "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" }, 191 { LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0, 192 "Linksys EG1032 (RTL8169S) Gigabit Ethernet" }, 193 { USR_VENDORID, USR_DEVICEID_997902, 0, 194 "US Robotics 997902 (RTL8169S) Gigabit Ethernet" } 195}; 196 197static const struct rl_hwrev re_hwrevs[] = { 198 { RL_HWREV_8139, RL_8139, "", RL_MTU }, 199 { RL_HWREV_8139A, RL_8139, "A", RL_MTU }, 200 { RL_HWREV_8139AG, RL_8139, "A-G", RL_MTU }, 201 { RL_HWREV_8139B, RL_8139, "B", RL_MTU }, 202 { RL_HWREV_8130, RL_8139, "8130", RL_MTU }, 203 { RL_HWREV_8139C, RL_8139, "C", RL_MTU }, 204 { RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C", RL_MTU }, 205 { RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+", RL_MTU }, 206 { RL_HWREV_8168B_SPIN1, RL_8169, "8168", RL_JUMBO_MTU }, 207 { RL_HWREV_8169, RL_8169, "8169", RL_JUMBO_MTU }, 208 { RL_HWREV_8169S, RL_8169, "8169S", RL_JUMBO_MTU }, 209 { RL_HWREV_8110S, RL_8169, "8110S", RL_JUMBO_MTU }, 210 { RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB", RL_JUMBO_MTU }, 211 { RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU }, 212 { RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL", RL_JUMBO_MTU }, 213 { RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU }, 214 { RL_HWREV_8100, RL_8139, "8100", RL_MTU }, 215 { RL_HWREV_8101, RL_8139, "8101", RL_MTU }, 216 { RL_HWREV_8100E, RL_8169, "8100E", RL_MTU }, 217 { RL_HWREV_8101E, RL_8169, "8101E", RL_MTU }, 218 { RL_HWREV_8102E, RL_8169, "8102E", RL_MTU }, 219 { RL_HWREV_8102EL, RL_8169, "8102EL", RL_MTU }, 220 { RL_HWREV_8102EL_SPIN1, RL_8169, "8102EL", RL_MTU }, 221 { RL_HWREV_8103E, RL_8169, "8103E", RL_MTU }, 222 { RL_HWREV_8401E, RL_8169, "8401E", RL_MTU }, 223 { RL_HWREV_8402, RL_8169, "8402", RL_MTU }, 224 { RL_HWREV_8105E, RL_8169, "8105E", RL_MTU }, 225 { RL_HWREV_8105E_SPIN1, RL_8169, "8105E", RL_MTU }, 226 { RL_HWREV_8106E, RL_8169, "8106E", RL_MTU }, 227 { RL_HWREV_8168B_SPIN2, RL_8169, "8168", RL_JUMBO_MTU }, 228 { RL_HWREV_8168B_SPIN3, RL_8169, "8168", RL_JUMBO_MTU }, 229 { RL_HWREV_8168C, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K }, 230 { RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K }, 231 { RL_HWREV_8168CP, RL_8169, "8168CP/8111CP", RL_JUMBO_MTU_6K }, 232 { RL_HWREV_8168D, RL_8169, "8168D/8111D", RL_JUMBO_MTU_9K }, 233 { RL_HWREV_8168DP, RL_8169, "8168DP/8111DP", RL_JUMBO_MTU_9K }, 234 { RL_HWREV_8168E, RL_8169, "8168E/8111E", RL_JUMBO_MTU_9K}, 235 { RL_HWREV_8168E_VL, RL_8169, "8168E/8111E-VL", RL_JUMBO_MTU_6K}, 236 { RL_HWREV_8168EP, RL_8169, "8168EP/8111EP", RL_JUMBO_MTU_9K}, 237 { RL_HWREV_8168F, RL_8169, "8168F/8111F", RL_JUMBO_MTU_9K}, 238 { RL_HWREV_8168G, RL_8169, "8168G/8111G", RL_JUMBO_MTU_9K}, 239 { RL_HWREV_8168GU, RL_8169, "8168GU/8111GU", RL_JUMBO_MTU_9K}, 240 { RL_HWREV_8411, RL_8169, 
"8411", RL_JUMBO_MTU_9K}, 241 { RL_HWREV_8411B, RL_8169, "8411B", RL_JUMBO_MTU_9K}, 242 { 0, 0, NULL, 0 } 243}; 244 245static int re_probe (device_t); 246static int re_attach (device_t); 247static int re_detach (device_t); 248 249static int re_encap (struct rl_softc *, struct mbuf **); 250 251static void re_dma_map_addr (void *, bus_dma_segment_t *, int, int); 252static int re_allocmem (device_t, struct rl_softc *); 253static __inline void re_discard_rxbuf 254 (struct rl_softc *, int); 255static int re_newbuf (struct rl_softc *, int); 256static int re_jumbo_newbuf (struct rl_softc *, int); 257static int re_rx_list_init (struct rl_softc *); 258static int re_jrx_list_init (struct rl_softc *); 259static int re_tx_list_init (struct rl_softc *); 260#ifdef RE_FIXUP_RX 261static __inline void re_fixup_rx 262 (struct mbuf *); 263#endif 264static int re_rxeof (struct rl_softc *, int *); 265static void re_txeof (struct rl_softc *); 266#ifdef DEVICE_POLLING 267static int re_poll (struct ifnet *, enum poll_cmd, int); 268static int re_poll_locked (struct ifnet *, enum poll_cmd, int); 269#endif 270static int re_intr (void *); 271static void re_intr_msi (void *); 272static void re_tick (void *); 273static void re_int_task (void *, int); 274static void re_start (struct ifnet *); 275static void re_start_locked (struct ifnet *); 276static int re_ioctl (struct ifnet *, u_long, caddr_t); 277static void re_init (void *); 278static void re_init_locked (struct rl_softc *); 279static void re_stop (struct rl_softc *); 280static void re_watchdog (struct rl_softc *); 281static int re_suspend (device_t); 282static int re_resume (device_t); 283static int re_shutdown (device_t); 284static int re_ifmedia_upd (struct ifnet *); 285static void re_ifmedia_sts (struct ifnet *, struct ifmediareq *); 286 287static void re_eeprom_putbyte (struct rl_softc *, int); 288static void re_eeprom_getword (struct rl_softc *, int, u_int16_t *); 289static void re_read_eeprom (struct rl_softc *, caddr_t, int, int); 290static int re_gmii_readreg (device_t, int, int); 291static int re_gmii_writereg (device_t, int, int, int); 292 293static int re_miibus_readreg (device_t, int, int); 294static int re_miibus_writereg (device_t, int, int, int); 295static void re_miibus_statchg (device_t); 296 297static void re_set_jumbo (struct rl_softc *, int); 298static void re_set_rxmode (struct rl_softc *); 299static void re_reset (struct rl_softc *); 300static void re_setwol (struct rl_softc *); 301static void re_clrwol (struct rl_softc *); 302static void re_set_linkspeed (struct rl_softc *); 303 304#ifdef DEV_NETMAP /* see ixgbe.c for details */ 305#include <dev/netmap/if_re_netmap.h> 306#endif /* !DEV_NETMAP */ 307 308#ifdef RE_DIAG 309static int re_diag (struct rl_softc *); 310#endif 311 312static void re_add_sysctls (struct rl_softc *); 313static int re_sysctl_stats (SYSCTL_HANDLER_ARGS); 314static int sysctl_int_range (SYSCTL_HANDLER_ARGS, int, int); 315static int sysctl_hw_re_int_mod (SYSCTL_HANDLER_ARGS); 316 317static device_method_t re_methods[] = { 318 /* Device interface */ 319 DEVMETHOD(device_probe, re_probe), 320 DEVMETHOD(device_attach, re_attach), 321 DEVMETHOD(device_detach, re_detach), 322 DEVMETHOD(device_suspend, re_suspend), 323 DEVMETHOD(device_resume, re_resume), 324 DEVMETHOD(device_shutdown, re_shutdown), 325 326 /* MII interface */ 327 DEVMETHOD(miibus_readreg, re_miibus_readreg), 328 DEVMETHOD(miibus_writereg, re_miibus_writereg), 329 DEVMETHOD(miibus_statchg, re_miibus_statchg), 330 331 DEVMETHOD_END 332}; 333 334static driver_t 
re_driver = { 335 "re", 336 re_methods, 337 sizeof(struct rl_softc) 338}; 339 340static devclass_t re_devclass; 341 342DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0); 343DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0); 344 345#define EE_SET(x) \ 346 CSR_WRITE_1(sc, RL_EECMD, \ 347 CSR_READ_1(sc, RL_EECMD) | x) 348 349#define EE_CLR(x) \ 350 CSR_WRITE_1(sc, RL_EECMD, \ 351 CSR_READ_1(sc, RL_EECMD) & ~x) 352 353/* 354 * Send a read command and address to the EEPROM, check for ACK. 355 */ 356static void 357re_eeprom_putbyte(struct rl_softc *sc, int addr) 358{ 359 int d, i; 360 361 d = addr | (RL_9346_READ << sc->rl_eewidth); 362 363 /* 364 * Feed in each bit and strobe the clock. 365 */ 366 367 for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) { 368 if (d & i) { 369 EE_SET(RL_EE_DATAIN); 370 } else { 371 EE_CLR(RL_EE_DATAIN); 372 } 373 DELAY(100); 374 EE_SET(RL_EE_CLK); 375 DELAY(150); 376 EE_CLR(RL_EE_CLK); 377 DELAY(100); 378 } 379} 380 381/* 382 * Read a word of data stored in the EEPROM at address 'addr.' 383 */ 384static void 385re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest) 386{ 387 int i; 388 u_int16_t word = 0; 389 390 /* 391 * Send address of word we want to read. 392 */ 393 re_eeprom_putbyte(sc, addr); 394 395 /* 396 * Start reading bits from EEPROM. 397 */ 398 for (i = 0x8000; i; i >>= 1) { 399 EE_SET(RL_EE_CLK); 400 DELAY(100); 401 if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) 402 word |= i; 403 EE_CLR(RL_EE_CLK); 404 DELAY(100); 405 } 406 407 *dest = word; 408} 409 410/* 411 * Read a sequence of words from the EEPROM. 412 */ 413static void 414re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt) 415{ 416 int i; 417 u_int16_t word = 0, *ptr; 418 419 CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); 420 421 DELAY(100); 422 423 for (i = 0; i < cnt; i++) { 424 CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL); 425 re_eeprom_getword(sc, off + i, &word); 426 CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL); 427 ptr = (u_int16_t *)(dest + (i * 2)); 428 *ptr = word; 429 } 430 431 CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); 432} 433 434static int 435re_gmii_readreg(device_t dev, int phy, int reg) 436{ 437 struct rl_softc *sc; 438 u_int32_t rval; 439 int i; 440 441 sc = device_get_softc(dev); 442 443 /* Let the rgephy driver read the GMEDIASTAT register */ 444 445 if (reg == RL_GMEDIASTAT) { 446 rval = CSR_READ_1(sc, RL_GMEDIASTAT); 447 return (rval); 448 } 449 450 CSR_WRITE_4(sc, RL_PHYAR, reg << 16); 451 452 for (i = 0; i < RL_PHY_TIMEOUT; i++) { 453 rval = CSR_READ_4(sc, RL_PHYAR); 454 if (rval & RL_PHYAR_BUSY) 455 break; 456 DELAY(25); 457 } 458 459 if (i == RL_PHY_TIMEOUT) { 460 device_printf(sc->rl_dev, "PHY read failed\n"); 461 return (0); 462 } 463 464 /* 465 * Controller requires a 20us delay to process next MDIO request. 466 */ 467 DELAY(20); 468 469 return (rval & RL_PHYAR_PHYDATA); 470} 471 472static int 473re_gmii_writereg(device_t dev, int phy, int reg, int data) 474{ 475 struct rl_softc *sc; 476 u_int32_t rval; 477 int i; 478 479 sc = device_get_softc(dev); 480 481 CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) | 482 (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY); 483 484 for (i = 0; i < RL_PHY_TIMEOUT; i++) { 485 rval = CSR_READ_4(sc, RL_PHYAR); 486 if (!(rval & RL_PHYAR_BUSY)) 487 break; 488 DELAY(25); 489 } 490 491 if (i == RL_PHY_TIMEOUT) { 492 device_printf(sc->rl_dev, "PHY write failed\n"); 493 return (0); 494 } 495 496 /* 497 * Controller requires a 20us delay to process next MDIO request. 
498 */ 499 DELAY(20); 500 501 return (0); 502} 503 504static int 505re_miibus_readreg(device_t dev, int phy, int reg) 506{ 507 struct rl_softc *sc; 508 u_int16_t rval = 0; 509 u_int16_t re8139_reg = 0; 510 511 sc = device_get_softc(dev); 512 513 if (sc->rl_type == RL_8169) { 514 rval = re_gmii_readreg(dev, phy, reg); 515 return (rval); 516 } 517 518 switch (reg) { 519 case MII_BMCR: 520 re8139_reg = RL_BMCR; 521 break; 522 case MII_BMSR: 523 re8139_reg = RL_BMSR; 524 break; 525 case MII_ANAR: 526 re8139_reg = RL_ANAR; 527 break; 528 case MII_ANER: 529 re8139_reg = RL_ANER; 530 break; 531 case MII_ANLPAR: 532 re8139_reg = RL_LPAR; 533 break; 534 case MII_PHYIDR1: 535 case MII_PHYIDR2: 536 return (0); 537 /* 538 * Allow the rlphy driver to read the media status 539 * register. If we have a link partner which does not 540 * support NWAY, this is the register which will tell 541 * us the results of parallel detection. 542 */ 543 case RL_MEDIASTAT: 544 rval = CSR_READ_1(sc, RL_MEDIASTAT); 545 return (rval); 546 default: 547 device_printf(sc->rl_dev, "bad phy register\n"); 548 return (0); 549 } 550 rval = CSR_READ_2(sc, re8139_reg); 551 if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) { 552 /* 8139C+ has different bit layout. */ 553 rval &= ~(BMCR_LOOP | BMCR_ISO); 554 } 555 return (rval); 556} 557 558static int 559re_miibus_writereg(device_t dev, int phy, int reg, int data) 560{ 561 struct rl_softc *sc; 562 u_int16_t re8139_reg = 0; 563 int rval = 0; 564 565 sc = device_get_softc(dev); 566 567 if (sc->rl_type == RL_8169) { 568 rval = re_gmii_writereg(dev, phy, reg, data); 569 return (rval); 570 } 571 572 switch (reg) { 573 case MII_BMCR: 574 re8139_reg = RL_BMCR; 575 if (sc->rl_type == RL_8139CPLUS) { 576 /* 8139C+ has different bit layout. */ 577 data &= ~(BMCR_LOOP | BMCR_ISO); 578 } 579 break; 580 case MII_BMSR: 581 re8139_reg = RL_BMSR; 582 break; 583 case MII_ANAR: 584 re8139_reg = RL_ANAR; 585 break; 586 case MII_ANER: 587 re8139_reg = RL_ANER; 588 break; 589 case MII_ANLPAR: 590 re8139_reg = RL_LPAR; 591 break; 592 case MII_PHYIDR1: 593 case MII_PHYIDR2: 594 return (0); 595 break; 596 default: 597 device_printf(sc->rl_dev, "bad phy register\n"); 598 return (0); 599 } 600 CSR_WRITE_2(sc, re8139_reg, data); 601 return (0); 602} 603 604static void 605re_miibus_statchg(device_t dev) 606{ 607 struct rl_softc *sc; 608 struct ifnet *ifp; 609 struct mii_data *mii; 610 611 sc = device_get_softc(dev); 612 mii = device_get_softc(sc->rl_miibus); 613 ifp = sc->rl_ifp; 614 if (mii == NULL || ifp == NULL || 615 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 616 return; 617 618 sc->rl_flags &= ~RL_FLAG_LINK; 619 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 620 (IFM_ACTIVE | IFM_AVALID)) { 621 switch (IFM_SUBTYPE(mii->mii_media_active)) { 622 case IFM_10_T: 623 case IFM_100_TX: 624 sc->rl_flags |= RL_FLAG_LINK; 625 break; 626 case IFM_1000_T: 627 if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0) 628 break; 629 sc->rl_flags |= RL_FLAG_LINK; 630 break; 631 default: 632 break; 633 } 634 } 635 /* 636 * RealTek controllers does not provide any interface to 637 * Tx/Rx MACs for resolved speed, duplex and flow-control 638 * parameters. 639 */ 640} 641 642/* 643 * Set the RX configuration and 64-bit multicast hash filter. 
644 */ 645static void 646re_set_rxmode(struct rl_softc *sc) 647{ 648 struct ifnet *ifp; 649 struct ifmultiaddr *ifma; 650 uint32_t hashes[2] = { 0, 0 }; 651 uint32_t h, rxfilt; 652 653 RL_LOCK_ASSERT(sc); 654 655 ifp = sc->rl_ifp; 656 657 rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD; 658 if ((sc->rl_flags & RL_FLAG_EARLYOFF) != 0) 659 rxfilt |= RL_RXCFG_EARLYOFF; 660 else if ((sc->rl_flags & RL_FLAG_EARLYOFFV2) != 0) 661 rxfilt |= RL_RXCFG_EARLYOFFV2; 662 663 if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) { 664 if (ifp->if_flags & IFF_PROMISC) 665 rxfilt |= RL_RXCFG_RX_ALLPHYS; 666 /* 667 * Unlike other hardwares, we have to explicitly set 668 * RL_RXCFG_RX_MULTI to receive multicast frames in 669 * promiscuous mode. 670 */ 671 rxfilt |= RL_RXCFG_RX_MULTI; 672 hashes[0] = hashes[1] = 0xffffffff; 673 goto done; 674 } 675 676 if_maddr_rlock(ifp); 677 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 678 if (ifma->ifma_addr->sa_family != AF_LINK) 679 continue; 680 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 681 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 682 if (h < 32) 683 hashes[0] |= (1 << h); 684 else 685 hashes[1] |= (1 << (h - 32)); 686 } 687 if_maddr_runlock(ifp); 688 689 if (hashes[0] != 0 || hashes[1] != 0) { 690 /* 691 * For some unfathomable reason, RealTek decided to 692 * reverse the order of the multicast hash registers 693 * in the PCI Express parts. This means we have to 694 * write the hash pattern in reverse order for those 695 * devices. 696 */ 697 if ((sc->rl_flags & RL_FLAG_PCIE) != 0) { 698 h = bswap32(hashes[0]); 699 hashes[0] = bswap32(hashes[1]); 700 hashes[1] = h; 701 } 702 rxfilt |= RL_RXCFG_RX_MULTI; 703 } 704 705 if (sc->rl_hwrev->rl_rev == RL_HWREV_8168F) { 706 /* Disable multicast filtering due to silicon bug. */ 707 hashes[0] = 0xffffffff; 708 hashes[1] = 0xffffffff; 709 } 710 711done: 712 CSR_WRITE_4(sc, RL_MAR0, hashes[0]); 713 CSR_WRITE_4(sc, RL_MAR4, hashes[1]); 714 CSR_WRITE_4(sc, RL_RXCFG, rxfilt); 715} 716 717static void 718re_reset(struct rl_softc *sc) 719{ 720 int i; 721 722 RL_LOCK_ASSERT(sc); 723 724 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); 725 726 for (i = 0; i < RL_TIMEOUT; i++) { 727 DELAY(10); 728 if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) 729 break; 730 } 731 if (i == RL_TIMEOUT) 732 device_printf(sc->rl_dev, "reset never completed!\n"); 733 734 if ((sc->rl_flags & RL_FLAG_MACRESET) != 0) 735 CSR_WRITE_1(sc, 0x82, 1); 736 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169S) 737 re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0); 738} 739 740#ifdef RE_DIAG 741 742/* 743 * The following routine is designed to test for a defect on some 744 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64# 745 * lines connected to the bus, however for a 32-bit only card, they 746 * should be pulled high. The result of this defect is that the 747 * NIC will not work right if you plug it into a 64-bit slot: DMA 748 * operations will be done with 64-bit transfers, which will fail 749 * because the 64-bit data lines aren't connected. 750 * 751 * There's no way to work around this (short of talking a soldering 752 * iron to the board), however we can detect it. The method we use 753 * here is to put the NIC into digital loopback mode, set the receiver 754 * to promiscuous mode, and then try to send a frame. We then compare 755 * the frame data we sent to what was received. 
If the data matches, 756 * then the NIC is working correctly, otherwise we know the user has 757 * a defective NIC which has been mistakenly plugged into a 64-bit PCI 758 * slot. In the latter case, there's no way the NIC can work correctly, 759 * so we print out a message on the console and abort the device attach. 760 */ 761 762static int 763re_diag(struct rl_softc *sc) 764{ 765 struct ifnet *ifp = sc->rl_ifp; 766 struct mbuf *m0; 767 struct ether_header *eh; 768 struct rl_desc *cur_rx; 769 u_int16_t status; 770 u_int32_t rxstat; 771 int total_len, i, error = 0, phyaddr; 772 u_int8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' }; 773 u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' }; 774 775 /* Allocate a single mbuf */ 776 MGETHDR(m0, M_NOWAIT, MT_DATA); 777 if (m0 == NULL) 778 return (ENOBUFS); 779 780 RL_LOCK(sc); 781 782 /* 783 * Initialize the NIC in test mode. This sets the chip up 784 * so that it can send and receive frames, but performs the 785 * following special functions: 786 * - Puts receiver in promiscuous mode 787 * - Enables digital loopback mode 788 * - Leaves interrupts turned off 789 */ 790 791 ifp->if_flags |= IFF_PROMISC; 792 sc->rl_testmode = 1; 793 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 794 re_init_locked(sc); 795 sc->rl_flags |= RL_FLAG_LINK; 796 if (sc->rl_type == RL_8169) 797 phyaddr = 1; 798 else 799 phyaddr = 0; 800 801 re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET); 802 for (i = 0; i < RL_TIMEOUT; i++) { 803 status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR); 804 if (!(status & BMCR_RESET)) 805 break; 806 } 807 808 re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP); 809 CSR_WRITE_2(sc, RL_ISR, RL_INTRS); 810 811 DELAY(100000); 812 813 /* Put some data in the mbuf */ 814 815 eh = mtod(m0, struct ether_header *); 816 bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN); 817 bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN); 818 eh->ether_type = htons(ETHERTYPE_IP); 819 m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN; 820 821 /* 822 * Queue the packet, start transmission. 823 * Note: IF_HANDOFF() ultimately calls re_start() for us. 824 */ 825 826 CSR_WRITE_2(sc, RL_ISR, 0xFFFF); 827 RL_UNLOCK(sc); 828 /* XXX: re_diag must not be called when in ALTQ mode */ 829 IF_HANDOFF(&ifp->if_snd, m0, ifp); 830 RL_LOCK(sc); 831 m0 = NULL; 832 833 /* Wait for it to propagate through the chip */ 834 835 DELAY(100000); 836 for (i = 0; i < RL_TIMEOUT; i++) { 837 status = CSR_READ_2(sc, RL_ISR); 838 CSR_WRITE_2(sc, RL_ISR, status); 839 if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) == 840 (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) 841 break; 842 DELAY(10); 843 } 844 845 if (i == RL_TIMEOUT) { 846 device_printf(sc->rl_dev, 847 "diagnostic failed, failed to receive packet in" 848 " loopback mode\n"); 849 error = EIO; 850 goto done; 851 } 852 853 /* 854 * The packet should have been dumped into the first 855 * entry in the RX DMA ring. Grab it from there. 
856 */ 857 858 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 859 sc->rl_ldata.rl_rx_list_map, 860 BUS_DMASYNC_POSTREAD); 861 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, 862 sc->rl_ldata.rl_rx_desc[0].rx_dmamap, 863 BUS_DMASYNC_POSTREAD); 864 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, 865 sc->rl_ldata.rl_rx_desc[0].rx_dmamap); 866 867 m0 = sc->rl_ldata.rl_rx_desc[0].rx_m; 868 sc->rl_ldata.rl_rx_desc[0].rx_m = NULL; 869 eh = mtod(m0, struct ether_header *); 870 871 cur_rx = &sc->rl_ldata.rl_rx_list[0]; 872 total_len = RL_RXBYTES(cur_rx); 873 rxstat = le32toh(cur_rx->rl_cmdstat); 874 875 if (total_len != ETHER_MIN_LEN) { 876 device_printf(sc->rl_dev, 877 "diagnostic failed, received short packet\n"); 878 error = EIO; 879 goto done; 880 } 881 882 /* Test that the received packet data matches what we sent. */ 883 884 if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) || 885 bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) || 886 ntohs(eh->ether_type) != ETHERTYPE_IP) { 887 device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n"); 888 device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n", 889 dst, ":", src, ":", ETHERTYPE_IP); 890 device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n", 891 eh->ether_dhost, ":", eh->ether_shost, ":", 892 ntohs(eh->ether_type)); 893 device_printf(sc->rl_dev, "You may have a defective 32-bit " 894 "NIC plugged into a 64-bit PCI slot.\n"); 895 device_printf(sc->rl_dev, "Please re-install the NIC in a " 896 "32-bit slot for proper operation.\n"); 897 device_printf(sc->rl_dev, "Read the re(4) man page for more " 898 "details.\n"); 899 error = EIO; 900 } 901 902done: 903 /* Turn interface off, release resources */ 904 905 sc->rl_testmode = 0; 906 sc->rl_flags &= ~RL_FLAG_LINK; 907 ifp->if_flags &= ~IFF_PROMISC; 908 re_stop(sc); 909 if (m0 != NULL) 910 m_freem(m0); 911 912 RL_UNLOCK(sc); 913 914 return (error); 915} 916 917#endif 918 919/* 920 * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device 921 * IDs against our list and return a device name if we find a match. 922 */ 923static int 924re_probe(device_t dev) 925{ 926 const struct rl_type *t; 927 uint16_t devid, vendor; 928 uint16_t revid, sdevid; 929 int i; 930 931 vendor = pci_get_vendor(dev); 932 devid = pci_get_device(dev); 933 revid = pci_get_revid(dev); 934 sdevid = pci_get_subdevice(dev); 935 936 if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) { 937 if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) { 938 /* 939 * Only attach to rev. 3 of the Linksys EG1032 adapter. 940 * Rev. 2 is supported by sk(4). 941 */ 942 return (ENXIO); 943 } 944 } 945 946 if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) { 947 if (revid != 0x20) { 948 /* 8139, let rl(4) take care of this device. */ 949 return (ENXIO); 950 } 951 } 952 953 t = re_devs; 954 for (i = 0; i < sizeof(re_devs) / sizeof(re_devs[0]); i++, t++) { 955 if (vendor == t->rl_vid && devid == t->rl_did) { 956 device_set_desc(dev, t->rl_name); 957 return (BUS_PROBE_DEFAULT); 958 } 959 } 960 961 return (ENXIO); 962} 963 964/* 965 * Map a single buffer address. 
966 */ 967 968static void 969re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 970{ 971 bus_addr_t *addr; 972 973 if (error) 974 return; 975 976 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 977 addr = arg; 978 *addr = segs->ds_addr; 979} 980 981static int 982re_allocmem(device_t dev, struct rl_softc *sc) 983{ 984 bus_addr_t lowaddr; 985 bus_size_t rx_list_size, tx_list_size; 986 int error; 987 int i; 988 989 rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc); 990 tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc); 991 992 /* 993 * Allocate the parent bus DMA tag appropriate for PCI. 994 * In order to use DAC, RL_CPLUSCMD_PCI_DAC bit of RL_CPLUS_CMD 995 * register should be set. However some RealTek chips are known 996 * to be buggy on DAC handling, therefore disable DAC by limiting 997 * DMA address space to 32bit. PCIe variants of RealTek chips 998 * may not have the limitation. 999 */ 1000 lowaddr = BUS_SPACE_MAXADDR; 1001 if ((sc->rl_flags & RL_FLAG_PCIE) == 0) 1002 lowaddr = BUS_SPACE_MAXADDR_32BIT; 1003 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, 1004 lowaddr, BUS_SPACE_MAXADDR, NULL, NULL, 1005 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, 1006 NULL, NULL, &sc->rl_parent_tag); 1007 if (error) { 1008 device_printf(dev, "could not allocate parent DMA tag\n"); 1009 return (error); 1010 } 1011 1012 /* 1013 * Allocate map for TX mbufs. 1014 */ 1015 error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0, 1016 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1017 NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0, 1018 NULL, NULL, &sc->rl_ldata.rl_tx_mtag); 1019 if (error) { 1020 device_printf(dev, "could not allocate TX DMA tag\n"); 1021 return (error); 1022 } 1023 1024 /* 1025 * Allocate map for RX mbufs. 1026 */ 1027 1028 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { 1029 error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 1030 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1031 MJUM9BYTES, 1, MJUM9BYTES, 0, NULL, NULL, 1032 &sc->rl_ldata.rl_jrx_mtag); 1033 if (error) { 1034 device_printf(dev, 1035 "could not allocate jumbo RX DMA tag\n"); 1036 return (error); 1037 } 1038 } 1039 error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0, 1040 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1041 MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag); 1042 if (error) { 1043 device_printf(dev, "could not allocate RX DMA tag\n"); 1044 return (error); 1045 } 1046 1047 /* 1048 * Allocate map for TX descriptor list. 1049 */ 1050 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 1051 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1052 NULL, tx_list_size, 1, tx_list_size, 0, 1053 NULL, NULL, &sc->rl_ldata.rl_tx_list_tag); 1054 if (error) { 1055 device_printf(dev, "could not allocate TX DMA ring tag\n"); 1056 return (error); 1057 } 1058 1059 /* Allocate DMA'able memory for the TX ring */ 1060 1061 error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag, 1062 (void **)&sc->rl_ldata.rl_tx_list, 1063 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 1064 &sc->rl_ldata.rl_tx_list_map); 1065 if (error) { 1066 device_printf(dev, "could not allocate TX DMA ring\n"); 1067 return (error); 1068 } 1069 1070 /* Load the map for the TX ring. 
*/ 1071 1072 sc->rl_ldata.rl_tx_list_addr = 0; 1073 error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag, 1074 sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list, 1075 tx_list_size, re_dma_map_addr, 1076 &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT); 1077 if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) { 1078 device_printf(dev, "could not load TX DMA ring\n"); 1079 return (ENOMEM); 1080 } 1081 1082 /* Create DMA maps for TX buffers */ 1083 1084 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { 1085 error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0, 1086 &sc->rl_ldata.rl_tx_desc[i].tx_dmamap); 1087 if (error) { 1088 device_printf(dev, "could not create DMA map for TX\n"); 1089 return (error); 1090 } 1091 } 1092 1093 /* 1094 * Allocate map for RX descriptor list. 1095 */ 1096 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 1097 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1098 NULL, rx_list_size, 1, rx_list_size, 0, 1099 NULL, NULL, &sc->rl_ldata.rl_rx_list_tag); 1100 if (error) { 1101 device_printf(dev, "could not create RX DMA ring tag\n"); 1102 return (error); 1103 } 1104 1105 /* Allocate DMA'able memory for the RX ring */ 1106 1107 error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag, 1108 (void **)&sc->rl_ldata.rl_rx_list, 1109 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 1110 &sc->rl_ldata.rl_rx_list_map); 1111 if (error) { 1112 device_printf(dev, "could not allocate RX DMA ring\n"); 1113 return (error); 1114 } 1115 1116 /* Load the map for the RX ring. */ 1117 1118 sc->rl_ldata.rl_rx_list_addr = 0; 1119 error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag, 1120 sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list, 1121 rx_list_size, re_dma_map_addr, 1122 &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT); 1123 if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) { 1124 device_printf(dev, "could not load RX DMA ring\n"); 1125 return (ENOMEM); 1126 } 1127 1128 /* Create DMA maps for RX buffers */ 1129 1130 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { 1131 error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0, 1132 &sc->rl_ldata.rl_jrx_sparemap); 1133 if (error) { 1134 device_printf(dev, 1135 "could not create spare DMA map for jumbo RX\n"); 1136 return (error); 1137 } 1138 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 1139 error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0, 1140 &sc->rl_ldata.rl_jrx_desc[i].rx_dmamap); 1141 if (error) { 1142 device_printf(dev, 1143 "could not create DMA map for jumbo RX\n"); 1144 return (error); 1145 } 1146 } 1147 } 1148 error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0, 1149 &sc->rl_ldata.rl_rx_sparemap); 1150 if (error) { 1151 device_printf(dev, "could not create spare DMA map for RX\n"); 1152 return (error); 1153 } 1154 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 1155 error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0, 1156 &sc->rl_ldata.rl_rx_desc[i].rx_dmamap); 1157 if (error) { 1158 device_printf(dev, "could not create DMA map for RX\n"); 1159 return (error); 1160 } 1161 } 1162 1163 /* Create DMA map for statistics. */ 1164 error = bus_dma_tag_create(sc->rl_parent_tag, RL_DUMP_ALIGN, 0, 1165 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1166 sizeof(struct rl_stats), 1, sizeof(struct rl_stats), 0, NULL, NULL, 1167 &sc->rl_ldata.rl_stag); 1168 if (error) { 1169 device_printf(dev, "could not create statistics DMA tag\n"); 1170 return (error); 1171 } 1172 /* Allocate DMA'able memory for statistics. 
*/ 1173 error = bus_dmamem_alloc(sc->rl_ldata.rl_stag, 1174 (void **)&sc->rl_ldata.rl_stats, 1175 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 1176 &sc->rl_ldata.rl_smap); 1177 if (error) { 1178 device_printf(dev, 1179 "could not allocate statistics DMA memory\n"); 1180 return (error); 1181 } 1182 /* Load the map for statistics. */ 1183 sc->rl_ldata.rl_stats_addr = 0; 1184 error = bus_dmamap_load(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap, 1185 sc->rl_ldata.rl_stats, sizeof(struct rl_stats), re_dma_map_addr, 1186 &sc->rl_ldata.rl_stats_addr, BUS_DMA_NOWAIT); 1187 if (error != 0 || sc->rl_ldata.rl_stats_addr == 0) { 1188 device_printf(dev, "could not load statistics DMA memory\n"); 1189 return (ENOMEM); 1190 } 1191 1192 return (0); 1193} 1194 1195/* 1196 * Attach the interface. Allocate softc structures, do ifmedia 1197 * setup and ethernet/BPF attach. 1198 */ 1199static int 1200re_attach(device_t dev) 1201{ 1202 u_char eaddr[ETHER_ADDR_LEN]; 1203 u_int16_t as[ETHER_ADDR_LEN / 2]; 1204 struct rl_softc *sc; 1205 struct ifnet *ifp; 1206 const struct rl_hwrev *hw_rev; 1207 u_int32_t cap, ctl; 1208 int hwrev; 1209 u_int16_t devid, re_did = 0; 1210 int error = 0, i, phy, rid; 1211 int msic, msixc, reg; 1212 uint8_t cfg; 1213 1214 sc = device_get_softc(dev); 1215 sc->rl_dev = dev; 1216 1217 mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1218 MTX_DEF); 1219 callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0); 1220 1221 /* 1222 * Map control/status registers. 1223 */ 1224 pci_enable_busmaster(dev); 1225 1226 devid = pci_get_device(dev); 1227 /* 1228 * Prefer memory space register mapping over IO space. 1229 * Because RTL8169SC does not seem to work when memory mapping 1230 * is used always activate io mapping. 1231 */ 1232 if (devid == RT_DEVICEID_8169SC) 1233 prefer_iomap = 1; 1234 if (prefer_iomap == 0) { 1235 sc->rl_res_id = PCIR_BAR(1); 1236 sc->rl_res_type = SYS_RES_MEMORY; 1237 /* RTL8168/8101E seems to use different BARs. */ 1238 if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E) 1239 sc->rl_res_id = PCIR_BAR(2); 1240 } else { 1241 sc->rl_res_id = PCIR_BAR(0); 1242 sc->rl_res_type = SYS_RES_IOPORT; 1243 } 1244 sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type, 1245 &sc->rl_res_id, RF_ACTIVE); 1246 if (sc->rl_res == NULL && prefer_iomap == 0) { 1247 sc->rl_res_id = PCIR_BAR(0); 1248 sc->rl_res_type = SYS_RES_IOPORT; 1249 sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type, 1250 &sc->rl_res_id, RF_ACTIVE); 1251 } 1252 if (sc->rl_res == NULL) { 1253 device_printf(dev, "couldn't map ports/memory\n"); 1254 error = ENXIO; 1255 goto fail; 1256 } 1257 1258 sc->rl_btag = rman_get_bustag(sc->rl_res); 1259 sc->rl_bhandle = rman_get_bushandle(sc->rl_res); 1260 1261 msic = pci_msi_count(dev); 1262 msixc = pci_msix_count(dev); 1263 if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) { 1264 sc->rl_flags |= RL_FLAG_PCIE; 1265 sc->rl_expcap = reg; 1266 } 1267 if (bootverbose) { 1268 device_printf(dev, "MSI count : %d\n", msic); 1269 device_printf(dev, "MSI-X count : %d\n", msixc); 1270 } 1271 if (msix_disable > 0) 1272 msixc = 0; 1273 if (msi_disable > 0) 1274 msic = 0; 1275 /* Prefer MSI-X to MSI. 
*/ 1276 if (msixc > 0) { 1277 msixc = RL_MSI_MESSAGES; 1278 rid = PCIR_BAR(4); 1279 sc->rl_res_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 1280 &rid, RF_ACTIVE); 1281 if (sc->rl_res_pba == NULL) { 1282 device_printf(sc->rl_dev, 1283 "could not allocate MSI-X PBA resource\n"); 1284 } 1285 if (sc->rl_res_pba != NULL && 1286 pci_alloc_msix(dev, &msixc) == 0) { 1287 if (msixc == RL_MSI_MESSAGES) { 1288 device_printf(dev, "Using %d MSI-X message\n", 1289 msixc); 1290 sc->rl_flags |= RL_FLAG_MSIX; 1291 } else 1292 pci_release_msi(dev); 1293 } 1294 if ((sc->rl_flags & RL_FLAG_MSIX) == 0) { 1295 if (sc->rl_res_pba != NULL) 1296 bus_release_resource(dev, SYS_RES_MEMORY, rid, 1297 sc->rl_res_pba); 1298 sc->rl_res_pba = NULL; 1299 msixc = 0; 1300 } 1301 } 1302 /* Prefer MSI to INTx. */ 1303 if (msixc == 0 && msic > 0) { 1304 msic = RL_MSI_MESSAGES; 1305 if (pci_alloc_msi(dev, &msic) == 0) { 1306 if (msic == RL_MSI_MESSAGES) { 1307 device_printf(dev, "Using %d MSI message\n", 1308 msic); 1309 sc->rl_flags |= RL_FLAG_MSI; 1310 /* Explicitly set MSI enable bit. */ 1311 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 1312 cfg = CSR_READ_1(sc, RL_CFG2); 1313 cfg |= RL_CFG2_MSI; 1314 CSR_WRITE_1(sc, RL_CFG2, cfg); 1315 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 1316 } else 1317 pci_release_msi(dev); 1318 } 1319 if ((sc->rl_flags & RL_FLAG_MSI) == 0) 1320 msic = 0; 1321 } 1322 1323 /* Allocate interrupt */ 1324 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) { 1325 rid = 0; 1326 sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1327 RF_SHAREABLE | RF_ACTIVE); 1328 if (sc->rl_irq[0] == NULL) { 1329 device_printf(dev, "couldn't allocate IRQ resources\n"); 1330 error = ENXIO; 1331 goto fail; 1332 } 1333 } else { 1334 for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) { 1335 sc->rl_irq[i] = bus_alloc_resource_any(dev, 1336 SYS_RES_IRQ, &rid, RF_ACTIVE); 1337 if (sc->rl_irq[i] == NULL) { 1338 device_printf(dev, 1339 "couldn't allocate IRQ resources for " 1340 "message %d\n", rid); 1341 error = ENXIO; 1342 goto fail; 1343 } 1344 } 1345 } 1346 1347 if ((sc->rl_flags & RL_FLAG_MSI) == 0) { 1348 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 1349 cfg = CSR_READ_1(sc, RL_CFG2); 1350 if ((cfg & RL_CFG2_MSI) != 0) { 1351 device_printf(dev, "turning off MSI enable bit.\n"); 1352 cfg &= ~RL_CFG2_MSI; 1353 CSR_WRITE_1(sc, RL_CFG2, cfg); 1354 } 1355 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 1356 } 1357 1358 /* Disable ASPM L0S/L1. */ 1359 if (sc->rl_expcap != 0) { 1360 cap = pci_read_config(dev, sc->rl_expcap + 1361 PCIER_LINK_CAP, 2); 1362 if ((cap & PCIEM_LINK_CAP_ASPM) != 0) { 1363 ctl = pci_read_config(dev, sc->rl_expcap + 1364 PCIER_LINK_CTL, 2); 1365 if ((ctl & PCIEM_LINK_CTL_ASPMC) != 0) { 1366 ctl &= ~PCIEM_LINK_CTL_ASPMC; 1367 pci_write_config(dev, sc->rl_expcap + 1368 PCIER_LINK_CTL, ctl, 2); 1369 device_printf(dev, "ASPM disabled\n"); 1370 } 1371 } else 1372 device_printf(dev, "no ASPM capability\n"); 1373 } 1374 1375 hw_rev = re_hwrevs; 1376 hwrev = CSR_READ_4(sc, RL_TXCFG); 1377 switch (hwrev & 0x70000000) { 1378 case 0x00000000: 1379 case 0x10000000: 1380 device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000); 1381 hwrev &= (RL_TXCFG_HWREV | 0x80000000); 1382 break; 1383 default: 1384 device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000); 1385 sc->rl_macrev = hwrev & 0x00700000; 1386 hwrev &= RL_TXCFG_HWREV; 1387 break; 1388 } 1389 device_printf(dev, "MAC rev. 
0x%08x\n", sc->rl_macrev); 1390 while (hw_rev->rl_desc != NULL) { 1391 if (hw_rev->rl_rev == hwrev) { 1392 sc->rl_type = hw_rev->rl_type; 1393 sc->rl_hwrev = hw_rev; 1394 break; 1395 } 1396 hw_rev++; 1397 } 1398 if (hw_rev->rl_desc == NULL) { 1399 device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev); 1400 error = ENXIO; 1401 goto fail; 1402 } 1403 1404 switch (hw_rev->rl_rev) { 1405 case RL_HWREV_8139CPLUS: 1406 sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD; 1407 break; 1408 case RL_HWREV_8100E: 1409 case RL_HWREV_8101E: 1410 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER; 1411 break; 1412 case RL_HWREV_8102E: 1413 case RL_HWREV_8102EL: 1414 case RL_HWREV_8102EL_SPIN1: 1415 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | 1416 RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | 1417 RL_FLAG_AUTOPAD; 1418 break; 1419 case RL_HWREV_8103E: 1420 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | 1421 RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | 1422 RL_FLAG_AUTOPAD | RL_FLAG_MACSLEEP; 1423 break; 1424 case RL_HWREV_8401E: 1425 case RL_HWREV_8105E: 1426 case RL_HWREV_8105E_SPIN1: 1427 case RL_HWREV_8106E: 1428 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | 1429 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 1430 RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD; 1431 break; 1432 case RL_HWREV_8402: 1433 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | 1434 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 1435 RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | 1436 RL_FLAG_CMDSTOP_WAIT_TXQ; 1437 break; 1438 case RL_HWREV_8168B_SPIN1: 1439 case RL_HWREV_8168B_SPIN2: 1440 sc->rl_flags |= RL_FLAG_WOLRXENB; 1441 /* FALLTHROUGH */ 1442 case RL_HWREV_8168B_SPIN3: 1443 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT; 1444 break; 1445 case RL_HWREV_8168C_SPIN2: 1446 sc->rl_flags |= RL_FLAG_MACSLEEP; 1447 /* FALLTHROUGH */ 1448 case RL_HWREV_8168C: 1449 if (sc->rl_macrev == 0x00200000) 1450 sc->rl_flags |= RL_FLAG_MACSLEEP; 1451 /* FALLTHROUGH */ 1452 case RL_HWREV_8168CP: 1453 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 1454 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | 1455 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK; 1456 break; 1457 case RL_HWREV_8168D: 1458 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | 1459 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 1460 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | 1461 RL_FLAG_WOL_MANLINK; 1462 break; 1463 case RL_HWREV_8168DP: 1464 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 1465 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD | 1466 RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK; 1467 break; 1468 case RL_HWREV_8168E: 1469 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | 1470 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 1471 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | 1472 RL_FLAG_WOL_MANLINK; 1473 break; 1474 case RL_HWREV_8168E_VL: 1475 case RL_HWREV_8168F: 1476 sc->rl_flags |= RL_FLAG_EARLYOFF; 1477 /* FALLTHROUGH */ 1478 case RL_HWREV_8411: 1479 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 1480 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | 1481 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | 1482 RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK; 1483 break; 1484 case RL_HWREV_8168EP: 1485 case RL_HWREV_8168G: 1486 case RL_HWREV_8411B: 1487 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 1488 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | 1489 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | 1490 
RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK | 1491 RL_FLAG_EARLYOFFV2 | RL_FLAG_RXDV_GATED; 1492 break; 1493 case RL_HWREV_8168GU: 1494 if (pci_get_device(dev) == RT_DEVICEID_8101E) { 1495 /* RTL8106EUS */ 1496 sc->rl_flags |= RL_FLAG_FASTETHER; 1497 } else 1498 sc->rl_flags |= RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK; 1499 1500 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 1501 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | 1502 RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ | 1503 RL_FLAG_EARLYOFFV2 | RL_FLAG_RXDV_GATED; 1504 break; 1505 case RL_HWREV_8169_8110SB: 1506 case RL_HWREV_8169_8110SBL: 1507 case RL_HWREV_8169_8110SC: 1508 case RL_HWREV_8169_8110SCE: 1509 sc->rl_flags |= RL_FLAG_PHYWAKE; 1510 /* FALLTHROUGH */ 1511 case RL_HWREV_8169: 1512 case RL_HWREV_8169S: 1513 case RL_HWREV_8110S: 1514 sc->rl_flags |= RL_FLAG_MACRESET; 1515 break; 1516 default: 1517 break; 1518 } 1519 1520 if (sc->rl_hwrev->rl_rev == RL_HWREV_8139CPLUS) { 1521 sc->rl_cfg0 = RL_8139_CFG0; 1522 sc->rl_cfg1 = RL_8139_CFG1; 1523 sc->rl_cfg2 = 0; 1524 sc->rl_cfg3 = RL_8139_CFG3; 1525 sc->rl_cfg4 = RL_8139_CFG4; 1526 sc->rl_cfg5 = RL_8139_CFG5; 1527 } else { 1528 sc->rl_cfg0 = RL_CFG0; 1529 sc->rl_cfg1 = RL_CFG1; 1530 sc->rl_cfg2 = RL_CFG2; 1531 sc->rl_cfg3 = RL_CFG3; 1532 sc->rl_cfg4 = RL_CFG4; 1533 sc->rl_cfg5 = RL_CFG5; 1534 } 1535 1536 /* Reset the adapter. */ 1537 RL_LOCK(sc); 1538 re_reset(sc); 1539 RL_UNLOCK(sc); 1540 1541 /* Enable PME. */ 1542 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 1543 cfg = CSR_READ_1(sc, sc->rl_cfg1); 1544 cfg |= RL_CFG1_PME; 1545 CSR_WRITE_1(sc, sc->rl_cfg1, cfg); 1546 cfg = CSR_READ_1(sc, sc->rl_cfg5); 1547 cfg &= RL_CFG5_PME_STS; 1548 CSR_WRITE_1(sc, sc->rl_cfg5, cfg); 1549 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 1550 1551 if ((sc->rl_flags & RL_FLAG_PAR) != 0) { 1552 /* 1553 * XXX Should have a better way to extract station 1554 * address from EEPROM. 1555 */ 1556 for (i = 0; i < ETHER_ADDR_LEN; i++) 1557 eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i); 1558 } else { 1559 sc->rl_eewidth = RL_9356_ADDR_LEN; 1560 re_read_eeprom(sc, (caddr_t)&re_did, 0, 1); 1561 if (re_did != 0x8129) 1562 sc->rl_eewidth = RL_9346_ADDR_LEN; 1563 1564 /* 1565 * Get station address from the EEPROM. 1566 */ 1567 re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3); 1568 for (i = 0; i < ETHER_ADDR_LEN / 2; i++) 1569 as[i] = le16toh(as[i]); 1570 bcopy(as, eaddr, ETHER_ADDR_LEN); 1571 } 1572 1573 if (sc->rl_type == RL_8169) { 1574 /* Set RX length mask and number of descriptors. */ 1575 sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN; 1576 sc->rl_txstart = RL_GTXSTART; 1577 sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT; 1578 sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT; 1579 } else { 1580 /* Set RX length mask and number of descriptors. */ 1581 sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN; 1582 sc->rl_txstart = RL_TXSTART; 1583 sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT; 1584 sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT; 1585 } 1586 1587 error = re_allocmem(dev, sc); 1588 if (error) 1589 goto fail; 1590 re_add_sysctls(sc); 1591 1592 ifp = sc->rl_ifp = if_alloc(IFT_ETHER); 1593 if (ifp == NULL) { 1594 device_printf(dev, "can not if_alloc()\n"); 1595 error = ENOSPC; 1596 goto fail; 1597 } 1598 1599 /* Take controller out of deep sleep mode. 
*/ 1600 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { 1601 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) 1602 CSR_WRITE_1(sc, RL_GPIO, 1603 CSR_READ_1(sc, RL_GPIO) | 0x01); 1604 else 1605 CSR_WRITE_1(sc, RL_GPIO, 1606 CSR_READ_1(sc, RL_GPIO) & ~0x01); 1607 } 1608 1609 /* Take PHY out of power down mode. */ 1610 if ((sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) { 1611 CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80); 1612 if (hw_rev->rl_rev == RL_HWREV_8401E) 1613 CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08); 1614 } 1615 if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) { 1616 re_gmii_writereg(dev, 1, 0x1f, 0); 1617 re_gmii_writereg(dev, 1, 0x0e, 0); 1618 } 1619 1620 ifp->if_softc = sc; 1621 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1622 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1623 ifp->if_ioctl = re_ioctl; 1624 ifp->if_start = re_start; 1625 /* 1626 * RTL8168/8111C generates wrong IP checksummed frame if the 1627 * packet has IP options so disable TX checksum offloading. 1628 */ 1629 if (sc->rl_hwrev->rl_rev == RL_HWREV_8168C || 1630 sc->rl_hwrev->rl_rev == RL_HWREV_8168C_SPIN2 || 1631 sc->rl_hwrev->rl_rev == RL_HWREV_8168CP) { 1632 ifp->if_hwassist = 0; 1633 ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TSO4; 1634 } else { 1635 ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP; 1636 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4; 1637 } 1638 ifp->if_hwassist |= CSUM_TSO; 1639 ifp->if_capenable = ifp->if_capabilities; 1640 ifp->if_init = re_init; 1641 IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN); 1642 ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN; 1643 IFQ_SET_READY(&ifp->if_snd); 1644 1645 TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc); 1646 1647#define RE_PHYAD_INTERNAL 0 1648 1649 /* Do MII setup. */ 1650 phy = RE_PHYAD_INTERNAL; 1651 if (sc->rl_type == RL_8169) 1652 phy = 1; 1653 error = mii_attach(dev, &sc->rl_miibus, ifp, re_ifmedia_upd, 1654 re_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, MIIF_DOPAUSE); 1655 if (error != 0) { 1656 device_printf(dev, "attaching PHYs failed\n"); 1657 goto fail; 1658 } 1659 1660 /* 1661 * Call MI attach routine. 1662 */ 1663 ether_ifattach(ifp, eaddr); 1664 1665 /* VLAN capability setup */ 1666 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; 1667 if (ifp->if_capabilities & IFCAP_HWCSUM) 1668 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 1669 /* Enable WOL if PM is supported. */ 1670 if (pci_find_cap(sc->rl_dev, PCIY_PMG, ®) == 0) 1671 ifp->if_capabilities |= IFCAP_WOL; 1672 ifp->if_capenable = ifp->if_capabilities; 1673 ifp->if_capenable &= ~(IFCAP_WOL_UCAST | IFCAP_WOL_MCAST); 1674 /* 1675 * Don't enable TSO by default. It is known to generate 1676 * corrupted TCP segments(bad TCP options) under certain 1677 * circumstances. 1678 */ 1679 ifp->if_hwassist &= ~CSUM_TSO; 1680 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO); 1681#ifdef DEVICE_POLLING 1682 ifp->if_capabilities |= IFCAP_POLLING; 1683#endif 1684 /* 1685 * Tell the upper layer(s) we support long frames. 1686 * Must appear after the call to ether_ifattach() because 1687 * ether_ifattach() sets ifi_hdrlen to the default value. 1688 */ 1689 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1690 1691#ifdef DEV_NETMAP 1692 re_netmap_attach(sc); 1693#endif /* DEV_NETMAP */ 1694#ifdef RE_DIAG 1695 /* 1696 * Perform hardware diagnostic on the original RTL8169. 1697 * Some 32-bit cards were incorrectly wired and would 1698 * malfunction if plugged into a 64-bit slot. 
1699 */ 1700 1701 if (hwrev == RL_HWREV_8169) { 1702 error = re_diag(sc); 1703 if (error) { 1704 device_printf(dev, 1705 "attach aborted due to hardware diag failure\n"); 1706 ether_ifdetach(ifp); 1707 goto fail; 1708 } 1709 } 1710#endif 1711 1712#ifdef RE_TX_MODERATION 1713 intr_filter = 1; 1714#endif 1715 /* Hook interrupt last to avoid having to lock softc */ 1716 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 && 1717 intr_filter == 0) { 1718 error = bus_setup_intr(dev, sc->rl_irq[0], 1719 INTR_TYPE_NET | INTR_MPSAFE, NULL, re_intr_msi, sc, 1720 &sc->rl_intrhand[0]); 1721 } else { 1722 error = bus_setup_intr(dev, sc->rl_irq[0], 1723 INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc, 1724 &sc->rl_intrhand[0]); 1725 } 1726 if (error) { 1727 device_printf(dev, "couldn't set up irq\n"); 1728 ether_ifdetach(ifp); 1729 } 1730 1731fail: 1732 1733 if (error) 1734 re_detach(dev); 1735 1736 return (error); 1737} 1738 1739/* 1740 * Shutdown hardware and free up resources. This can be called any 1741 * time after the mutex has been initialized. It is called in both 1742 * the error case in attach and the normal detach case so it needs 1743 * to be careful about only freeing resources that have actually been 1744 * allocated. 1745 */ 1746static int 1747re_detach(device_t dev) 1748{ 1749 struct rl_softc *sc; 1750 struct ifnet *ifp; 1751 int i, rid; 1752 1753 sc = device_get_softc(dev); 1754 ifp = sc->rl_ifp; 1755 KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized")); 1756 1757 /* These should only be active if attach succeeded */ 1758 if (device_is_attached(dev)) { 1759#ifdef DEVICE_POLLING 1760 if (ifp->if_capenable & IFCAP_POLLING) 1761 ether_poll_deregister(ifp); 1762#endif 1763 RL_LOCK(sc); 1764#if 0 1765 sc->suspended = 1; 1766#endif 1767 re_stop(sc); 1768 RL_UNLOCK(sc); 1769 callout_drain(&sc->rl_stat_callout); 1770 taskqueue_drain(taskqueue_fast, &sc->rl_inttask); 1771 /* 1772 * Force off the IFF_UP flag here, in case someone 1773 * still had a BPF descriptor attached to this 1774 * interface. If they do, ether_ifdetach() will cause 1775 * the BPF code to try and clear the promisc mode 1776 * flag, which will bubble down to re_ioctl(), 1777 * which will try to call re_init() again. This will 1778 * turn the NIC back on and restart the MII ticker, 1779 * which will panic the system when the kernel tries 1780 * to invoke the re_tick() function that isn't there 1781 * anymore. 1782 */ 1783 ifp->if_flags &= ~IFF_UP; 1784 ether_ifdetach(ifp); 1785 } 1786 if (sc->rl_miibus) 1787 device_delete_child(dev, sc->rl_miibus); 1788 bus_generic_detach(dev); 1789 1790 /* 1791 * The rest is resource deallocation, so we should already be 1792 * stopped here. 
1793 */ 1794 1795 if (sc->rl_intrhand[0] != NULL) { 1796 bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]); 1797 sc->rl_intrhand[0] = NULL; 1798 } 1799 if (ifp != NULL) { 1800#ifdef DEV_NETMAP 1801 netmap_detach(ifp); 1802#endif /* DEV_NETMAP */ 1803 if_free(ifp); 1804 } 1805 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) 1806 rid = 0; 1807 else 1808 rid = 1; 1809 if (sc->rl_irq[0] != NULL) { 1810 bus_release_resource(dev, SYS_RES_IRQ, rid, sc->rl_irq[0]); 1811 sc->rl_irq[0] = NULL; 1812 } 1813 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0) 1814 pci_release_msi(dev); 1815 if (sc->rl_res_pba) { 1816 rid = PCIR_BAR(4); 1817 bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba); 1818 } 1819 if (sc->rl_res) 1820 bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id, 1821 sc->rl_res); 1822 1823 /* Unload and free the RX DMA ring memory and map */ 1824 1825 if (sc->rl_ldata.rl_rx_list_tag) { 1826 if (sc->rl_ldata.rl_rx_list_map) 1827 bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag, 1828 sc->rl_ldata.rl_rx_list_map); 1829 if (sc->rl_ldata.rl_rx_list_map && sc->rl_ldata.rl_rx_list) 1830 bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag, 1831 sc->rl_ldata.rl_rx_list, 1832 sc->rl_ldata.rl_rx_list_map); 1833 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag); 1834 } 1835 1836 /* Unload and free the TX DMA ring memory and map */ 1837 1838 if (sc->rl_ldata.rl_tx_list_tag) { 1839 if (sc->rl_ldata.rl_tx_list_map) 1840 bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag, 1841 sc->rl_ldata.rl_tx_list_map); 1842 if (sc->rl_ldata.rl_tx_list_map && sc->rl_ldata.rl_tx_list) 1843 bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag, 1844 sc->rl_ldata.rl_tx_list, 1845 sc->rl_ldata.rl_tx_list_map); 1846 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag); 1847 } 1848 1849 /* Destroy all the RX and TX buffer maps */ 1850 1851 if (sc->rl_ldata.rl_tx_mtag) { 1852 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { 1853 if (sc->rl_ldata.rl_tx_desc[i].tx_dmamap) 1854 bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag, 1855 sc->rl_ldata.rl_tx_desc[i].tx_dmamap); 1856 } 1857 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag); 1858 } 1859 if (sc->rl_ldata.rl_rx_mtag) { 1860 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 1861 if (sc->rl_ldata.rl_rx_desc[i].rx_dmamap) 1862 bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag, 1863 sc->rl_ldata.rl_rx_desc[i].rx_dmamap); 1864 } 1865 if (sc->rl_ldata.rl_rx_sparemap) 1866 bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag, 1867 sc->rl_ldata.rl_rx_sparemap); 1868 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag); 1869 } 1870 if (sc->rl_ldata.rl_jrx_mtag) { 1871 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 1872 if (sc->rl_ldata.rl_jrx_desc[i].rx_dmamap) 1873 bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag, 1874 sc->rl_ldata.rl_jrx_desc[i].rx_dmamap); 1875 } 1876 if (sc->rl_ldata.rl_jrx_sparemap) 1877 bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag, 1878 sc->rl_ldata.rl_jrx_sparemap); 1879 bus_dma_tag_destroy(sc->rl_ldata.rl_jrx_mtag); 1880 } 1881 /* Unload and free the stats buffer and map */ 1882 1883 if (sc->rl_ldata.rl_stag) { 1884 if (sc->rl_ldata.rl_smap) 1885 bus_dmamap_unload(sc->rl_ldata.rl_stag, 1886 sc->rl_ldata.rl_smap); 1887 if (sc->rl_ldata.rl_smap && sc->rl_ldata.rl_stats) 1888 bus_dmamem_free(sc->rl_ldata.rl_stag, 1889 sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap); 1890 bus_dma_tag_destroy(sc->rl_ldata.rl_stag); 1891 } 1892 1893 if (sc->rl_parent_tag) 1894 bus_dma_tag_destroy(sc->rl_parent_tag); 1895 1896 mtx_destroy(&sc->rl_mtx); 1897 1898 return (0); 1899} 1900 1901static __inline void 
1902re_discard_rxbuf(struct rl_softc *sc, int idx) 1903{ 1904 struct rl_desc *desc; 1905 struct rl_rxdesc *rxd; 1906 uint32_t cmdstat; 1907 1908 if (sc->rl_ifp->if_mtu > RL_MTU && 1909 (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) 1910 rxd = &sc->rl_ldata.rl_jrx_desc[idx]; 1911 else 1912 rxd = &sc->rl_ldata.rl_rx_desc[idx]; 1913 desc = &sc->rl_ldata.rl_rx_list[idx]; 1914 desc->rl_vlanctl = 0; 1915 cmdstat = rxd->rx_size; 1916 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) 1917 cmdstat |= RL_RDESC_CMD_EOR; 1918 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); 1919} 1920 1921static int 1922re_newbuf(struct rl_softc *sc, int idx) 1923{ 1924 struct mbuf *m; 1925 struct rl_rxdesc *rxd; 1926 bus_dma_segment_t segs[1]; 1927 bus_dmamap_t map; 1928 struct rl_desc *desc; 1929 uint32_t cmdstat; 1930 int error, nsegs; 1931 1932 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1933 if (m == NULL) 1934 return (ENOBUFS); 1935 1936 m->m_len = m->m_pkthdr.len = MCLBYTES; 1937#ifdef RE_FIXUP_RX 1938 /* 1939 * This is part of an evil trick to deal with non-x86 platforms. 1940 * The RealTek chip requires RX buffers to be aligned on 64-bit 1941 * boundaries, but that will hose non-x86 machines. To get around 1942 * this, we leave some empty space at the start of each buffer 1943 * and for non-x86 hosts, we copy the buffer back six bytes 1944 * to achieve word alignment. This is slightly more efficient 1945 * than allocating a new buffer, copying the contents, and 1946 * discarding the old buffer. 1947 */ 1948 m_adj(m, RE_ETHER_ALIGN); 1949#endif 1950 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag, 1951 sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT); 1952 if (error != 0) { 1953 m_freem(m); 1954 return (ENOBUFS); 1955 } 1956 KASSERT(nsegs == 1, ("%s: %d segment returned!", __func__, nsegs)); 1957 1958 rxd = &sc->rl_ldata.rl_rx_desc[idx]; 1959 if (rxd->rx_m != NULL) { 1960 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, 1961 BUS_DMASYNC_POSTREAD); 1962 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap); 1963 } 1964 1965 rxd->rx_m = m; 1966 map = rxd->rx_dmamap; 1967 rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap; 1968 rxd->rx_size = segs[0].ds_len; 1969 sc->rl_ldata.rl_rx_sparemap = map; 1970 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, 1971 BUS_DMASYNC_PREREAD); 1972 1973 desc = &sc->rl_ldata.rl_rx_list[idx]; 1974 desc->rl_vlanctl = 0; 1975 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr)); 1976 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr)); 1977 cmdstat = segs[0].ds_len; 1978 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) 1979 cmdstat |= RL_RDESC_CMD_EOR; 1980 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); 1981 1982 return (0); 1983} 1984 1985static int 1986re_jumbo_newbuf(struct rl_softc *sc, int idx) 1987{ 1988 struct mbuf *m; 1989 struct rl_rxdesc *rxd; 1990 bus_dma_segment_t segs[1]; 1991 bus_dmamap_t map; 1992 struct rl_desc *desc; 1993 uint32_t cmdstat; 1994 int error, nsegs; 1995 1996 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); 1997 if (m == NULL) 1998 return (ENOBUFS); 1999 m->m_len = m->m_pkthdr.len = MJUM9BYTES; 2000#ifdef RE_FIXUP_RX 2001 m_adj(m, RE_ETHER_ALIGN); 2002#endif 2003 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_jrx_mtag, 2004 sc->rl_ldata.rl_jrx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT); 2005 if (error != 0) { 2006 m_freem(m); 2007 return (ENOBUFS); 2008 } 2009 KASSERT(nsegs == 1, ("%s: %d segment returned!", __func__, nsegs)); 2010 2011 rxd = &sc->rl_ldata.rl_jrx_desc[idx]; 2012 if (rxd->rx_m != NULL) { 
2013 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap, 2014 BUS_DMASYNC_POSTREAD); 2015 bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap); 2016 } 2017 2018 rxd->rx_m = m; 2019 map = rxd->rx_dmamap; 2020 rxd->rx_dmamap = sc->rl_ldata.rl_jrx_sparemap; 2021 rxd->rx_size = segs[0].ds_len; 2022 sc->rl_ldata.rl_jrx_sparemap = map; 2023 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap, 2024 BUS_DMASYNC_PREREAD); 2025 2026 desc = &sc->rl_ldata.rl_rx_list[idx]; 2027 desc->rl_vlanctl = 0; 2028 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr)); 2029 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr)); 2030 cmdstat = segs[0].ds_len; 2031 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) 2032 cmdstat |= RL_RDESC_CMD_EOR; 2033 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); 2034 2035 return (0); 2036} 2037 2038#ifdef RE_FIXUP_RX 2039static __inline void 2040re_fixup_rx(struct mbuf *m) 2041{ 2042 int i; 2043 uint16_t *src, *dst; 2044 2045 src = mtod(m, uint16_t *); 2046 dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src; 2047 2048 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 2049 *dst++ = *src++; 2050 2051 m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN; 2052} 2053#endif 2054 2055static int 2056re_tx_list_init(struct rl_softc *sc) 2057{ 2058 struct rl_desc *desc; 2059 int i; 2060 2061 RL_LOCK_ASSERT(sc); 2062 2063 bzero(sc->rl_ldata.rl_tx_list, 2064 sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc)); 2065 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) 2066 sc->rl_ldata.rl_tx_desc[i].tx_m = NULL; 2067#ifdef DEV_NETMAP 2068 re_netmap_tx_init(sc); 2069#endif /* DEV_NETMAP */ 2070 /* Set EOR. */ 2071 desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1]; 2072 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR); 2073 2074 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 2075 sc->rl_ldata.rl_tx_list_map, 2076 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2077 2078 sc->rl_ldata.rl_tx_prodidx = 0; 2079 sc->rl_ldata.rl_tx_considx = 0; 2080 sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt; 2081 2082 return (0); 2083} 2084 2085static int 2086re_rx_list_init(struct rl_softc *sc) 2087{ 2088 int error, i; 2089 2090 bzero(sc->rl_ldata.rl_rx_list, 2091 sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)); 2092 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 2093 sc->rl_ldata.rl_rx_desc[i].rx_m = NULL; 2094 if ((error = re_newbuf(sc, i)) != 0) 2095 return (error); 2096 } 2097#ifdef DEV_NETMAP 2098 re_netmap_rx_init(sc); 2099#endif /* DEV_NETMAP */ 2100 2101 /* Flush the RX descriptors */ 2102 2103 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 2104 sc->rl_ldata.rl_rx_list_map, 2105 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2106 2107 sc->rl_ldata.rl_rx_prodidx = 0; 2108 sc->rl_head = sc->rl_tail = NULL; 2109 sc->rl_int_rx_act = 0; 2110 2111 return (0); 2112} 2113 2114static int 2115re_jrx_list_init(struct rl_softc *sc) 2116{ 2117 int error, i; 2118 2119 bzero(sc->rl_ldata.rl_rx_list, 2120 sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)); 2121 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 2122 sc->rl_ldata.rl_jrx_desc[i].rx_m = NULL; 2123 if ((error = re_jumbo_newbuf(sc, i)) != 0) 2124 return (error); 2125 } 2126 2127 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 2128 sc->rl_ldata.rl_rx_list_map, 2129 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2130 2131 sc->rl_ldata.rl_rx_prodidx = 0; 2132 sc->rl_head = sc->rl_tail = NULL; 2133 sc->rl_int_rx_act = 0; 2134 2135 return (0); 2136} 2137 2138/* 2139 * RX handler for C+ and 8169. 
For the gigE chips, we support 2140 * the reception of jumbo frames that have been fragmented 2141 * across multiple 2K mbuf cluster buffers. 2142 */ 2143static int 2144re_rxeof(struct rl_softc *sc, int *rx_npktsp) 2145{ 2146 struct mbuf *m; 2147 struct ifnet *ifp; 2148 int i, rxerr, total_len; 2149 struct rl_desc *cur_rx; 2150 u_int32_t rxstat, rxvlan; 2151 int jumbo, maxpkt = 16, rx_npkts = 0; 2152 2153 RL_LOCK_ASSERT(sc); 2154 2155 ifp = sc->rl_ifp; 2156#ifdef DEV_NETMAP 2157 if (netmap_rx_irq(ifp, 0, &rx_npkts)) 2158 return 0; 2159#endif /* DEV_NETMAP */ 2160 if (ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) 2161 jumbo = 1; 2162 else 2163 jumbo = 0; 2164 2165 /* Invalidate the descriptor memory */ 2166 2167 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 2168 sc->rl_ldata.rl_rx_list_map, 2169 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2170 2171 for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0; 2172 i = RL_RX_DESC_NXT(sc, i)) { 2173 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2174 break; 2175 cur_rx = &sc->rl_ldata.rl_rx_list[i]; 2176 rxstat = le32toh(cur_rx->rl_cmdstat); 2177 if ((rxstat & RL_RDESC_STAT_OWN) != 0) 2178 break; 2179 total_len = rxstat & sc->rl_rxlenmask; 2180 rxvlan = le32toh(cur_rx->rl_vlanctl); 2181 if (jumbo != 0) 2182 m = sc->rl_ldata.rl_jrx_desc[i].rx_m; 2183 else 2184 m = sc->rl_ldata.rl_rx_desc[i].rx_m; 2185 2186 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && 2187 (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) != 2188 (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) { 2189 /* 2190 * RTL8168C or later controllers do not 2191 * support multi-fragment packet. 2192 */ 2193 re_discard_rxbuf(sc, i); 2194 continue; 2195 } else if ((rxstat & RL_RDESC_STAT_EOF) == 0) { 2196 if (re_newbuf(sc, i) != 0) { 2197 /* 2198 * If this is part of a multi-fragment packet, 2199 * discard all the pieces. 2200 */ 2201 if (sc->rl_head != NULL) { 2202 m_freem(sc->rl_head); 2203 sc->rl_head = sc->rl_tail = NULL; 2204 } 2205 re_discard_rxbuf(sc, i); 2206 continue; 2207 } 2208 m->m_len = RE_RX_DESC_BUFLEN; 2209 if (sc->rl_head == NULL) 2210 sc->rl_head = sc->rl_tail = m; 2211 else { 2212 m->m_flags &= ~M_PKTHDR; 2213 sc->rl_tail->m_next = m; 2214 sc->rl_tail = m; 2215 } 2216 continue; 2217 } 2218 2219 /* 2220 * NOTE: for the 8139C+, the frame length field 2221 * is always 12 bits in size, but for the gigE chips, 2222 * it is 13 bits (since the max RX frame length is 16K). 2223 * Unfortunately, all 32 bits in the status word 2224 * were already used, so to make room for the extra 2225 * length bit, RealTek took out the 'frame alignment 2226 * error' bit and shifted the other status bits 2227 * over one slot. The OWN, EOR, FS and LS bits are 2228 * still in the same places. We have already extracted 2229 * the frame length and checked the OWN bit, so rather 2230 * than using an alternate bit mapping, we shift the 2231 * status bits one space to the right so we can evaluate 2232 * them using the 8169 status as though it was in the 2233 * same format as that of the 8139C+. 2234 */ 2235 if (sc->rl_type == RL_8169) 2236 rxstat >>= 1; 2237 2238 /* 2239 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be 2240 * set, but if CRC is clear, it will still be a valid frame. 
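	 * (The error check below therefore clears rxerr again on
	 * pre-JUMBOV2 chips when total_len exceeds 8191 and
	 * RL_RDESC_STAT_GIANT is the only error bit set, so such an
	 * oversized but otherwise intact frame is still delivered.)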
2241 */ 2242 if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0) { 2243 rxerr = 1; 2244 if ((sc->rl_flags & RL_FLAG_JUMBOV2) == 0 && 2245 total_len > 8191 && 2246 (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT) 2247 rxerr = 0; 2248 if (rxerr != 0) { 2249 ifp->if_ierrors++; 2250 /* 2251 * If this is part of a multi-fragment packet, 2252 * discard all the pieces. 2253 */ 2254 if (sc->rl_head != NULL) { 2255 m_freem(sc->rl_head); 2256 sc->rl_head = sc->rl_tail = NULL; 2257 } 2258 re_discard_rxbuf(sc, i); 2259 continue; 2260 } 2261 } 2262 2263 /* 2264 * If allocating a replacement mbuf fails, 2265 * reload the current one. 2266 */ 2267 if (jumbo != 0) 2268 rxerr = re_jumbo_newbuf(sc, i); 2269 else 2270 rxerr = re_newbuf(sc, i); 2271 if (rxerr != 0) { 2272 ifp->if_iqdrops++; 2273 if (sc->rl_head != NULL) { 2274 m_freem(sc->rl_head); 2275 sc->rl_head = sc->rl_tail = NULL; 2276 } 2277 re_discard_rxbuf(sc, i); 2278 continue; 2279 } 2280 2281 if (sc->rl_head != NULL) { 2282 if (jumbo != 0) 2283 m->m_len = total_len; 2284 else { 2285 m->m_len = total_len % RE_RX_DESC_BUFLEN; 2286 if (m->m_len == 0) 2287 m->m_len = RE_RX_DESC_BUFLEN; 2288 } 2289 /* 2290 * Special case: if there's 4 bytes or less 2291 * in this buffer, the mbuf can be discarded: 2292 * the last 4 bytes is the CRC, which we don't 2293 * care about anyway. 2294 */ 2295 if (m->m_len <= ETHER_CRC_LEN) { 2296 sc->rl_tail->m_len -= 2297 (ETHER_CRC_LEN - m->m_len); 2298 m_freem(m); 2299 } else { 2300 m->m_len -= ETHER_CRC_LEN; 2301 m->m_flags &= ~M_PKTHDR; 2302 sc->rl_tail->m_next = m; 2303 } 2304 m = sc->rl_head; 2305 sc->rl_head = sc->rl_tail = NULL; 2306 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 2307 } else 2308 m->m_pkthdr.len = m->m_len = 2309 (total_len - ETHER_CRC_LEN); 2310 2311#ifdef RE_FIXUP_RX 2312 re_fixup_rx(m); 2313#endif 2314 ifp->if_ipackets++; 2315 m->m_pkthdr.rcvif = ifp; 2316 2317 /* Do RX checksumming if enabled */ 2318 2319 if (ifp->if_capenable & IFCAP_RXCSUM) { 2320 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) { 2321 /* Check IP header checksum */ 2322 if (rxstat & RL_RDESC_STAT_PROTOID) 2323 m->m_pkthdr.csum_flags |= 2324 CSUM_IP_CHECKED; 2325 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD)) 2326 m->m_pkthdr.csum_flags |= 2327 CSUM_IP_VALID; 2328 2329 /* Check TCP/UDP checksum */ 2330 if ((RL_TCPPKT(rxstat) && 2331 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || 2332 (RL_UDPPKT(rxstat) && 2333 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { 2334 m->m_pkthdr.csum_flags |= 2335 CSUM_DATA_VALID|CSUM_PSEUDO_HDR; 2336 m->m_pkthdr.csum_data = 0xffff; 2337 } 2338 } else { 2339 /* 2340 * RTL8168C/RTL816CP/RTL8111C/RTL8111CP 2341 */ 2342 if ((rxstat & RL_RDESC_STAT_PROTOID) && 2343 (rxvlan & RL_RDESC_IPV4)) 2344 m->m_pkthdr.csum_flags |= 2345 CSUM_IP_CHECKED; 2346 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) && 2347 (rxvlan & RL_RDESC_IPV4)) 2348 m->m_pkthdr.csum_flags |= 2349 CSUM_IP_VALID; 2350 if (((rxstat & RL_RDESC_STAT_TCP) && 2351 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || 2352 ((rxstat & RL_RDESC_STAT_UDP) && 2353 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { 2354 m->m_pkthdr.csum_flags |= 2355 CSUM_DATA_VALID|CSUM_PSEUDO_HDR; 2356 m->m_pkthdr.csum_data = 0xffff; 2357 } 2358 } 2359 } 2360 maxpkt--; 2361 if (rxvlan & RL_RDESC_VLANCTL_TAG) { 2362 m->m_pkthdr.ether_vtag = 2363 bswap16((rxvlan & RL_RDESC_VLANCTL_DATA)); 2364 m->m_flags |= M_VLANTAG; 2365 } 2366 RL_UNLOCK(sc); 2367 (*ifp->if_input)(ifp, m); 2368 RL_LOCK(sc); 2369 rx_npkts++; 2370 } 2371 2372 /* Flush the RX DMA ring */ 2373 2374 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 2375 
sc->rl_ldata.rl_rx_list_map, 2376 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2377 2378 sc->rl_ldata.rl_rx_prodidx = i; 2379 2380 if (rx_npktsp != NULL) 2381 *rx_npktsp = rx_npkts; 2382 if (maxpkt) 2383 return (EAGAIN); 2384 2385 return (0); 2386} 2387 2388static void 2389re_txeof(struct rl_softc *sc) 2390{ 2391 struct ifnet *ifp; 2392 struct rl_txdesc *txd; 2393 u_int32_t txstat; 2394 int cons; 2395 2396 cons = sc->rl_ldata.rl_tx_considx; 2397 if (cons == sc->rl_ldata.rl_tx_prodidx) 2398 return; 2399 2400 ifp = sc->rl_ifp; 2401#ifdef DEV_NETMAP 2402 if (netmap_tx_irq(ifp, 0)) 2403 return; 2404#endif /* DEV_NETMAP */ 2405 /* Invalidate the TX descriptor list */ 2406 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 2407 sc->rl_ldata.rl_tx_list_map, 2408 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2409 2410 for (; cons != sc->rl_ldata.rl_tx_prodidx; 2411 cons = RL_TX_DESC_NXT(sc, cons)) { 2412 txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat); 2413 if (txstat & RL_TDESC_STAT_OWN) 2414 break; 2415 /* 2416 * We only stash mbufs in the last descriptor 2417 * in a fragment chain, which also happens to 2418 * be the only place where the TX status bits 2419 * are valid. 2420 */ 2421 if (txstat & RL_TDESC_CMD_EOF) { 2422 txd = &sc->rl_ldata.rl_tx_desc[cons]; 2423 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, 2424 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2425 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, 2426 txd->tx_dmamap); 2427 KASSERT(txd->tx_m != NULL, 2428 ("%s: freeing NULL mbufs!", __func__)); 2429 m_freem(txd->tx_m); 2430 txd->tx_m = NULL; 2431 if (txstat & (RL_TDESC_STAT_EXCESSCOL| 2432 RL_TDESC_STAT_COLCNT)) 2433 ifp->if_collisions++; 2434 if (txstat & RL_TDESC_STAT_TXERRSUM) 2435 ifp->if_oerrors++; 2436 else 2437 ifp->if_opackets++; 2438 } 2439 sc->rl_ldata.rl_tx_free++; 2440 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2441 } 2442 sc->rl_ldata.rl_tx_considx = cons; 2443 2444 /* No changes made to the TX ring, so no flush needed */ 2445 2446 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) { 2447#ifdef RE_TX_MODERATION 2448 /* 2449 * If not all descriptors have been reaped yet, reload 2450 * the timer so that we will eventually get another 2451 * interrupt that will cause us to re-enter this routine. 2452 * This is done in case the transmitter has gone idle. 2453 */ 2454 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2455#endif 2456 } else 2457 sc->rl_watchdog_timer = 0; 2458} 2459 2460static void 2461re_tick(void *xsc) 2462{ 2463 struct rl_softc *sc; 2464 struct mii_data *mii; 2465 2466 sc = xsc; 2467 2468 RL_LOCK_ASSERT(sc); 2469 2470 mii = device_get_softc(sc->rl_miibus); 2471 mii_tick(mii); 2472 if ((sc->rl_flags & RL_FLAG_LINK) == 0) 2473 re_miibus_statchg(sc->rl_dev); 2474 /* 2475 * Reclaim transmitted frames here. Technically it is not 2476 * necessary to do here but it ensures periodic reclamation 2477 * regardless of Tx completion interrupt which seems to be 2478 * lost on PCIe based controllers under certain situations. 
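	 * (re_tick() is rescheduled once per second via callout_reset(),
	 * so this reclamation path runs at least that often even when the
	 * Tx completion interrupt never arrives.)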
2479 */ 2480 re_txeof(sc); 2481 re_watchdog(sc); 2482 callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); 2483} 2484 2485#ifdef DEVICE_POLLING 2486static int 2487re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 2488{ 2489 struct rl_softc *sc = ifp->if_softc; 2490 int rx_npkts = 0; 2491 2492 RL_LOCK(sc); 2493 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2494 rx_npkts = re_poll_locked(ifp, cmd, count); 2495 RL_UNLOCK(sc); 2496 return (rx_npkts); 2497} 2498 2499static int 2500re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) 2501{ 2502 struct rl_softc *sc = ifp->if_softc; 2503 int rx_npkts; 2504 2505 RL_LOCK_ASSERT(sc); 2506 2507 sc->rxcycles = count; 2508 re_rxeof(sc, &rx_npkts); 2509 re_txeof(sc); 2510 2511 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2512 re_start_locked(ifp); 2513 2514 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 2515 u_int16_t status; 2516 2517 status = CSR_READ_2(sc, RL_ISR); 2518 if (status == 0xffff) 2519 return (rx_npkts); 2520 if (status) 2521 CSR_WRITE_2(sc, RL_ISR, status); 2522 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && 2523 (sc->rl_flags & RL_FLAG_PCIE)) 2524 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2525 2526 /* 2527 * XXX check behaviour on receiver stalls. 2528 */ 2529 2530 if (status & RL_ISR_SYSTEM_ERR) { 2531 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2532 re_init_locked(sc); 2533 } 2534 } 2535 return (rx_npkts); 2536} 2537#endif /* DEVICE_POLLING */ 2538 2539static int 2540re_intr(void *arg) 2541{ 2542 struct rl_softc *sc; 2543 uint16_t status; 2544 2545 sc = arg; 2546 2547 status = CSR_READ_2(sc, RL_ISR); 2548 if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0) 2549 return (FILTER_STRAY); 2550 CSR_WRITE_2(sc, RL_IMR, 0); 2551 2552 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask); 2553 2554 return (FILTER_HANDLED); 2555} 2556 2557static void 2558re_int_task(void *arg, int npending) 2559{ 2560 struct rl_softc *sc; 2561 struct ifnet *ifp; 2562 u_int16_t status; 2563 int rval = 0; 2564 2565 sc = arg; 2566 ifp = sc->rl_ifp; 2567 2568 RL_LOCK(sc); 2569 2570 status = CSR_READ_2(sc, RL_ISR); 2571 CSR_WRITE_2(sc, RL_ISR, status); 2572 2573 if (sc->suspended || 2574 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2575 RL_UNLOCK(sc); 2576 return; 2577 } 2578 2579#ifdef DEVICE_POLLING 2580 if (ifp->if_capenable & IFCAP_POLLING) { 2581 RL_UNLOCK(sc); 2582 return; 2583 } 2584#endif 2585 2586 if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW)) 2587 rval = re_rxeof(sc, NULL); 2588 2589 /* 2590 * Some chips will ignore a second TX request issued 2591 * while an existing transmission is in progress. If 2592 * the transmitter goes idle but there are still 2593 * packets waiting to be sent, we need to restart the 2594 * channel here to flush them out. This only seems to 2595 * be required with the PCIe devices. 
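	 * (The RL_TXSTART_START write below simply asks the chip to
	 * re-poll the TX descriptor ring; if the transmitter is in fact
	 * still busy, the write is harmless.)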
2596 */ 2597 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && 2598 (sc->rl_flags & RL_FLAG_PCIE)) 2599 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2600 if (status & ( 2601#ifdef RE_TX_MODERATION 2602 RL_ISR_TIMEOUT_EXPIRED| 2603#else 2604 RL_ISR_TX_OK| 2605#endif 2606 RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL)) 2607 re_txeof(sc); 2608 2609 if (status & RL_ISR_SYSTEM_ERR) { 2610 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2611 re_init_locked(sc); 2612 } 2613 2614 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2615 re_start_locked(ifp); 2616 2617 RL_UNLOCK(sc); 2618 2619 if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) { 2620 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask); 2621 return; 2622 } 2623 2624 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 2625} 2626 2627static void 2628re_intr_msi(void *xsc) 2629{ 2630 struct rl_softc *sc; 2631 struct ifnet *ifp; 2632 uint16_t intrs, status; 2633 2634 sc = xsc; 2635 RL_LOCK(sc); 2636 2637 ifp = sc->rl_ifp; 2638#ifdef DEVICE_POLLING 2639 if (ifp->if_capenable & IFCAP_POLLING) { 2640 RL_UNLOCK(sc); 2641 return; 2642 } 2643#endif 2644 /* Disable interrupts. */ 2645 CSR_WRITE_2(sc, RL_IMR, 0); 2646 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2647 RL_UNLOCK(sc); 2648 return; 2649 } 2650 2651 intrs = RL_INTRS_CPLUS; 2652 status = CSR_READ_2(sc, RL_ISR); 2653 CSR_WRITE_2(sc, RL_ISR, status); 2654 if (sc->rl_int_rx_act > 0) { 2655 intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | 2656 RL_ISR_RX_OVERRUN); 2657 status &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | 2658 RL_ISR_RX_OVERRUN); 2659 } 2660 2661 if (status & (RL_ISR_TIMEOUT_EXPIRED | RL_ISR_RX_OK | RL_ISR_RX_ERR | 2662 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) { 2663 re_rxeof(sc, NULL); 2664 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2665 if (sc->rl_int_rx_mod != 0 && 2666 (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR | 2667 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) != 0) { 2668 /* Rearm one-shot timer. */ 2669 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2670 intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | 2671 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN); 2672 sc->rl_int_rx_act = 1; 2673 } else { 2674 intrs |= RL_ISR_RX_OK | RL_ISR_RX_ERR | 2675 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN; 2676 sc->rl_int_rx_act = 0; 2677 } 2678 } 2679 } 2680 2681 /* 2682 * Some chips will ignore a second TX request issued 2683 * while an existing transmission is in progress. If 2684 * the transmitter goes idle but there are still 2685 * packets waiting to be sent, we need to restart the 2686 * channel here to flush them out. This only seems to 2687 * be required with the PCIe devices. 
2688 */ 2689 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && 2690 (sc->rl_flags & RL_FLAG_PCIE)) 2691 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2692 if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR | RL_ISR_TX_DESC_UNAVAIL)) 2693 re_txeof(sc); 2694 2695 if (status & RL_ISR_SYSTEM_ERR) { 2696 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2697 re_init_locked(sc); 2698 } 2699 2700 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2701 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2702 re_start_locked(ifp); 2703 CSR_WRITE_2(sc, RL_IMR, intrs); 2704 } 2705 RL_UNLOCK(sc); 2706} 2707 2708static int 2709re_encap(struct rl_softc *sc, struct mbuf **m_head) 2710{ 2711 struct rl_txdesc *txd, *txd_last; 2712 bus_dma_segment_t segs[RL_NTXSEGS]; 2713 bus_dmamap_t map; 2714 struct mbuf *m_new; 2715 struct rl_desc *desc; 2716 int nsegs, prod; 2717 int i, error, ei, si; 2718 int padlen; 2719 uint32_t cmdstat, csum_flags, vlanctl; 2720 2721 RL_LOCK_ASSERT(sc); 2722 M_ASSERTPKTHDR((*m_head)); 2723 2724 /* 2725 * With some of the RealTek chips, using the checksum offload 2726 * support in conjunction with the autopadding feature results 2727 * in the transmission of corrupt frames. For example, if we 2728 * need to send a really small IP fragment that's less than 60 2729 * bytes in size, and IP header checksumming is enabled, the 2730 * resulting ethernet frame that appears on the wire will 2731 * have garbled payload. To work around this, if TX IP checksum 2732 * offload is enabled, we always manually pad short frames out 2733 * to the minimum ethernet frame size. 2734 */ 2735 if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 && 2736 (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN && 2737 ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) { 2738 padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len; 2739 if (M_WRITABLE(*m_head) == 0) { 2740 /* Get a writable copy. */ 2741 m_new = m_dup(*m_head, M_NOWAIT); 2742 m_freem(*m_head); 2743 if (m_new == NULL) { 2744 *m_head = NULL; 2745 return (ENOBUFS); 2746 } 2747 *m_head = m_new; 2748 } 2749 if ((*m_head)->m_next != NULL || 2750 M_TRAILINGSPACE(*m_head) < padlen) { 2751 m_new = m_defrag(*m_head, M_NOWAIT); 2752 if (m_new == NULL) { 2753 m_freem(*m_head); 2754 *m_head = NULL; 2755 return (ENOBUFS); 2756 } 2757 } else 2758 m_new = *m_head; 2759 2760 /* 2761 * Manually pad short frames, and zero the pad space 2762 * to avoid leaking data. 2763 */ 2764 bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen); 2765 m_new->m_pkthdr.len += padlen; 2766 m_new->m_len = m_new->m_pkthdr.len; 2767 *m_head = m_new; 2768 } 2769 2770 prod = sc->rl_ldata.rl_tx_prodidx; 2771 txd = &sc->rl_ldata.rl_tx_desc[prod]; 2772 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, 2773 *m_head, segs, &nsegs, BUS_DMA_NOWAIT); 2774 if (error == EFBIG) { 2775 m_new = m_collapse(*m_head, M_NOWAIT, RL_NTXSEGS); 2776 if (m_new == NULL) { 2777 m_freem(*m_head); 2778 *m_head = NULL; 2779 return (ENOBUFS); 2780 } 2781 *m_head = m_new; 2782 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, 2783 txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT); 2784 if (error != 0) { 2785 m_freem(*m_head); 2786 *m_head = NULL; 2787 return (error); 2788 } 2789 } else if (error != 0) 2790 return (error); 2791 if (nsegs == 0) { 2792 m_freem(*m_head); 2793 *m_head = NULL; 2794 return (EIO); 2795 } 2796 2797 /* Check for number of available descriptors. 
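	 * At least one descriptor is deliberately left unused so that a
	 * full ring never shows producer == consumer, which re_txeof()
	 * would treat as an empty ring.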
*/ 2798 if (sc->rl_ldata.rl_tx_free - nsegs <= 1) { 2799 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap); 2800 return (ENOBUFS); 2801 } 2802 2803 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, 2804 BUS_DMASYNC_PREWRITE); 2805 2806 /* 2807 * Set up checksum offload. Note: checksum offload bits must 2808 * appear in all descriptors of a multi-descriptor transmit 2809 * attempt. This is according to testing done with an 8169 2810 * chip. This is a requirement. 2811 */ 2812 vlanctl = 0; 2813 csum_flags = 0; 2814 if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2815 if ((sc->rl_flags & RL_FLAG_DESCV2) != 0) { 2816 csum_flags |= RL_TDESC_CMD_LGSEND; 2817 vlanctl |= ((uint32_t)(*m_head)->m_pkthdr.tso_segsz << 2818 RL_TDESC_CMD_MSSVALV2_SHIFT); 2819 } else { 2820 csum_flags |= RL_TDESC_CMD_LGSEND | 2821 ((uint32_t)(*m_head)->m_pkthdr.tso_segsz << 2822 RL_TDESC_CMD_MSSVAL_SHIFT); 2823 } 2824 } else { 2825 /* 2826 * Unconditionally enable IP checksum if TCP or UDP 2827 * checksum is required. Otherwise, TCP/UDP checksum 2828 * doesn't make effects. 2829 */ 2830 if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) { 2831 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) { 2832 csum_flags |= RL_TDESC_CMD_IPCSUM; 2833 if (((*m_head)->m_pkthdr.csum_flags & 2834 CSUM_TCP) != 0) 2835 csum_flags |= RL_TDESC_CMD_TCPCSUM; 2836 if (((*m_head)->m_pkthdr.csum_flags & 2837 CSUM_UDP) != 0) 2838 csum_flags |= RL_TDESC_CMD_UDPCSUM; 2839 } else { 2840 vlanctl |= RL_TDESC_CMD_IPCSUMV2; 2841 if (((*m_head)->m_pkthdr.csum_flags & 2842 CSUM_TCP) != 0) 2843 vlanctl |= RL_TDESC_CMD_TCPCSUMV2; 2844 if (((*m_head)->m_pkthdr.csum_flags & 2845 CSUM_UDP) != 0) 2846 vlanctl |= RL_TDESC_CMD_UDPCSUMV2; 2847 } 2848 } 2849 } 2850 2851 /* 2852 * Set up hardware VLAN tagging. Note: vlan tag info must 2853 * appear in all descriptors of a multi-descriptor 2854 * transmission attempt. 2855 */ 2856 if ((*m_head)->m_flags & M_VLANTAG) 2857 vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) | 2858 RL_TDESC_VLANCTL_TAG; 2859 2860 si = prod; 2861 for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) { 2862 desc = &sc->rl_ldata.rl_tx_list[prod]; 2863 desc->rl_vlanctl = htole32(vlanctl); 2864 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr)); 2865 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr)); 2866 cmdstat = segs[i].ds_len; 2867 if (i != 0) 2868 cmdstat |= RL_TDESC_CMD_OWN; 2869 if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1) 2870 cmdstat |= RL_TDESC_CMD_EOR; 2871 desc->rl_cmdstat = htole32(cmdstat | csum_flags); 2872 sc->rl_ldata.rl_tx_free--; 2873 } 2874 /* Update producer index. */ 2875 sc->rl_ldata.rl_tx_prodidx = prod; 2876 2877 /* Set EOF on the last descriptor. */ 2878 ei = RL_TX_DESC_PRV(sc, prod); 2879 desc = &sc->rl_ldata.rl_tx_list[ei]; 2880 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF); 2881 2882 desc = &sc->rl_ldata.rl_tx_list[si]; 2883 /* Set SOF and transfer ownership of packet to the chip. */ 2884 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF); 2885 2886 /* 2887 * Insure that the map for this transmission 2888 * is placed at the array index of the last descriptor 2889 * in this chain. (Swap last and first dmamaps.) 
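	 * re_txeof() unloads the dmamap and frees the mbuf stored at the
	 * EOF descriptor, so the map that was actually loaded above has to
	 * end up at that index, with the last slot's spare map moved back
	 * to the first slot.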
2890 */ 2891 txd_last = &sc->rl_ldata.rl_tx_desc[ei]; 2892 map = txd->tx_dmamap; 2893 txd->tx_dmamap = txd_last->tx_dmamap; 2894 txd_last->tx_dmamap = map; 2895 txd_last->tx_m = *m_head; 2896 2897 return (0); 2898} 2899 2900static void 2901re_start(struct ifnet *ifp) 2902{ 2903 struct rl_softc *sc; 2904 2905 sc = ifp->if_softc; 2906 RL_LOCK(sc); 2907 re_start_locked(ifp); 2908 RL_UNLOCK(sc); 2909} 2910 2911/* 2912 * Main transmit routine for C+ and gigE NICs. 2913 */ 2914static void 2915re_start_locked(struct ifnet *ifp) 2916{ 2917 struct rl_softc *sc; 2918 struct mbuf *m_head; 2919 int queued; 2920 2921 sc = ifp->if_softc; 2922 2923#ifdef DEV_NETMAP 2924 /* XXX is this necessary ? */ 2925 if (ifp->if_capenable & IFCAP_NETMAP) { 2926 struct netmap_kring *kring = &NA(ifp)->tx_rings[0]; 2927 if (sc->rl_ldata.rl_tx_prodidx != kring->nr_hwcur) { 2928 /* kick the tx unit */ 2929 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2930#ifdef RE_TX_MODERATION 2931 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2932#endif 2933 sc->rl_watchdog_timer = 5; 2934 } 2935 return; 2936 } 2937#endif /* DEV_NETMAP */ 2938 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2939 IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0) 2940 return; 2941 2942 for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 2943 sc->rl_ldata.rl_tx_free > 1;) { 2944 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2945 if (m_head == NULL) 2946 break; 2947 2948 if (re_encap(sc, &m_head) != 0) { 2949 if (m_head == NULL) 2950 break; 2951 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2952 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2953 break; 2954 } 2955 2956 /* 2957 * If there's a BPF listener, bounce a copy of this frame 2958 * to him. 2959 */ 2960 ETHER_BPF_MTAP(ifp, m_head); 2961 2962 queued++; 2963 } 2964 2965 if (queued == 0) { 2966#ifdef RE_TX_MODERATION 2967 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) 2968 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2969#endif 2970 return; 2971 } 2972 2973 /* Flush the TX descriptors */ 2974 2975 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 2976 sc->rl_ldata.rl_tx_list_map, 2977 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2978 2979 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2980 2981#ifdef RE_TX_MODERATION 2982 /* 2983 * Use the countdown timer for interrupt moderation. 2984 * 'TX done' interrupts are disabled. Instead, we reset the 2985 * countdown timer, which will begin counting until it hits 2986 * the value in the TIMERINT register, and then trigger an 2987 * interrupt. Each time we write to the TIMERCNT register, 2988 * the timer count is reset to 0. 2989 */ 2990 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2991#endif 2992 2993 /* 2994 * Set a timeout in case the chip goes out to lunch. 
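	 * The watchdog counter is decremented once per second from
	 * re_tick() via re_watchdog(), so the value of 5 set below gives
	 * the chip roughly five seconds to complete the transmission.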
2995 */ 2996 sc->rl_watchdog_timer = 5; 2997} 2998 2999static void 3000re_set_jumbo(struct rl_softc *sc, int jumbo) 3001{ 3002 3003 if (sc->rl_hwrev->rl_rev == RL_HWREV_8168E_VL) { 3004 pci_set_max_read_req(sc->rl_dev, 4096); 3005 return; 3006 } 3007 3008 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 3009 if (jumbo != 0) { 3010 CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) | 3011 RL_CFG3_JUMBO_EN0); 3012 switch (sc->rl_hwrev->rl_rev) { 3013 case RL_HWREV_8168DP: 3014 break; 3015 case RL_HWREV_8168E: 3016 CSR_WRITE_1(sc, sc->rl_cfg4, 3017 CSR_READ_1(sc, sc->rl_cfg4) | 0x01); 3018 break; 3019 default: 3020 CSR_WRITE_1(sc, sc->rl_cfg4, 3021 CSR_READ_1(sc, sc->rl_cfg4) | RL_CFG4_JUMBO_EN1); 3022 } 3023 } else { 3024 CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) & 3025 ~RL_CFG3_JUMBO_EN0); 3026 switch (sc->rl_hwrev->rl_rev) { 3027 case RL_HWREV_8168DP: 3028 break; 3029 case RL_HWREV_8168E: 3030 CSR_WRITE_1(sc, sc->rl_cfg4, 3031 CSR_READ_1(sc, sc->rl_cfg4) & ~0x01); 3032 break; 3033 default: 3034 CSR_WRITE_1(sc, sc->rl_cfg4, 3035 CSR_READ_1(sc, sc->rl_cfg4) & ~RL_CFG4_JUMBO_EN1); 3036 } 3037 } 3038 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 3039 3040 switch (sc->rl_hwrev->rl_rev) { 3041 case RL_HWREV_8168DP: 3042 pci_set_max_read_req(sc->rl_dev, 4096); 3043 break; 3044 default: 3045 if (jumbo != 0) 3046 pci_set_max_read_req(sc->rl_dev, 512); 3047 else 3048 pci_set_max_read_req(sc->rl_dev, 4096); 3049 } 3050} 3051 3052static void 3053re_init(void *xsc) 3054{ 3055 struct rl_softc *sc = xsc; 3056 3057 RL_LOCK(sc); 3058 re_init_locked(sc); 3059 RL_UNLOCK(sc); 3060} 3061 3062static void 3063re_init_locked(struct rl_softc *sc) 3064{ 3065 struct ifnet *ifp = sc->rl_ifp; 3066 struct mii_data *mii; 3067 uint32_t reg; 3068 uint16_t cfg; 3069 union { 3070 uint32_t align_dummy; 3071 u_char eaddr[ETHER_ADDR_LEN]; 3072 } eaddr; 3073 3074 RL_LOCK_ASSERT(sc); 3075 3076 mii = device_get_softc(sc->rl_miibus); 3077 3078 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3079 return; 3080 3081 /* 3082 * Cancel pending I/O and free all RX/TX buffers. 3083 */ 3084 re_stop(sc); 3085 3086 /* Put controller into known state. */ 3087 re_reset(sc); 3088 3089 /* 3090 * For C+ mode, initialize the RX descriptors and mbufs. 3091 */ 3092 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { 3093 if (ifp->if_mtu > RL_MTU) { 3094 if (re_jrx_list_init(sc) != 0) { 3095 device_printf(sc->rl_dev, 3096 "no memory for jumbo RX buffers\n"); 3097 re_stop(sc); 3098 return; 3099 } 3100 /* Disable checksum offloading for jumbo frames. */ 3101 ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_TSO4); 3102 ifp->if_hwassist &= ~(RE_CSUM_FEATURES | CSUM_TSO); 3103 } else { 3104 if (re_rx_list_init(sc) != 0) { 3105 device_printf(sc->rl_dev, 3106 "no memory for RX buffers\n"); 3107 re_stop(sc); 3108 return; 3109 } 3110 } 3111 re_set_jumbo(sc, ifp->if_mtu > RL_MTU); 3112 } else { 3113 if (re_rx_list_init(sc) != 0) { 3114 device_printf(sc->rl_dev, "no memory for RX buffers\n"); 3115 re_stop(sc); 3116 return; 3117 } 3118 if ((sc->rl_flags & RL_FLAG_PCIE) != 0 && 3119 pci_get_device(sc->rl_dev) != RT_DEVICEID_8101E) { 3120 if (ifp->if_mtu > RL_MTU) 3121 pci_set_max_read_req(sc->rl_dev, 512); 3122 else 3123 pci_set_max_read_req(sc->rl_dev, 4096); 3124 } 3125 } 3126 re_tx_list_init(sc); 3127 3128 /* 3129 * Enable C+ RX and TX mode, as well as VLAN stripping and 3130 * RX checksum offload. We must configure the C+ register 3131 * before all others. 
3132 */ 3133 cfg = RL_CPLUSCMD_PCI_MRW; 3134 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 3135 cfg |= RL_CPLUSCMD_RXCSUM_ENB; 3136 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 3137 cfg |= RL_CPLUSCMD_VLANSTRIP; 3138 if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) { 3139 cfg |= RL_CPLUSCMD_MACSTAT_DIS; 3140 /* XXX magic. */ 3141 cfg |= 0x0001; 3142 } else 3143 cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB; 3144 CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg); 3145 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SC || 3146 sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) { 3147 reg = 0x000fff00; 3148 if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_PCI66MHZ) != 0) 3149 reg |= 0x000000ff; 3150 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) 3151 reg |= 0x00f00000; 3152 CSR_WRITE_4(sc, 0x7c, reg); 3153 /* Disable interrupt mitigation. */ 3154 CSR_WRITE_2(sc, 0xe2, 0); 3155 } 3156 /* 3157 * Disable TSO if interface MTU size is greater than MSS 3158 * allowed in controller. 3159 */ 3160 if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) { 3161 ifp->if_capenable &= ~IFCAP_TSO4; 3162 ifp->if_hwassist &= ~CSUM_TSO; 3163 } 3164 3165 /* 3166 * Init our MAC address. Even though the chipset 3167 * documentation doesn't mention it, we need to enter "Config 3168 * register write enable" mode to modify the ID registers. 3169 */ 3170 /* Copy MAC address on stack to align. */ 3171 bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN); 3172 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 3173 CSR_WRITE_4(sc, RL_IDR0, 3174 htole32(*(u_int32_t *)(&eaddr.eaddr[0]))); 3175 CSR_WRITE_4(sc, RL_IDR4, 3176 htole32(*(u_int32_t *)(&eaddr.eaddr[4]))); 3177 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 3178 3179 /* 3180 * Load the addresses of the RX and TX lists into the chip. 3181 */ 3182 3183 CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, 3184 RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr)); 3185 CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, 3186 RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr)); 3187 3188 CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, 3189 RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr)); 3190 CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, 3191 RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr)); 3192 3193 if ((sc->rl_flags & RL_FLAG_RXDV_GATED) != 0) 3194 CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) & 3195 ~0x00080000); 3196 3197 /* 3198 * Set the initial TX configuration. 3199 */ 3200 if (sc->rl_testmode) { 3201 if (sc->rl_type == RL_8169) 3202 CSR_WRITE_4(sc, RL_TXCFG, 3203 RL_TXCFG_CONFIG|RL_LOOPTEST_ON); 3204 else 3205 CSR_WRITE_4(sc, RL_TXCFG, 3206 RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS); 3207 } else 3208 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); 3209 3210 CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16); 3211 3212 /* 3213 * Set the initial RX configuration. 3214 */ 3215 re_set_rxmode(sc); 3216 3217 /* Configure interrupt moderation. */ 3218 if (sc->rl_type == RL_8169) { 3219 /* Magic from vendor. */ 3220 CSR_WRITE_2(sc, RL_INTRMOD, 0x5100); 3221 } 3222 3223 /* 3224 * Enable transmit and receive. 3225 */ 3226 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB); 3227 3228#ifdef DEVICE_POLLING 3229 /* 3230 * Disable interrupts if we are polling. 3231 */ 3232 if (ifp->if_capenable & IFCAP_POLLING) 3233 CSR_WRITE_2(sc, RL_IMR, 0); 3234 else /* otherwise ... */ 3235#endif 3236 3237 /* 3238 * Enable interrupts. 
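	 * In test mode the mask is left at zero; otherwise RL_INTRS_CPLUS
	 * is unmasked, and the same bits are then written to the ISR to
	 * clear any interrupts still pending.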
3239 */ 3240 if (sc->rl_testmode) 3241 CSR_WRITE_2(sc, RL_IMR, 0); 3242 else 3243 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 3244 CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS); 3245 3246 /* Set initial TX threshold */ 3247 sc->rl_txthresh = RL_TX_THRESH_INIT; 3248 3249 /* Start RX/TX process. */ 3250 CSR_WRITE_4(sc, RL_MISSEDPKT, 0); 3251 3252 /* 3253 * Initialize the timer interrupt register so that 3254 * a timer interrupt will be generated once the timer 3255 * reaches a certain number of ticks. The timer is 3256 * reloaded on each transmit. 3257 */ 3258#ifdef RE_TX_MODERATION 3259 /* 3260 * Use timer interrupt register to moderate TX interrupt 3261 * moderation, which dramatically improves TX frame rate. 3262 */ 3263 if (sc->rl_type == RL_8169) 3264 CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800); 3265 else 3266 CSR_WRITE_4(sc, RL_TIMERINT, 0x400); 3267#else 3268 /* 3269 * Use timer interrupt register to moderate RX interrupt 3270 * moderation. 3271 */ 3272 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 && 3273 intr_filter == 0) { 3274 if (sc->rl_type == RL_8169) 3275 CSR_WRITE_4(sc, RL_TIMERINT_8169, 3276 RL_USECS(sc->rl_int_rx_mod)); 3277 } else { 3278 if (sc->rl_type == RL_8169) 3279 CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(0)); 3280 } 3281#endif 3282 3283 /* 3284 * For 8169 gigE NICs, set the max allowed RX packet 3285 * size so we can receive jumbo frames. 3286 */ 3287 if (sc->rl_type == RL_8169) { 3288 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { 3289 /* 3290 * For controllers that use new jumbo frame scheme, 3291 * set maximum size of jumbo frame depending on 3292 * controller revisions. 3293 */ 3294 if (ifp->if_mtu > RL_MTU) 3295 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 3296 sc->rl_hwrev->rl_max_mtu + 3297 ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN + 3298 ETHER_CRC_LEN); 3299 else 3300 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 3301 RE_RX_DESC_BUFLEN); 3302 } else if ((sc->rl_flags & RL_FLAG_PCIE) != 0 && 3303 sc->rl_hwrev->rl_max_mtu == RL_MTU) { 3304 /* RTL810x has no jumbo frame support. */ 3305 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN); 3306 } else 3307 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383); 3308 } 3309 3310 if (sc->rl_testmode) 3311 return; 3312 3313 CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) | 3314 RL_CFG1_DRVLOAD); 3315 3316 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3317 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3318 3319 sc->rl_flags &= ~RL_FLAG_LINK; 3320 mii_mediachg(mii); 3321 3322 sc->rl_watchdog_timer = 0; 3323 callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); 3324} 3325 3326/* 3327 * Set media options. 3328 */ 3329static int 3330re_ifmedia_upd(struct ifnet *ifp) 3331{ 3332 struct rl_softc *sc; 3333 struct mii_data *mii; 3334 int error; 3335 3336 sc = ifp->if_softc; 3337 mii = device_get_softc(sc->rl_miibus); 3338 RL_LOCK(sc); 3339 error = mii_mediachg(mii); 3340 RL_UNLOCK(sc); 3341 3342 return (error); 3343} 3344 3345/* 3346 * Report current media status. 
3347 */ 3348static void 3349re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 3350{ 3351 struct rl_softc *sc; 3352 struct mii_data *mii; 3353 3354 sc = ifp->if_softc; 3355 mii = device_get_softc(sc->rl_miibus); 3356 3357 RL_LOCK(sc); 3358 mii_pollstat(mii); 3359 ifmr->ifm_active = mii->mii_media_active; 3360 ifmr->ifm_status = mii->mii_media_status; 3361 RL_UNLOCK(sc); 3362} 3363 3364static int 3365re_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 3366{ 3367 struct rl_softc *sc = ifp->if_softc; 3368 struct ifreq *ifr = (struct ifreq *) data; 3369 struct mii_data *mii; 3370 int error = 0; 3371 3372 switch (command) { 3373 case SIOCSIFMTU: 3374 if (ifr->ifr_mtu < ETHERMIN || 3375 ifr->ifr_mtu > sc->rl_hwrev->rl_max_mtu || 3376 ((sc->rl_flags & RL_FLAG_FASTETHER) != 0 && 3377 ifr->ifr_mtu > RL_MTU)) { 3378 error = EINVAL; 3379 break; 3380 } 3381 RL_LOCK(sc); 3382 if (ifp->if_mtu != ifr->ifr_mtu) { 3383 ifp->if_mtu = ifr->ifr_mtu; 3384 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && 3385 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 3386 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3387 re_init_locked(sc); 3388 } 3389 if (ifp->if_mtu > RL_TSO_MTU && 3390 (ifp->if_capenable & IFCAP_TSO4) != 0) { 3391 ifp->if_capenable &= ~(IFCAP_TSO4 | 3392 IFCAP_VLAN_HWTSO); 3393 ifp->if_hwassist &= ~CSUM_TSO; 3394 } 3395 VLAN_CAPABILITIES(ifp); 3396 } 3397 RL_UNLOCK(sc); 3398 break; 3399 case SIOCSIFFLAGS: 3400 RL_LOCK(sc); 3401 if ((ifp->if_flags & IFF_UP) != 0) { 3402 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 3403 if (((ifp->if_flags ^ sc->rl_if_flags) 3404 & (IFF_PROMISC | IFF_ALLMULTI)) != 0) 3405 re_set_rxmode(sc); 3406 } else 3407 re_init_locked(sc); 3408 } else { 3409 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3410 re_stop(sc); 3411 } 3412 sc->rl_if_flags = ifp->if_flags; 3413 RL_UNLOCK(sc); 3414 break; 3415 case SIOCADDMULTI: 3416 case SIOCDELMULTI: 3417 RL_LOCK(sc); 3418 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3419 re_set_rxmode(sc); 3420 RL_UNLOCK(sc); 3421 break; 3422 case SIOCGIFMEDIA: 3423 case SIOCSIFMEDIA: 3424 mii = device_get_softc(sc->rl_miibus); 3425 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 3426 break; 3427 case SIOCSIFCAP: 3428 { 3429 int mask, reinit; 3430 3431 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3432 reinit = 0; 3433#ifdef DEVICE_POLLING 3434 if (mask & IFCAP_POLLING) { 3435 if (ifr->ifr_reqcap & IFCAP_POLLING) { 3436 error = ether_poll_register(re_poll, ifp); 3437 if (error) 3438 return (error); 3439 RL_LOCK(sc); 3440 /* Disable interrupts */ 3441 CSR_WRITE_2(sc, RL_IMR, 0x0000); 3442 ifp->if_capenable |= IFCAP_POLLING; 3443 RL_UNLOCK(sc); 3444 } else { 3445 error = ether_poll_deregister(ifp); 3446 /* Enable interrupts. 
*/ 3447 RL_LOCK(sc); 3448 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 3449 ifp->if_capenable &= ~IFCAP_POLLING; 3450 RL_UNLOCK(sc); 3451 } 3452 } 3453#endif /* DEVICE_POLLING */ 3454 RL_LOCK(sc); 3455 if ((mask & IFCAP_TXCSUM) != 0 && 3456 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 3457 ifp->if_capenable ^= IFCAP_TXCSUM; 3458 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 3459 ifp->if_hwassist |= RE_CSUM_FEATURES; 3460 else 3461 ifp->if_hwassist &= ~RE_CSUM_FEATURES; 3462 reinit = 1; 3463 } 3464 if ((mask & IFCAP_RXCSUM) != 0 && 3465 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) { 3466 ifp->if_capenable ^= IFCAP_RXCSUM; 3467 reinit = 1; 3468 } 3469 if ((mask & IFCAP_TSO4) != 0 && 3470 (ifp->if_capabilities & IFCAP_TSO4) != 0) { 3471 ifp->if_capenable ^= IFCAP_TSO4; 3472 if ((IFCAP_TSO4 & ifp->if_capenable) != 0) 3473 ifp->if_hwassist |= CSUM_TSO; 3474 else 3475 ifp->if_hwassist &= ~CSUM_TSO; 3476 if (ifp->if_mtu > RL_TSO_MTU && 3477 (ifp->if_capenable & IFCAP_TSO4) != 0) { 3478 ifp->if_capenable &= ~IFCAP_TSO4; 3479 ifp->if_hwassist &= ~CSUM_TSO; 3480 } 3481 } 3482 if ((mask & IFCAP_VLAN_HWTSO) != 0 && 3483 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) 3484 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 3485 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 3486 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { 3487 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 3488 /* TSO over VLAN requires VLAN hardware tagging. */ 3489 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 3490 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO; 3491 reinit = 1; 3492 } 3493 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && 3494 (mask & (IFCAP_HWCSUM | IFCAP_TSO4 | 3495 IFCAP_VLAN_HWTSO)) != 0) 3496 reinit = 1; 3497 if ((mask & IFCAP_WOL) != 0 && 3498 (ifp->if_capabilities & IFCAP_WOL) != 0) { 3499 if ((mask & IFCAP_WOL_UCAST) != 0) 3500 ifp->if_capenable ^= IFCAP_WOL_UCAST; 3501 if ((mask & IFCAP_WOL_MCAST) != 0) 3502 ifp->if_capenable ^= IFCAP_WOL_MCAST; 3503 if ((mask & IFCAP_WOL_MAGIC) != 0) 3504 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 3505 } 3506 if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING) { 3507 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3508 re_init_locked(sc); 3509 } 3510 RL_UNLOCK(sc); 3511 VLAN_CAPABILITIES(ifp); 3512 } 3513 break; 3514 default: 3515 error = ether_ioctl(ifp, command, data); 3516 break; 3517 } 3518 3519 return (error); 3520} 3521 3522static void 3523re_watchdog(struct rl_softc *sc) 3524{ 3525 struct ifnet *ifp; 3526 3527 RL_LOCK_ASSERT(sc); 3528 3529 if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0) 3530 return; 3531 3532 ifp = sc->rl_ifp; 3533 re_txeof(sc); 3534 if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) { 3535 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 3536 "-- recovering\n"); 3537 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3538 re_start_locked(ifp); 3539 return; 3540 } 3541 3542 if_printf(ifp, "watchdog timeout\n"); 3543 ifp->if_oerrors++; 3544 3545 re_rxeof(sc, NULL); 3546 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3547 re_init_locked(sc); 3548 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3549 re_start_locked(ifp); 3550} 3551 3552/* 3553 * Stop the adapter and free any mbufs allocated to the 3554 * RX and TX lists. 
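 * Called with the driver lock held; the stat callout and the watchdog
 * timer are stopped before any buffers are released.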
3555 */ 3556static void 3557re_stop(struct rl_softc *sc) 3558{ 3559 int i; 3560 struct ifnet *ifp; 3561 struct rl_txdesc *txd; 3562 struct rl_rxdesc *rxd; 3563 3564 RL_LOCK_ASSERT(sc); 3565 3566 ifp = sc->rl_ifp; 3567 3568 sc->rl_watchdog_timer = 0; 3569 callout_stop(&sc->rl_stat_callout); 3570 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 3571 3572 /* 3573 * Disable accepting frames to put RX MAC into idle state. 3574 * Otherwise it's possible to get frames while stop command 3575 * execution is in progress and controller can DMA the frame 3576 * to already freed RX buffer during that period. 3577 */ 3578 CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) & 3579 ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI | 3580 RL_RXCFG_RX_BROAD)); 3581 3582 if ((sc->rl_flags & RL_FLAG_WAIT_TXPOLL) != 0) { 3583 for (i = RL_TIMEOUT; i > 0; i--) { 3584 if ((CSR_READ_1(sc, sc->rl_txstart) & 3585 RL_TXSTART_START) == 0) 3586 break; 3587 DELAY(20); 3588 } 3589 if (i == 0) 3590 device_printf(sc->rl_dev, 3591 "stopping TX poll timed out!\n"); 3592 CSR_WRITE_1(sc, RL_COMMAND, 0x00); 3593 } else if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0) { 3594 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB | 3595 RL_CMD_RX_ENB); 3596 if ((sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) != 0) { 3597 for (i = RL_TIMEOUT; i > 0; i--) { 3598 if ((CSR_READ_4(sc, RL_TXCFG) & 3599 RL_TXCFG_QUEUE_EMPTY) != 0) 3600 break; 3601 DELAY(100); 3602 } 3603 if (i == 0) 3604 device_printf(sc->rl_dev, 3605 "stopping TXQ timed out!\n"); 3606 } 3607 } else 3608 CSR_WRITE_1(sc, RL_COMMAND, 0x00); 3609 DELAY(1000); 3610 CSR_WRITE_2(sc, RL_IMR, 0x0000); 3611 CSR_WRITE_2(sc, RL_ISR, 0xFFFF); 3612 3613 if (sc->rl_head != NULL) { 3614 m_freem(sc->rl_head); 3615 sc->rl_head = sc->rl_tail = NULL; 3616 } 3617 3618 /* Free the TX list buffers. */ 3619 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { 3620 txd = &sc->rl_ldata.rl_tx_desc[i]; 3621 if (txd->tx_m != NULL) { 3622 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, 3623 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 3624 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, 3625 txd->tx_dmamap); 3626 m_freem(txd->tx_m); 3627 txd->tx_m = NULL; 3628 } 3629 } 3630 3631 /* Free the RX list buffers. */ 3632 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 3633 rxd = &sc->rl_ldata.rl_rx_desc[i]; 3634 if (rxd->rx_m != NULL) { 3635 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, 3636 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 3637 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, 3638 rxd->rx_dmamap); 3639 m_freem(rxd->rx_m); 3640 rxd->rx_m = NULL; 3641 } 3642 } 3643 3644 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { 3645 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 3646 rxd = &sc->rl_ldata.rl_jrx_desc[i]; 3647 if (rxd->rx_m != NULL) { 3648 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, 3649 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 3650 bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, 3651 rxd->rx_dmamap); 3652 m_freem(rxd->rx_m); 3653 rxd->rx_m = NULL; 3654 } 3655 } 3656 } 3657} 3658 3659/* 3660 * Device suspend routine. Stop the interface and save some PCI 3661 * settings in case the BIOS doesn't restore them properly on 3662 * resume. 3663 */ 3664static int 3665re_suspend(device_t dev) 3666{ 3667 struct rl_softc *sc; 3668 3669 sc = device_get_softc(dev); 3670 3671 RL_LOCK(sc); 3672 re_stop(sc); 3673 re_setwol(sc); 3674 sc->suspended = 1; 3675 RL_UNLOCK(sc); 3676 3677 return (0); 3678} 3679 3680/* 3681 * Device resume routine. 
Restore some PCI settings in case the BIOS 3682 * doesn't, re-enable busmastering, and restart the interface if 3683 * appropriate. 3684 */ 3685static int 3686re_resume(device_t dev) 3687{ 3688 struct rl_softc *sc; 3689 struct ifnet *ifp; 3690 3691 sc = device_get_softc(dev); 3692 3693 RL_LOCK(sc); 3694 3695 ifp = sc->rl_ifp; 3696 /* Take controller out of sleep mode. */ 3697 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { 3698 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) 3699 CSR_WRITE_1(sc, RL_GPIO, 3700 CSR_READ_1(sc, RL_GPIO) | 0x01); 3701 } 3702 3703 /* 3704 * Clear WOL matching such that normal Rx filtering 3705 * wouldn't interfere with WOL patterns. 3706 */ 3707 re_clrwol(sc); 3708 3709 /* reinitialize interface if necessary */ 3710 if (ifp->if_flags & IFF_UP) 3711 re_init_locked(sc); 3712 3713 sc->suspended = 0; 3714 RL_UNLOCK(sc); 3715 3716 return (0); 3717} 3718 3719/* 3720 * Stop all chip I/O so that the kernel's probe routines don't 3721 * get confused by errant DMAs when rebooting. 3722 */ 3723static int 3724re_shutdown(device_t dev) 3725{ 3726 struct rl_softc *sc; 3727 3728 sc = device_get_softc(dev); 3729 3730 RL_LOCK(sc); 3731 re_stop(sc); 3732 /* 3733 * Mark interface as down since otherwise we will panic if 3734 * interrupt comes in later on, which can happen in some 3735 * cases. 3736 */ 3737 sc->rl_ifp->if_flags &= ~IFF_UP; 3738 re_setwol(sc); 3739 RL_UNLOCK(sc); 3740 3741 return (0); 3742} 3743 3744static void 3745re_set_linkspeed(struct rl_softc *sc) 3746{ 3747 struct mii_softc *miisc; 3748 struct mii_data *mii; 3749 int aneg, i, phyno; 3750 3751 RL_LOCK_ASSERT(sc); 3752 3753 mii = device_get_softc(sc->rl_miibus); 3754 mii_pollstat(mii); 3755 aneg = 0; 3756 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 3757 (IFM_ACTIVE | IFM_AVALID)) { 3758 switch IFM_SUBTYPE(mii->mii_media_active) { 3759 case IFM_10_T: 3760 case IFM_100_TX: 3761 return; 3762 case IFM_1000_T: 3763 aneg++; 3764 break; 3765 default: 3766 break; 3767 } 3768 } 3769 miisc = LIST_FIRST(&mii->mii_phys); 3770 phyno = miisc->mii_phy; 3771 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 3772 PHY_RESET(miisc); 3773 re_miibus_writereg(sc->rl_dev, phyno, MII_100T2CR, 0); 3774 re_miibus_writereg(sc->rl_dev, phyno, 3775 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 3776 re_miibus_writereg(sc->rl_dev, phyno, 3777 MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG); 3778 DELAY(1000); 3779 if (aneg != 0) { 3780 /* 3781 * Poll link state until re(4) get a 10/100Mbps link. 3782 */ 3783 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 3784 mii_pollstat(mii); 3785 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) 3786 == (IFM_ACTIVE | IFM_AVALID)) { 3787 switch (IFM_SUBTYPE(mii->mii_media_active)) { 3788 case IFM_10_T: 3789 case IFM_100_TX: 3790 return; 3791 default: 3792 break; 3793 } 3794 } 3795 RL_UNLOCK(sc); 3796 pause("relnk", hz); 3797 RL_LOCK(sc); 3798 } 3799 if (i == MII_ANEGTICKS_GIGE) 3800 device_printf(sc->rl_dev, 3801 "establishing a link failed, WOL may not work!"); 3802 } 3803 /* 3804 * No link, force MAC to have 100Mbps, full-duplex link. 3805 * MAC does not require reprogramming on resolved speed/duplex, 3806 * so this is just for completeness. 
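 * (This function is called from re_setwol() when RL_FLAG_WOL_MANLINK is
 * set, i.e. on controllers whose link must be brought down to
 * 10/100Mbps for wake-on-LAN to be functional.)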
3807 */ 3808 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 3809 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 3810} 3811 3812static void 3813re_setwol(struct rl_softc *sc) 3814{ 3815 struct ifnet *ifp; 3816 int pmc; 3817 uint16_t pmstat; 3818 uint8_t v; 3819 3820 RL_LOCK_ASSERT(sc); 3821 3822 if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0) 3823 return; 3824 3825 ifp = sc->rl_ifp; 3826 /* Put controller into sleep mode. */ 3827 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { 3828 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) 3829 CSR_WRITE_1(sc, RL_GPIO, 3830 CSR_READ_1(sc, RL_GPIO) & ~0x01); 3831 } 3832 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 3833 re_set_rxmode(sc); 3834 if ((sc->rl_flags & RL_FLAG_WOL_MANLINK) != 0) 3835 re_set_linkspeed(sc); 3836 if ((sc->rl_flags & RL_FLAG_WOLRXENB) != 0) 3837 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB); 3838 } 3839 /* Enable config register write. */ 3840 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 3841 3842 /* Enable PME. */ 3843 v = CSR_READ_1(sc, sc->rl_cfg1); 3844 v &= ~RL_CFG1_PME; 3845 if ((ifp->if_capenable & IFCAP_WOL) != 0) 3846 v |= RL_CFG1_PME; 3847 CSR_WRITE_1(sc, sc->rl_cfg1, v); 3848 3849 v = CSR_READ_1(sc, sc->rl_cfg3); 3850 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); 3851 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 3852 v |= RL_CFG3_WOL_MAGIC; 3853 CSR_WRITE_1(sc, sc->rl_cfg3, v); 3854 3855 v = CSR_READ_1(sc, sc->rl_cfg5); 3856 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST | 3857 RL_CFG5_WOL_LANWAKE); 3858 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) 3859 v |= RL_CFG5_WOL_UCAST; 3860 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 3861 v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST; 3862 if ((ifp->if_capenable & IFCAP_WOL) != 0) 3863 v |= RL_CFG5_WOL_LANWAKE; 3864 CSR_WRITE_1(sc, sc->rl_cfg5, v); 3865 3866 /* Config register write done. */ 3867 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 3868 3869 if ((ifp->if_capenable & IFCAP_WOL) == 0 && 3870 (sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) 3871 CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) & ~0x80); 3872 /* 3873 * It seems that hardware resets its link speed to 100Mbps in 3874 * power down mode so switching to 100Mbps in driver is not 3875 * needed. 3876 */ 3877 3878 /* Request PME if WOL is requested. */ 3879 pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2); 3880 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 3881 if ((ifp->if_capenable & IFCAP_WOL) != 0) 3882 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 3883 pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 3884} 3885 3886static void 3887re_clrwol(struct rl_softc *sc) 3888{ 3889 int pmc; 3890 uint8_t v; 3891 3892 RL_LOCK_ASSERT(sc); 3893 3894 if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0) 3895 return; 3896 3897 /* Enable config register write. */ 3898 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 3899 3900 v = CSR_READ_1(sc, sc->rl_cfg3); 3901 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); 3902 CSR_WRITE_1(sc, sc->rl_cfg3, v); 3903 3904 /* Config register write done. 
static void
re_clrwol(struct rl_softc *sc)
{
	int			pmc;
	uint8_t			v;

	RL_LOCK_ASSERT(sc);

	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
		return;

	/* Enable config register write. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);

	v = CSR_READ_1(sc, sc->rl_cfg3);
	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
	CSR_WRITE_1(sc, sc->rl_cfg3, v);

	/* Config register write done. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	v = CSR_READ_1(sc, sc->rl_cfg5);
	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
	v &= ~RL_CFG5_WOL_LANWAKE;
	CSR_WRITE_1(sc, sc->rl_cfg5, v);
}

static void
re_add_sysctls(struct rl_softc *sc)
{
	struct sysctl_ctx_list	*ctx;
	struct sysctl_oid_list	*children;
	int			error;

	ctx = device_get_sysctl_ctx(sc->rl_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, re_sysctl_stats, "I",
	    "Statistics Information");
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
		return;

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "int_rx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->rl_int_rx_mod, 0,
	    sysctl_hw_re_int_mod, "I", "re RX interrupt moderation");
	/* Pull in device tunables. */
	sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->rl_dev),
	    device_get_unit(sc->rl_dev), "int_rx_mod", &sc->rl_int_rx_mod);
	if (error == 0) {
		if (sc->rl_int_rx_mod < RL_TIMER_MIN ||
		    sc->rl_int_rx_mod > RL_TIMER_MAX) {
			device_printf(sc->rl_dev, "int_rx_mod value out of "
			    "range; using default: %d\n",
			    RL_TIMER_DEFAULT);
			sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
		}
	}
}

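/*
 * Usage note (illustrative; "re0"/unit 0 is only an example): the nodes
 * created in re_add_sysctls() above live under the device's sysctl tree,
 * so from userland they look roughly like:
 *
 *	# sysctl dev.re.0.stats=1	(writing 1 triggers the dump below)
 *	# sysctl dev.re.0.int_rx_mod=N	(N must be within RL_TIMER_MIN..RL_TIMER_MAX)
 *
 * The int_rx_mod node only exists when the chip attached with MSI or
 * MSI-X, and its initial value may also come from a device hint such as
 * hint.re.0.int_rx_mod="N", which resource_int_value() above picks up.
 */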
%ju\n", 4012 (uintmax_t)le64toh(stats->rl_rx_bcasts)); 4013 printf("Rx multicast frames : %u\n", 4014 le32toh(stats->rl_rx_mcasts)); 4015 printf("Tx aborts : %u\n", 4016 (uint32_t)le16toh(stats->rl_tx_aborts)); 4017 printf("Tx underruns : %u\n", 4018 (uint32_t)le16toh(stats->rl_rx_underruns)); 4019 } 4020 4021 return (error); 4022} 4023 4024static int 4025sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 4026{ 4027 int error, value; 4028 4029 if (arg1 == NULL) 4030 return (EINVAL); 4031 value = *(int *)arg1; 4032 error = sysctl_handle_int(oidp, &value, 0, req); 4033 if (error || req->newptr == NULL) 4034 return (error); 4035 if (value < low || value > high) 4036 return (EINVAL); 4037 *(int *)arg1 = value; 4038 4039 return (0); 4040} 4041 4042static int 4043sysctl_hw_re_int_mod(SYSCTL_HANDLER_ARGS) 4044{ 4045 4046 return (sysctl_int_range(oidp, arg1, arg2, req, RL_TIMER_MIN, 4047 RL_TIMER_MAX)); 4048} 4049