1/*- 2 * Copyright (c) 2009, Oleksandr Tymoshenko 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * AR71XX gigabit ethernet driver
 */
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include "opt_arge.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/taskqueue.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/resource.h>
#include <vm/vm_param.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/pmap.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

/* NOTE(review): duplicate of the "opt_arge.h" include above; harmless
 * (opt headers only carry #defines) but could be dropped. */
#include "opt_arge.h"

#if defined(ARGE_MDIO)
#include <dev/etherswitch/mdio.h>
#include <dev/etherswitch/miiproxy.h>
#include "mdio_if.h"
#endif


MODULE_DEPEND(arge, ether, 1, 1, 1);
MODULE_DEPEND(arge, miibus, 1, 1, 1);
MODULE_VERSION(arge, 1);

#include "miibus_if.h"

#include <mips/atheros/ar71xxreg.h>
#include <mips/atheros/if_argevar.h>
#include <mips/atheros/ar71xx_setup.h>
#include <mips/atheros/ar71xx_cpudef.h>

/*
 * Debugging flag bits for sc->arge_debug (ARGE_DEBUG kernels only);
 * each bit enables one category of ARGEDEBUG() output and is settable
 * at runtime via the per-device "debug" sysctl.
 */
typedef enum {
	ARGE_DBG_MII = 0x00000001,
	ARGE_DBG_INTR = 0x00000002,
	ARGE_DBG_TX = 0x00000004,
	ARGE_DBG_RX = 0x00000008,
	ARGE_DBG_ERR = 0x00000010,
	ARGE_DBG_RESET = 0x00000020,
	ARGE_DBG_PLL = 0x00000040,
} arge_debug_flags;

/* Printable names for the "miimode" hint value (indexed by hint). */
static const char * arge_miicfg_str[] = {
	"NONE",
	"GMII",
	"MII",
	"RGMII",
	"RMII"
};

#ifdef ARGE_DEBUG
/* Emit a device_printf() when any of the _m bits is set in arge_debug. */
#define	ARGEDEBUG(_sc, _m, ...)						\
	do {								\
		if ((_m) & (_sc)->arge_debug)				\
			device_printf((_sc)->arge_dev, __VA_ARGS__);	\
	} while (0)
#else
#define	ARGEDEBUG(_sc, _m, ...)
#endif

/* Forward declarations for device methods and internal helpers. */
static int arge_attach(device_t);
static int arge_detach(device_t);
static void arge_flush_ddr(struct arge_softc *);
static int arge_ifmedia_upd(struct ifnet *);
static void arge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int arge_ioctl(struct ifnet *, u_long, caddr_t);
static void arge_init(void *);
static void arge_init_locked(struct arge_softc *);
static void arge_link_task(void *, int);
static void arge_update_link_locked(struct arge_softc *sc);
static void arge_set_pll(struct arge_softc *, int, int);
static int arge_miibus_readreg(device_t, int, int);
static void arge_miibus_statchg(device_t);
static int arge_miibus_writereg(device_t, int, int, int);
static int arge_probe(device_t);
static void arge_reset_dma(struct arge_softc *);
static int arge_resume(device_t);
static int arge_rx_ring_init(struct arge_softc *);
static void arge_rx_ring_free(struct arge_softc *sc);
static int arge_tx_ring_init(struct arge_softc *);
static void arge_tx_ring_free(struct arge_softc *);
#ifdef DEVICE_POLLING
static int arge_poll(struct ifnet *, enum poll_cmd, int);
#endif
static int arge_shutdown(device_t);
static void arge_start(struct ifnet *);
static void arge_start_locked(struct ifnet *);
static void arge_stop(struct arge_softc *);
static int arge_suspend(device_t);

static int arge_rx_locked(struct arge_softc *);
static void arge_tx_locked(struct arge_softc *);
static void arge_intr(void *);
static int arge_intr_filter(void *);
static void arge_tick(void *);

static void arge_hinted_child(device_t bus, const char *dname, int dunit);

/*
 * ifmedia callbacks for multiPHY MAC
 */
void arge_multiphy_mediastatus(struct ifnet *, struct ifmediareq *);
int arge_multiphy_mediachange(struct ifnet *);

static void arge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int arge_dma_alloc(struct arge_softc *);
static void arge_dma_free(struct arge_softc *);
static int arge_newbuf(struct arge_softc *, int);
static __inline void arge_fixup_rx(struct mbuf *);

static device_method_t arge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, arge_probe),
	DEVMETHOD(device_attach, arge_attach),
	DEVMETHOD(device_detach, arge_detach),
	DEVMETHOD(device_suspend, arge_suspend),
	DEVMETHOD(device_resume, arge_resume),
	DEVMETHOD(device_shutdown, arge_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg, arge_miibus_readreg),
	DEVMETHOD(miibus_writereg, arge_miibus_writereg),
	DEVMETHOD(miibus_statchg, arge_miibus_statchg),

	/* bus interface */
	DEVMETHOD(bus_add_child, device_add_child_ordered),
	DEVMETHOD(bus_hinted_child, arge_hinted_child),

	DEVMETHOD_END
};

static driver_t arge_driver = {
	"arge",
	arge_methods,
	sizeof(struct arge_softc)
};

static devclass_t arge_devclass;

DRIVER_MODULE(arge, nexus, arge_driver, arge_devclass, 0, 0);
DRIVER_MODULE(miibus, arge, miibus_driver, miibus_devclass, 0, 0);

#if defined(ARGE_MDIO)
static int argemdio_probe(device_t);
static int argemdio_attach(device_t);
static int argemdio_detach(device_t);

/*
 * Declare an additional, separate driver for accessing the MDIO bus.
214 */ 215static device_method_t argemdio_methods[] = { 216 /* Device interface */ 217 DEVMETHOD(device_probe, argemdio_probe), 218 DEVMETHOD(device_attach, argemdio_attach), 219 DEVMETHOD(device_detach, argemdio_detach), 220 221 /* bus interface */ 222 DEVMETHOD(bus_add_child, device_add_child_ordered), 223 224 /* MDIO access */ 225 DEVMETHOD(mdio_readreg, arge_miibus_readreg), 226 DEVMETHOD(mdio_writereg, arge_miibus_writereg), 227}; 228 229DEFINE_CLASS_0(argemdio, argemdio_driver, argemdio_methods, 230 sizeof(struct arge_softc)); 231static devclass_t argemdio_devclass; 232 233DRIVER_MODULE(miiproxy, arge, miiproxy_driver, miiproxy_devclass, 0, 0); 234DRIVER_MODULE(argemdio, nexus, argemdio_driver, argemdio_devclass, 0, 0); 235DRIVER_MODULE(mdio, argemdio, mdio_driver, mdio_devclass, 0, 0); 236#endif 237 238/* 239 * RedBoot passes MAC address to entry point as environment 240 * variable. platfrom_start parses it and stores in this variable 241 */ 242extern uint32_t ar711_base_mac[ETHER_ADDR_LEN]; 243 244static struct mtx miibus_mtx; 245 246MTX_SYSINIT(miibus_mtx, &miibus_mtx, "arge mii lock", MTX_DEF); 247 248/* 249 * Flushes all 250 */ 251static void 252arge_flush_ddr(struct arge_softc *sc) 253{ 254 255 ar71xx_device_flush_ddr_ge(sc->arge_mac_unit); 256} 257 258static int 259arge_probe(device_t dev) 260{ 261 262 device_set_desc(dev, "Atheros AR71xx built-in ethernet interface"); 263 return (0); 264} 265 266static void 267arge_attach_sysctl(device_t dev) 268{ 269 struct arge_softc *sc = device_get_softc(dev); 270 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 271 struct sysctl_oid *tree = device_get_sysctl_tree(dev); 272 273#ifdef ARGE_DEBUG 274 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 275 "debug", CTLFLAG_RW, &sc->arge_debug, 0, 276 "arge interface debugging flags"); 277#endif 278 279 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 280 "tx_pkts_aligned", CTLFLAG_RW, &sc->stats.tx_pkts_aligned, 0, 281 "number of TX aligned packets"); 
282 283 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 284 "tx_pkts_unaligned", CTLFLAG_RW, &sc->stats.tx_pkts_unaligned, 285 0, "number of TX unaligned packets"); 286 287#ifdef ARGE_DEBUG 288 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_prod", 289 CTLFLAG_RW, &sc->arge_cdata.arge_tx_prod, 0, ""); 290 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_cons", 291 CTLFLAG_RW, &sc->arge_cdata.arge_tx_cons, 0, ""); 292 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_cnt", 293 CTLFLAG_RW, &sc->arge_cdata.arge_tx_cnt, 0, ""); 294#endif 295} 296 297static void 298arge_reset_mac(struct arge_softc *sc) 299{ 300 uint32_t reg; 301 302 /* Step 1. Soft-reset MAC */ 303 ARGE_SET_BITS(sc, AR71XX_MAC_CFG1, MAC_CFG1_SOFT_RESET); 304 DELAY(20); 305 306 /* Step 2. Punt the MAC core from the central reset register */ 307 ar71xx_device_stop(sc->arge_mac_unit == 0 ? RST_RESET_GE0_MAC : 308 RST_RESET_GE1_MAC); 309 DELAY(100); 310 ar71xx_device_start(sc->arge_mac_unit == 0 ? RST_RESET_GE0_MAC : 311 RST_RESET_GE1_MAC); 312 313 /* Step 3. 
	   Reconfigure MAC block */
	ARGE_WRITE(sc, AR71XX_MAC_CFG1,
	    MAC_CFG1_SYNC_RX | MAC_CFG1_RX_ENABLE |
	    MAC_CFG1_SYNC_TX | MAC_CFG1_TX_ENABLE);

	reg = ARGE_READ(sc, AR71XX_MAC_CFG2);
	reg |= MAC_CFG2_ENABLE_PADCRC | MAC_CFG2_LENGTH_FIELD ;
	ARGE_WRITE(sc, AR71XX_MAC_CFG2, reg);

	/* Largest frame the MAC will accept. */
	ARGE_WRITE(sc, AR71XX_MAC_MAX_FRAME_LEN, 1536);
}

/* Reset the MDIO state machine and restore a sane MDC clock divider. */
static void
arge_reset_miibus(struct arge_softc *sc)
{

	/* Reset MII bus */
	ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_RESET);
	DELAY(100);
	ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_CLOCK_DIV_28);
	DELAY(100);
}

/*
 * Fetch optional per-speed PLL overrides (pll_10/pll_100/pll_1000)
 * from the device hints; hints that are absent leave the corresponding
 * field untouched.
 */
static void
arge_fetch_pll_config(struct arge_softc *sc)
{
	long int val;

	if (resource_long_value(device_get_name(sc->arge_dev),
	    device_get_unit(sc->arge_dev),
	    "pll_10", &val) == 0) {
		sc->arge_pllcfg.pll_10 = val;
		device_printf(sc->arge_dev, "%s: pll_10 = 0x%x\n",
		    __func__, (int) val);
	}
	if (resource_long_value(device_get_name(sc->arge_dev),
	    device_get_unit(sc->arge_dev),
	    "pll_100", &val) == 0) {
		sc->arge_pllcfg.pll_100 = val;
		device_printf(sc->arge_dev, "%s: pll_100 = 0x%x\n",
		    __func__, (int) val);
	}
	if (resource_long_value(device_get_name(sc->arge_dev),
	    device_get_unit(sc->arge_dev),
	    "pll_1000", &val) == 0) {
		sc->arge_pllcfg.pll_1000 = val;
		device_printf(sc->arge_dev, "%s: pll_1000 = 0x%x\n",
		    __func__, (int) val);
	}
}

/*
 * Attach one MAC unit: parse hints (MAC address, MII mode, PHY mask,
 * media), allocate register/IRQ resources, the ifnet and DMA state,
 * program the MAC, attach PHYs and hook the interrupt.
 */
static int
arge_attach(device_t dev)
{
	struct ifnet *ifp;
	struct arge_softc *sc;
	int error = 0, rid;
	uint32_t rnd;
	int is_base_mac_empty, i;
	/* NOTE(review): passed to resource_int_value(), which takes an
	 * int *; assumes int and uint32_t are layout-compatible here —
	 * confirm against sys/kern/subr_hints.c. */
	uint32_t hint;
	long eeprom_mac_addr = 0;
	int miicfg = 0;
	int readascii = 0;

	sc = device_get_softc(dev);
	sc->arge_dev = dev;
	sc->arge_mac_unit = device_get_unit(dev);

	/*
	 * Some units (eg the TP-Link WR-1043ND) do not have a convenient
	 * EEPROM location to read the ethernet MAC address from.
	 * OpenWRT simply snaffles it from a fixed location.
	 *
	 * Since multiple units seem to use this feature, include
	 * a method of setting the MAC address based on an flash location
	 * in CPU address space.
	 *
	 * Some vendors have decided to store the mac address as a literal
	 * string of 18 characters in xx:xx:xx:xx:xx:xx format instead of
	 * an array of numbers.  Expose a hint to turn on this conversion
	 * feature via strtol()
	 */
	if (resource_long_value(device_get_name(dev), device_get_unit(dev),
	    "eeprommac", &eeprom_mac_addr) == 0) {
		int i;
		const char *mac =
		    (const char *) MIPS_PHYS_TO_KSEG1(eeprom_mac_addr);
		device_printf(dev, "Overriding MAC from EEPROM\n");
		if (resource_int_value(device_get_name(dev), device_get_unit(dev),
		    "readascii", &readascii) == 0) {
			device_printf(dev, "Vendor stores MAC in ASCII format\n");
			/* "xx:" triples -> bytes; strtol stops at the ':' */
			for (i = 0; i < 6; i++) {
				ar711_base_mac[i] = strtol(&(mac[i*3]), NULL, 16);
			}
		} else {
			for (i = 0; i < 6; i++) {
				ar711_base_mac[i] = mac[i];
			}
		}
	}

	KASSERT(((sc->arge_mac_unit == 0) || (sc->arge_mac_unit == 1)),
	    ("if_arge: Only MAC0 and MAC1 supported"));

	/*
	 * Fetch the PLL configuration.
	 */
	arge_fetch_pll_config(sc);

	/*
	 * Get the MII configuration, if applicable.
	 */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "miimode", &miicfg) == 0) {
		/* XXX bounds check? */
		device_printf(dev, "%s: overriding MII mode to '%s'\n",
		    __func__, arge_miicfg_str[miicfg]);
		sc->arge_miicfg = miicfg;
	}

	/*
	 * Get which PHY of 5 available we should use for this unit
	 */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "phymask", &sc->arge_phymask) != 0) {
		/*
		 * Use port 4 (WAN) for GE0.  For any other port use
		 * its PHY the same as its unit number
		 */
		if (sc->arge_mac_unit == 0)
			sc->arge_phymask = (1 << 4);
		else
			/* Use all phys up to 4 */
			sc->arge_phymask = (1 << 4) - 1;

		device_printf(dev, "No PHY specified, using mask %d\n", sc->arge_phymask);
	}

	/*
	 * Get default media & duplex mode, by default its Base100T
	 * and full duplex
	 */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "media", &hint) != 0)
		hint = 0;

	if (hint == 1000)
		sc->arge_media_type = IFM_1000_T;
	else
		sc->arge_media_type = IFM_100_TX;

	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fduplex", &hint) != 0)
		hint = 1;

	if (hint)
		sc->arge_duplex_mode = IFM_FDX;
	else
		sc->arge_duplex_mode = 0;

	mtx_init(&sc->arge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->arge_stat_callout, &sc->arge_mtx, 0);
	TASK_INIT(&sc->arge_link_task, 0, arge_link_task, sc);

	/* Map control/status registers. */
	sc->arge_rid = 0;
	sc->arge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->arge_rid, RF_ACTIVE | RF_SHAREABLE);

	if (sc->arge_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupts */
	rid = 0;
	sc->arge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->arge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->arge_ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL) {
		device_printf(dev, "couldn't allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = arge_ioctl;
	ifp->if_start = arge_start;
	ifp->if_init = arge_init;
	sc->arge_if_flags = ifp->if_flags;

	/* XXX: add real size */
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	/* NB: polling is advertised as a capability but not enabled. */
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	is_base_mac_empty = 1;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		sc->arge_eaddr[i] = ar711_base_mac[i] & 0xff;
		if (sc->arge_eaddr[i] != 0)
			is_base_mac_empty = 0;
	}

	if (is_base_mac_empty) {
		/*
		 * No MAC address configured. Generate the random one.
		 */
		if (bootverbose)
			device_printf(dev,
			    "Generating random ethernet address.\n");

		rnd = arc4random();
		sc->arge_eaddr[0] = 'b';
		sc->arge_eaddr[1] = 's';
		sc->arge_eaddr[2] = 'd';
		sc->arge_eaddr[3] = (rnd >> 24) & 0xff;
		sc->arge_eaddr[4] = (rnd >> 16) & 0xff;
		sc->arge_eaddr[5] = (rnd >> 8) & 0xff;
	}
	/* Keep the two MAC units' addresses distinct. */
	if (sc->arge_mac_unit != 0)
		sc->arge_eaddr[5] += sc->arge_mac_unit;

	if (arge_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/*
	 * Don't do this for the MDIO bus case - it's already done
	 * as part of the MDIO bus attachment.
	 */
#if !defined(ARGE_MDIO)
	/* Initialize the MAC block */
	arge_reset_mac(sc);
	arge_reset_miibus(sc);
#endif

	/* Configure MII mode, just for convienence */
	if (sc->arge_miicfg != 0)
		ar71xx_device_set_mii_if(sc->arge_mac_unit, sc->arge_miicfg);

	/*
	 * Program the station address into the two MAC address
	 * registers (bytes 2-5 in ADDR1, bytes 0-1 in ADDR2).
	 */
	ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR1, (sc->arge_eaddr[2] << 24)
	    | (sc->arge_eaddr[3] << 16) | (sc->arge_eaddr[4] << 8)
	    | sc->arge_eaddr[5]);
	ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR2, (sc->arge_eaddr[0] << 8)
	    | sc->arge_eaddr[1]);

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG0,
	    FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT);

	/* SoC-specific FIFO tuning values. */
	switch (ar71xx_soc) {
	case AR71XX_SOC_AR7240:
	case AR71XX_SOC_AR7241:
	case AR71XX_SOC_AR7242:
	case AR71XX_SOC_AR9330:
	case AR71XX_SOC_AR9331:
		ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0010ffff);
		ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x015500aa);
		break;
	default:
		ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0fff0000);
		ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x00001fff);
	}

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMATCH,
	    FIFO_RX_FILTMATCH_DEFAULT);

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK,
	    FIFO_RX_FILTMASK_DEFAULT);

#if defined(ARGE_MDIO)
	sc->arge_miiproxy = mii_attach_proxy(sc->arge_dev);
#endif

	device_printf(sc->arge_dev, "finishing attachment, phymask %04x"
	    ", proxy %s \n", sc->arge_phymask, sc->arge_miiproxy == NULL ?
	    "null" : "set");
	/* Attach one miibus instance per PHY enabled in the mask. */
	for (i = 0; i < ARGE_NPHY; i++) {
		if (((1 << i) & sc->arge_phymask) != 0) {
			error = mii_attach(sc->arge_miiproxy != NULL ?
			    sc->arge_miiproxy : sc->arge_dev,
			    &sc->arge_miibus, sc->arge_ifp,
			    arge_ifmedia_upd, arge_ifmedia_sts,
			    BMSR_DEFCAPMASK, i, MII_OFFSET_ANY, 0);
			if (error != 0) {
				device_printf(sc->arge_dev, "unable to attach"
				    " PHY %d: %d\n", i, error);
				goto fail;
			}
		}
	}
	if (sc->arge_miibus == NULL) {
		/* no PHY, so use hard-coded values */
		ifmedia_init(&sc->arge_ifmedia, 0,
		    arge_multiphy_mediachange,
		    arge_multiphy_mediastatus);
		ifmedia_add(&sc->arge_ifmedia,
		    IFM_ETHER | sc->arge_media_type | sc->arge_duplex_mode,
		    0, NULL);
		ifmedia_set(&sc->arge_ifmedia,
		    IFM_ETHER | sc->arge_media_type | sc->arge_duplex_mode);
		arge_set_pll(sc, sc->arge_media_type, sc->arge_duplex_mode);
	}

	/* Call MI attach routine. */
	ether_ifattach(sc->arge_ifp, sc->arge_eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(sc->arge_dev, sc->arge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    arge_intr_filter, arge_intr, sc, &sc->arge_intrhand);

	if (error) {
		device_printf(sc->arge_dev, "couldn't set up irq\n");
		ether_ifdetach(sc->arge_ifp);
		goto fail;
	}

	/* setup sysctl variables */
	arge_attach_sysctl(sc->arge_dev);

fail:
	/* arge_detach() copes with a partially-attached device. */
	if (error)
		arge_detach(dev);

	return (error);
}

/*
 * Detach the interface: stop the MAC, tear down child devices,
 * interrupt, resources and DMA state.  Also used as the attach
 * error path, so every teardown step checks whether its resource
 * was actually allocated.
 */
static int
arge_detach(device_t dev)
{
	struct arge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->arge_ifp;

	KASSERT(mtx_initialized(&sc->arge_mtx),
	    ("arge mutex not initialized"));

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		ARGE_LOCK(sc);
		sc->arge_detach = 1;
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING)
			ether_poll_deregister(ifp);
#endif

		arge_stop(sc);
		ARGE_UNLOCK(sc);
		taskqueue_drain(taskqueue_swi, &sc->arge_link_task);
		ether_ifdetach(ifp);
	}

	if (sc->arge_miibus)
		device_delete_child(dev, sc->arge_miibus);

	if (sc->arge_miiproxy)
		device_delete_child(dev, sc->arge_miiproxy);

	bus_generic_detach(dev);

	if (sc->arge_intrhand)
		bus_teardown_intr(dev, sc->arge_irq, sc->arge_intrhand);

	if (sc->arge_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->arge_rid,
		    sc->arge_res);

	if (ifp)
		if_free(ifp);

	arge_dma_free(sc);

	mtx_destroy(&sc->arge_mtx);

	return (0);

}

/* Suspend/resume are not implemented on this platform. */
static int
arge_suspend(device_t dev)
{

	panic("%s", __func__);
	return 0;
}

static int
arge_resume(device_t dev)
{

	panic("%s", __func__);
	return 0;
}

/* Quiesce the MAC at system shutdown. */
static int
arge_shutdown(device_t dev)
{
	struct arge_softc *sc;

	sc = device_get_softc(dev);

	ARGE_LOCK(sc);
	arge_stop(sc);
	ARGE_UNLOCK(sc);

	return (0);
}

/* Instantiate children named in the hints (e.g. mdio/miiproxy). */
static void
arge_hinted_child(device_t bus, const char *dname, int dunit)
{
	BUS_ADD_CHILD(bus, 0, dname, dunit);
	device_printf(bus, "hinted child %s%d\n", dname, dunit);
}

/*
 * Read PHY register 'reg' on PHY 'phy' through the MDIO state machine.
 * Serialized by miibus_mtx.  Returns the 16-bit register value, or -1
 * on timeout.
 */
static int
arge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct arge_softc * sc = device_get_softc(dev);
	int i, result;
	uint32_t addr = (phy << MAC_MII_PHY_ADDR_SHIFT)
	    | (reg & MAC_MII_REG_MASK);

	mtx_lock(&miibus_mtx);
	ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
	ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_ADDR, addr);
	ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CMD, MAC_MII_CMD_READ);

	/* Busy-wait for the MDIO engine, 5us per poll. */
	i = ARGE_MII_TIMEOUT;
	while ((ARGE_MDIO_READ(sc, AR71XX_MAC_MII_INDICATOR) &
	    MAC_MII_INDICATOR_BUSY) && (i--))
		DELAY(5);

	if (i < 0) {
		mtx_unlock(&miibus_mtx);
		ARGEDEBUG(sc, ARGE_DBG_MII, "%s timedout\n", __func__);
		/* XXX: return ERRNO instead? */
		return (-1);
	}

	result = ARGE_MDIO_READ(sc, AR71XX_MAC_MII_STATUS) & MAC_MII_STATUS_MASK;
	ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
	mtx_unlock(&miibus_mtx);

	ARGEDEBUG(sc, ARGE_DBG_MII,
	    "%s: phy=%d, reg=%02x, value[%08x]=%04x\n",
	    __func__, phy, reg, addr, result);

	return (result);
}

/*
 * Write 'data' to PHY register 'reg' on PHY 'phy'.
 * Returns 0 on success or -1 on MDIO timeout.
 */
static int
arge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct arge_softc * sc = device_get_softc(dev);
	int i;
	uint32_t addr =
	    (phy << MAC_MII_PHY_ADDR_SHIFT) | (reg & MAC_MII_REG_MASK);

	ARGEDEBUG(sc, ARGE_DBG_MII, "%s: phy=%d, reg=%02x, value=%04x\n", __func__,
	    phy, reg, data);

	mtx_lock(&miibus_mtx);
	ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_ADDR, addr);
	ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CONTROL, data);

	/* Busy-wait for the MDIO engine, 5us per poll. */
	i = ARGE_MII_TIMEOUT;
	while ((ARGE_MDIO_READ(sc, AR71XX_MAC_MII_INDICATOR) &
	    MAC_MII_INDICATOR_BUSY) && (i--))
		DELAY(5);

	mtx_unlock(&miibus_mtx);

	if (i < 0) {
		ARGEDEBUG(sc, ARGE_DBG_MII, "%s timedout\n", __func__);
		/* XXX: return ERRNO instead?
		 */
		return (-1);
	}

	return (0);
}

/* MII status change callback: defer reprogramming to a swi task. */
static void
arge_miibus_statchg(device_t dev)
{
	struct arge_softc *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->arge_link_task);
}

/* Task context for miibus_statchg; takes the softc lock. */
static void
arge_link_task(void *arg, int pending)
{
	struct arge_softc *sc;
	sc = (struct arge_softc *)arg;

	ARGE_LOCK(sc);
	arge_update_link_locked(sc);
	ARGE_UNLOCK(sc);
}

/*
 * Re-evaluate link state from the MII and, if a link is active,
 * reprogram the MAC/PLL for the negotiated media and duplex.
 */
static void
arge_update_link_locked(struct arge_softc *sc)
{
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t media, duplex;

	mii = device_get_softc(sc->arge_miibus);
	ifp = sc->arge_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		return;
	}

	if (mii->mii_media_status & IFM_ACTIVE) {

		media = IFM_SUBTYPE(mii->mii_media_active);
		if (media != IFM_NONE) {
			sc->arge_link_status = 1;
			duplex = mii->mii_media_active & IFM_GMASK;
			ARGEDEBUG(sc, ARGE_DBG_MII, "%s: media=%d, duplex=%d\n",
			    __func__,
			    media,
			    duplex);
			arge_set_pll(sc, media, duplex);
		}
	} else {
		sc->arge_link_status = 0;
	}
}

/*
 * Program MAC_CFG2, IFCONTROL, the RX filter byte mode, the TX FIFO
 * threshold and the ethernet PLL for the given media subtype and
 * duplex setting.  Hints fetched by arge_fetch_pll_config() override
 * the SoC default PLL value.
 */
static void
arge_set_pll(struct arge_softc *sc, int media, int duplex)
{
	uint32_t cfg, ifcontrol, rx_filtmask;
	uint32_t fifo_tx, pll;
	int if_speed;

	ARGEDEBUG(sc, ARGE_DBG_PLL, "set_pll(%04x, %s)\n", media,
	    duplex == IFM_FDX ? "full" : "half");
	cfg = ARGE_READ(sc, AR71XX_MAC_CFG2);
	cfg &= ~(MAC_CFG2_IFACE_MODE_1000
	    | MAC_CFG2_IFACE_MODE_10_100
	    | MAC_CFG2_FULL_DUPLEX);

	if (duplex == IFM_FDX)
		cfg |= MAC_CFG2_FULL_DUPLEX;

	ifcontrol = ARGE_READ(sc, AR71XX_MAC_IFCONTROL);
	ifcontrol &= ~MAC_IFCONTROL_SPEED;
	rx_filtmask =
	    ARGE_READ(sc, AR71XX_MAC_FIFO_RX_FILTMASK);
	rx_filtmask &= ~FIFO_RX_MASK_BYTE_MODE;

	switch(media) {
	case IFM_10_T:
		cfg |= MAC_CFG2_IFACE_MODE_10_100;
		if_speed = 10;
		break;
	case IFM_100_TX:
		cfg |= MAC_CFG2_IFACE_MODE_10_100;
		ifcontrol |= MAC_IFCONTROL_SPEED;
		if_speed = 100;
		break;
	case IFM_1000_T:
	case IFM_1000_SX:
		cfg |= MAC_CFG2_IFACE_MODE_1000;
		rx_filtmask |= FIFO_RX_MASK_BYTE_MODE;
		if_speed = 1000;
		break;
	default:
		/* Unknown media: fall back to 100Mbit settings. */
		if_speed = 100;
		device_printf(sc->arge_dev,
		    "Unknown media %d\n", media);
	}

	ARGEDEBUG(sc, ARGE_DBG_PLL, "%s: if_speed=%d\n", __func__, if_speed);

	/* SoC-specific TX FIFO threshold. */
	switch (ar71xx_soc) {
	case AR71XX_SOC_AR7240:
	case AR71XX_SOC_AR7241:
	case AR71XX_SOC_AR7242:
	case AR71XX_SOC_AR9330:
	case AR71XX_SOC_AR9331:
		fifo_tx = 0x01f00140;
		break;
	case AR71XX_SOC_AR9130:
	case AR71XX_SOC_AR9132:
		fifo_tx = 0x00780fff;
		break;
	default:
		fifo_tx = 0x008001ff;
	}

	ARGE_WRITE(sc, AR71XX_MAC_CFG2, cfg);
	ARGE_WRITE(sc, AR71XX_MAC_IFCONTROL, ifcontrol);
	ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK,
	    rx_filtmask);
	ARGE_WRITE(sc, AR71XX_MAC_FIFO_TX_THRESHOLD, fifo_tx);

	/* fetch PLL registers */
	pll = ar71xx_device_get_eth_pll(sc->arge_mac_unit, if_speed);
	ARGEDEBUG(sc, ARGE_DBG_PLL, "%s: pll=0x%x\n", __func__, pll);

	/* Override if required by platform data */
	if (if_speed == 10 && sc->arge_pllcfg.pll_10 != 0)
		pll = sc->arge_pllcfg.pll_10;
	else if (if_speed == 100 && sc->arge_pllcfg.pll_100 != 0)
		pll = sc->arge_pllcfg.pll_100;
	else if (if_speed == 1000 && sc->arge_pllcfg.pll_1000 != 0)
		pll = sc->arge_pllcfg.pll_1000;
	ARGEDEBUG(sc, ARGE_DBG_PLL, "%s: final pll=0x%x\n", __func__, pll);

	/* XXX ensure pll != 0 */
	ar71xx_device_set_pll_ge(sc->arge_mac_unit, if_speed, pll);

	/* set MII registers */
	/*
	 * This was introduced to match what the Linux ag71xx ethernet
	 * driver does. For the AR71xx case, it does set the port
	 * MII speed. However, if this is done, non-gigabit speeds
	 * are not at all reliable when speaking via RGMII through
	 * 'bridge' PHY port that's pretending to be a local PHY.
	 *
	 * Until that gets root caused, and until an AR71xx + normal
	 * PHY board is tested, leave this disabled.
	 */
#if 0
	ar71xx_device_set_mii_speed(sc->arge_mac_unit, if_speed);
#endif
}


/* Quiesce RX/TX DMA engines and acknowledge any latched status bits. */
static void
arge_reset_dma(struct arge_softc *sc)
{
	ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, 0);
	ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, 0);

	ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, 0);
	ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, 0);

	/* Clear all possible RX interrupts */
	while(ARGE_READ(sc, AR71XX_DMA_RX_STATUS) & DMA_RX_STATUS_PKT_RECVD)
		ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD);

	/*
	 * Clear all possible TX interrupts
	 */
	while(ARGE_READ(sc, AR71XX_DMA_TX_STATUS) & DMA_TX_STATUS_PKT_SENT)
		ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT);

	/*
	 * Now Rx/Tx errors
	 */
	ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS,
	    DMA_RX_STATUS_BUS_ERROR | DMA_RX_STATUS_OVERFLOW);
	ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS,
	    DMA_TX_STATUS_BUS_ERROR | DMA_TX_STATUS_UNDERRUN);

	/*
	 * Force a DDR flush so any pending data is properly
	 * flushed to RAM before underlying buffers are freed.
	 */
	arge_flush_ddr(sc);
}



/* ifnet if_init callback: wraps arge_init_locked with the softc lock. */
static void
arge_init(void *xsc)
{
	struct arge_softc *sc = xsc;

	ARGE_LOCK(sc);
	arge_init_locked(sc);
	ARGE_UNLOCK(sc);
}

/*
 * Bring the interface up: initialize RX/TX rings, reset DMA, kick the
 * media layer, start the tick callout and enable DMA interrupts.
 */
static void
arge_init_locked(struct arge_softc *sc)
{
	struct ifnet *ifp = sc->arge_ifp;
	struct mii_data *mii;

	ARGE_LOCK_ASSERT(sc);

	if ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	/* Init circular RX list. */
	if (arge_rx_ring_init(sc) != 0) {
		device_printf(sc->arge_dev,
		    "initialization failed: no memory for rx buffers\n");
		arge_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	arge_tx_ring_init(sc);

	arge_reset_dma(sc);

	if (sc->arge_miibus) {
		mii = device_get_softc(sc->arge_miibus);
		mii_mediachg(mii);
	}
	else {
		/*
		 * Sun always shines over multiPHY interface
		 */
		sc->arge_link_status = 1;
	}

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (sc->arge_miibus) {
		callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc);
		arge_update_link_locked(sc);
	}

	/* Point the DMA engines at the start of each ring. */
	ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, ARGE_TX_RING_ADDR(sc, 0));
	ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, ARGE_RX_RING_ADDR(sc, 0));

	/* Start listening */
	ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN);

	/* Enable interrupts */
	ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
}

/*
 * Return whether the mbuf chain is correctly aligned
 * for the arge TX engine.
 *
 * The TX engine requires each fragment to be aligned to a
 * 4 byte boundary and the size of each fragment except
 * the last to be a multiple of 4 bytes.
 */
static int
arge_mbuf_chain_is_tx_aligned(struct mbuf *m0)
{
	struct mbuf *m;

	for (m = m0; m != NULL; m = m->m_next) {
		if((mtod(m, intptr_t) & 3) != 0)
			return 0;
		if ((m->m_next != NULL) && ((m->m_len & 0x03) != 0))
			return 0;
	}
	return 1;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
arge_encap(struct arge_softc *sc, struct mbuf **m_head)
{
	struct arge_txdesc *txd;
	struct arge_desc *desc, *prev_desc;
	bus_dma_segment_t txsegs[ARGE_MAXFRAGS];
	int error, i, nsegs, prod, prev_prod;
	struct mbuf *m;

	ARGE_LOCK_ASSERT(sc);

	/*
	 * Fix mbuf chain, all fragments should be 4 bytes aligned and
	 * even 4 bytes
	 */
	m = *m_head;
	if (! arge_mbuf_chain_is_tx_aligned(m)) {
		sc->stats.tx_pkts_unaligned++;
		/* Defragment into a single (aligned) cluster. */
		m = m_defrag(*m_head, M_NOWAIT);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
	} else
		sc->stats.tx_pkts_aligned++;

	prod = sc->arge_cdata.arge_tx_prod;
	txd = &sc->arge_cdata.arge_txdesc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		panic("EFBIG");
	} else if (error != 0)
		return (error);

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
	if (sc->arge_cdata.arge_tx_cnt + nsegs >= (ARGE_TX_RING_COUNT - 1)) {
		bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	txd->tx_m = *m_head;
	bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Make a list of descriptors for this packet. DMA controller will
	 * walk through it while arge_link is not zero.
	 */
	prev_prod = prod;
	desc = prev_desc = NULL;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->arge_rdata.arge_tx_ring[prod];
		desc->packet_ctrl = ARGE_DMASIZE(txsegs[i].ds_len);

		if (txsegs[i].ds_addr & 3)
			panic("TX packet address unaligned\n");

		desc->packet_addr = txsegs[i].ds_addr;

		/* link with previous descriptor */
		if (prev_desc)
			prev_desc->packet_ctrl |= ARGE_DESC_MORE;

		sc->arge_cdata.arge_tx_cnt++;
		prev_desc = desc;
		ARGE_INC(prod, ARGE_TX_RING_COUNT);
	}

	/* Update producer index. */
	sc->arge_cdata.arge_tx_prod = prod;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Start transmitting */
	ARGEDEBUG(sc, ARGE_DBG_TX, "%s: setting DMA_TX_CONTROL_EN\n",
	    __func__);
	ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, DMA_TX_CONTROL_EN);
	return (0);
}

/* ifnet if_start callback: wraps arge_start_locked with the softc lock. */
static void
arge_start(struct ifnet *ifp)
{
	struct arge_softc *sc;

	sc = ifp->if_softc;

	ARGE_LOCK(sc);
	arge_start_locked(ifp);
	ARGE_UNLOCK(sc);
}

/* Drain the interface send queue into the TX descriptor ring. */
static void
arge_start_locked(struct ifnet *ifp)
{
	struct arge_softc *sc;
	struct mbuf *m_head;
	int enq = 0;

	sc = ifp->if_softc;

	ARGE_LOCK_ASSERT(sc);

	ARGEDEBUG(sc, ARGE_DBG_TX, "%s: beginning\n", __func__);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->arge_link_status == 0 )
		return;

	/*
	 * Before we go any further, check whether we're already full.
	 * The below check errors out immediately if the ring is full
	 * and never gets a chance to set this flag. Although it's
	 * likely never needed, this at least avoids an unexpected
	 * situation.
1221 */ 1222 if (sc->arge_cdata.arge_tx_cnt >= ARGE_TX_RING_COUNT - 2) { 1223 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1224 ARGEDEBUG(sc, ARGE_DBG_ERR, 1225 "%s: tx_cnt %d >= max %d; setting IFF_DRV_OACTIVE\n", 1226 __func__, sc->arge_cdata.arge_tx_cnt, 1227 ARGE_TX_RING_COUNT - 2); 1228 return; 1229 } 1230 1231 arge_flush_ddr(sc); 1232 1233 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 1234 sc->arge_cdata.arge_tx_cnt < ARGE_TX_RING_COUNT - 2; ) { 1235 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1236 if (m_head == NULL) 1237 break; 1238 1239 1240 /* 1241 * Pack the data into the transmit ring. 1242 */ 1243 if (arge_encap(sc, &m_head)) { 1244 if (m_head == NULL) 1245 break; 1246 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1247 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1248 break; 1249 } 1250 1251 enq++; 1252 /* 1253 * If there's a BPF listener, bounce a copy of this frame 1254 * to him. 1255 */ 1256 ETHER_BPF_MTAP(ifp, m_head); 1257 } 1258 ARGEDEBUG(sc, ARGE_DBG_TX, "%s: finished; queued %d packets\n", 1259 __func__, enq); 1260} 1261 1262static void 1263arge_stop(struct arge_softc *sc) 1264{ 1265 struct ifnet *ifp; 1266 1267 ARGE_LOCK_ASSERT(sc); 1268 1269 ifp = sc->arge_ifp; 1270 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1271 if (sc->arge_miibus) 1272 callout_stop(&sc->arge_stat_callout); 1273 1274 /* mask out interrupts */ 1275 ARGE_WRITE(sc, AR71XX_DMA_INTR, 0); 1276 1277 arge_reset_dma(sc); 1278 1279 /* Flush FIFO and free any existing mbufs */ 1280 arge_flush_ddr(sc); 1281 arge_rx_ring_free(sc); 1282 arge_tx_ring_free(sc); 1283} 1284 1285 1286static int 1287arge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1288{ 1289 struct arge_softc *sc = ifp->if_softc; 1290 struct ifreq *ifr = (struct ifreq *) data; 1291 struct mii_data *mii; 1292 int error; 1293#ifdef DEVICE_POLLING 1294 int mask; 1295#endif 1296 1297 switch (command) { 1298 case SIOCSIFFLAGS: 1299 ARGE_LOCK(sc); 1300 if ((ifp->if_flags & IFF_UP) != 0) { 1301 if ((ifp->if_drv_flags & 
IFF_DRV_RUNNING) != 0) { 1302 if (((ifp->if_flags ^ sc->arge_if_flags) 1303 & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 1304 /* XXX: handle promisc & multi flags */ 1305 } 1306 1307 } else { 1308 if (!sc->arge_detach) 1309 arge_init_locked(sc); 1310 } 1311 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1312 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1313 arge_stop(sc); 1314 } 1315 sc->arge_if_flags = ifp->if_flags; 1316 ARGE_UNLOCK(sc); 1317 error = 0; 1318 break; 1319 case SIOCADDMULTI: 1320 case SIOCDELMULTI: 1321 /* XXX: implement SIOCDELMULTI */ 1322 error = 0; 1323 break; 1324 case SIOCGIFMEDIA: 1325 case SIOCSIFMEDIA: 1326 if (sc->arge_miibus) { 1327 mii = device_get_softc(sc->arge_miibus); 1328 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, 1329 command); 1330 } 1331 else 1332 error = ifmedia_ioctl(ifp, ifr, &sc->arge_ifmedia, 1333 command); 1334 break; 1335 case SIOCSIFCAP: 1336 /* XXX: Check other capabilities */ 1337#ifdef DEVICE_POLLING 1338 mask = ifp->if_capenable ^ ifr->ifr_reqcap; 1339 if (mask & IFCAP_POLLING) { 1340 if (ifr->ifr_reqcap & IFCAP_POLLING) { 1341 ARGE_WRITE(sc, AR71XX_DMA_INTR, 0); 1342 error = ether_poll_register(arge_poll, ifp); 1343 if (error) 1344 return error; 1345 ARGE_LOCK(sc); 1346 ifp->if_capenable |= IFCAP_POLLING; 1347 ARGE_UNLOCK(sc); 1348 } else { 1349 ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL); 1350 error = ether_poll_deregister(ifp); 1351 ARGE_LOCK(sc); 1352 ifp->if_capenable &= ~IFCAP_POLLING; 1353 ARGE_UNLOCK(sc); 1354 } 1355 } 1356 error = 0; 1357 break; 1358#endif 1359 default: 1360 error = ether_ioctl(ifp, command, data); 1361 break; 1362 } 1363 1364 return (error); 1365} 1366 1367/* 1368 * Set media options. 
 */
static int
arge_ifmedia_upd(struct ifnet *ifp)
{
	struct arge_softc		*sc;
	struct mii_data		*mii;
	struct mii_softc	*miisc;
	int			error;

	sc = ifp->if_softc;
	ARGE_LOCK(sc);
	mii = device_get_softc(sc->arge_miibus);
	/* Reset every PHY before renegotiating the media. */
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	ARGE_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
arge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct arge_softc		*sc = ifp->if_softc;
	struct mii_data		*mii;

	mii = device_get_softc(sc->arge_miibus);
	ARGE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	ARGE_UNLOCK(sc);
}

/* Context passed to arge_dmamap_cb() to capture the bus address. */
struct arge_dmamap_arg {
	bus_addr_t	arge_busaddr;
};

/*
 * busdma load callback: record the single segment's bus address.
 */
static void
arge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct arge_dmamap_arg	*ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->arge_busaddr = segs[0].ds_addr;
}

/*
 * Create all DMA tags, allocate and load the TX/RX descriptor rings
 * and create per-buffer DMA maps.  Returns 0 or a busdma error;
 * partially-created resources are left for arge_dma_free() to reclaim.
 */
static int
arge_dma_alloc(struct arge_softc *sc)
{
	struct arge_dmamap_arg	ctx;
	struct arge_txdesc	*txd;
	struct arge_rxdesc	*rxd;
	int			error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->arge_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_parent_tag);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    ARGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ARGE_TX_DMA_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    ARGE_TX_DMA_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    ARGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ARGE_RX_DMA_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    ARGE_RX_DMA_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    sizeof(uint32_t), 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * ARGE_MAXFRAGS,	/* maxsize */
	    ARGE_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_tx_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    ARGE_RX_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    ARGE_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_rx_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->arge_cdata.arge_tx_ring_tag,
	    (void **)&sc->arge_rdata.arge_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->arge_cdata.arge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.arge_busaddr = 0;
	error = bus_dmamap_load(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map, sc->arge_rdata.arge_tx_ring,
	    ARGE_TX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.arge_busaddr == 0) {
		device_printf(sc->arge_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->arge_rdata.arge_tx_ring_paddr = ctx.arge_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->arge_cdata.arge_rx_ring_tag,
	    (void **)&sc->arge_rdata.arge_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->arge_cdata.arge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.arge_busaddr = 0;
	error = bus_dmamap_load(sc->arge_cdata.arge_rx_ring_tag,
	    sc->arge_cdata.arge_rx_ring_map, sc->arge_rdata.arge_rx_ring,
	    ARGE_RX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.arge_busaddr == 0) {
		device_printf(sc->arge_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->arge_rdata.arge_rx_ring_paddr = ctx.arge_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
		txd = &sc->arge_cdata.arge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->arge_cdata.arge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->arge_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0,
	    &sc->arge_cdata.arge_rx_sparemap)) != 0) {
		device_printf(sc->arge_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
		rxd = &sc->arge_cdata.arge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->arge_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

/*
 * Tear down everything arge_dma_alloc() created: unload and free the
 * rings, destroy per-buffer maps and all DMA tags.  Safe to call on a
 * partially-initialized softc (every step is NULL-checked).
 */
static void
arge_dma_free(struct arge_softc *sc)
{
	struct arge_txdesc	*txd;
	struct arge_rxdesc	*rxd;
	int			i;

	/* Tx ring. */
	if (sc->arge_cdata.arge_tx_ring_tag) {
		if (sc->arge_cdata.arge_tx_ring_map)
			bus_dmamap_unload(sc->arge_cdata.arge_tx_ring_tag,
			    sc->arge_cdata.arge_tx_ring_map);
		if (sc->arge_cdata.arge_tx_ring_map &&
		    sc->arge_rdata.arge_tx_ring)
			bus_dmamem_free(sc->arge_cdata.arge_tx_ring_tag,
			    sc->arge_rdata.arge_tx_ring,
			    sc->arge_cdata.arge_tx_ring_map);
		sc->arge_rdata.arge_tx_ring = NULL;
		sc->arge_cdata.arge_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->arge_cdata.arge_tx_ring_tag);
		sc->arge_cdata.arge_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->arge_cdata.arge_rx_ring_tag) {
		if (sc->arge_cdata.arge_rx_ring_map)
			bus_dmamap_unload(sc->arge_cdata.arge_rx_ring_tag,
			    sc->arge_cdata.arge_rx_ring_map);
		if (sc->arge_cdata.arge_rx_ring_map &&
		    sc->arge_rdata.arge_rx_ring)
			bus_dmamem_free(sc->arge_cdata.arge_rx_ring_tag,
			    sc->arge_rdata.arge_rx_ring,
			    sc->arge_cdata.arge_rx_ring_map);
		sc->arge_rdata.arge_rx_ring = NULL;
		sc->arge_cdata.arge_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->arge_cdata.arge_rx_ring_tag);
		sc->arge_cdata.arge_rx_ring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->arge_cdata.arge_tx_tag) {
		for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
			txd = &sc->arge_cdata.arge_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->arge_cdata.arge_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->arge_cdata.arge_tx_tag);
		sc->arge_cdata.arge_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->arge_cdata.arge_rx_tag) {
		for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
			rxd = &sc->arge_cdata.arge_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->arge_cdata.arge_rx_sparemap) {
			bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag,
			    sc->arge_cdata.arge_rx_sparemap);
			sc->arge_cdata.arge_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->arge_cdata.arge_rx_tag);
		sc->arge_cdata.arge_rx_tag = NULL;
	}

	if (sc->arge_cdata.arge_parent_tag) {
		bus_dma_tag_destroy(sc->arge_cdata.arge_parent_tag);
		sc->arge_cdata.arge_parent_tag = NULL;
	}
}

/*
 * Initialize the transmit descriptors.
 */
static int
arge_tx_ring_init(struct arge_softc *sc)
{
	struct arge_ring_data	*rd;
	struct arge_txdesc	*txd;
	bus_addr_t		addr;
	int			i;

	sc->arge_cdata.arge_tx_prod = 0;
	sc->arge_cdata.arge_tx_cons = 0;
	sc->arge_cdata.arge_tx_cnt = 0;

	rd = &sc->arge_rdata;
	bzero(rd->arge_tx_ring, sizeof(rd->arge_tx_ring));
	for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
		/* Link descriptors into a circular ring. */
		if (i == ARGE_TX_RING_COUNT - 1)
			addr = ARGE_TX_RING_ADDR(sc, 0);
		else
			addr = ARGE_TX_RING_ADDR(sc, i + 1);
		rd->arge_tx_ring[i].packet_ctrl = ARGE_DESC_EMPTY;
		rd->arge_tx_ring[i].next_desc = addr;
		txd = &sc->arge_cdata.arge_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Free the Tx ring, unload any pending dma transaction and free the mbuf.
 */
static void
arge_tx_ring_free(struct arge_softc *sc)
{
	struct arge_txdesc	*txd;
	int			i;

	/* Free the Tx buffers. */
	for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
		txd = &sc->arge_cdata.arge_txdesc[i];
		if (txd->tx_dmamap) {
			bus_dmamap_sync(sc->arge_cdata.arge_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->arge_cdata.arge_tx_tag,
			    txd->tx_dmamap);
		}
		if (txd->tx_m)
			m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
arge_rx_ring_init(struct arge_softc *sc)
{
	struct arge_ring_data	*rd;
	struct arge_rxdesc	*rxd;
	bus_addr_t		addr;
	int			i;

	sc->arge_cdata.arge_rx_cons = 0;

	rd = &sc->arge_rdata;
	bzero(rd->arge_rx_ring, sizeof(rd->arge_rx_ring));
	for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
		rxd = &sc->arge_cdata.arge_rxdesc[i];
		if (rxd->rx_m != NULL) {
			device_printf(sc->arge_dev,
			    "%s: ring[%d] rx_m wasn't free?\n",
			    __func__,
			    i);
		}
		rxd->rx_m = NULL;
		rxd->desc = &rd->arge_rx_ring[i];
		/* Link descriptors into a circular ring. */
		if (i == ARGE_RX_RING_COUNT - 1)
			addr = ARGE_RX_RING_ADDR(sc, 0);
		else
			addr = ARGE_RX_RING_ADDR(sc, i + 1);
		rd->arge_rx_ring[i].next_desc = addr;
		if (arge_newbuf(sc, i) != 0) {
			return (ENOBUFS);
		}
	}

	bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
	    sc->arge_cdata.arge_rx_ring_map,
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Free all the buffers in the RX ring.
 *
 * TODO: ensure that DMA is disabled and no pending DMA
 * is lurking in the FIFO.
1785 */ 1786static void 1787arge_rx_ring_free(struct arge_softc *sc) 1788{ 1789 int i; 1790 struct arge_rxdesc *rxd; 1791 1792 ARGE_LOCK_ASSERT(sc); 1793 1794 for (i = 0; i < ARGE_RX_RING_COUNT; i++) { 1795 rxd = &sc->arge_cdata.arge_rxdesc[i]; 1796 /* Unmap the mbuf */ 1797 if (rxd->rx_m != NULL) { 1798 bus_dmamap_unload(sc->arge_cdata.arge_rx_tag, 1799 rxd->rx_dmamap); 1800 m_free(rxd->rx_m); 1801 rxd->rx_m = NULL; 1802 } 1803 } 1804} 1805 1806/* 1807 * Initialize an RX descriptor and attach an MBUF cluster. 1808 */ 1809static int 1810arge_newbuf(struct arge_softc *sc, int idx) 1811{ 1812 struct arge_desc *desc; 1813 struct arge_rxdesc *rxd; 1814 struct mbuf *m; 1815 bus_dma_segment_t segs[1]; 1816 bus_dmamap_t map; 1817 int nsegs; 1818 1819 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1820 if (m == NULL) 1821 return (ENOBUFS); 1822 m->m_len = m->m_pkthdr.len = MCLBYTES; 1823 m_adj(m, sizeof(uint64_t)); 1824 1825 if (bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_rx_tag, 1826 sc->arge_cdata.arge_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1827 m_freem(m); 1828 return (ENOBUFS); 1829 } 1830 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1831 1832 rxd = &sc->arge_cdata.arge_rxdesc[idx]; 1833 if (rxd->rx_m != NULL) { 1834 bus_dmamap_unload(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap); 1835 } 1836 map = rxd->rx_dmamap; 1837 rxd->rx_dmamap = sc->arge_cdata.arge_rx_sparemap; 1838 sc->arge_cdata.arge_rx_sparemap = map; 1839 rxd->rx_m = m; 1840 desc = rxd->desc; 1841 if (segs[0].ds_addr & 3) 1842 panic("RX packet address unaligned"); 1843 desc->packet_addr = segs[0].ds_addr; 1844 desc->packet_ctrl = ARGE_DESC_EMPTY | ARGE_DMASIZE(segs[0].ds_len); 1845 1846 bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag, 1847 sc->arge_cdata.arge_rx_ring_map, 1848 BUS_DMASYNC_PREWRITE); 1849 1850 return (0); 1851} 1852 1853static __inline void 1854arge_fixup_rx(struct mbuf *m) 1855{ 1856 int i; 1857 uint16_t *src, *dst; 1858 1859 src = mtod(m, uint16_t *); 1860 dst = src - 1; 
1861 1862 for (i = 0; i < m->m_len / sizeof(uint16_t); i++) { 1863 *dst++ = *src++; 1864 } 1865 1866 if (m->m_len % sizeof(uint16_t)) 1867 *(uint8_t *)dst = *(uint8_t *)src; 1868 1869 m->m_data -= ETHER_ALIGN; 1870} 1871 1872#ifdef DEVICE_POLLING 1873static int 1874arge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1875{ 1876 struct arge_softc *sc = ifp->if_softc; 1877 int rx_npkts = 0; 1878 1879 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1880 ARGE_LOCK(sc); 1881 arge_tx_locked(sc); 1882 rx_npkts = arge_rx_locked(sc); 1883 ARGE_UNLOCK(sc); 1884 } 1885 1886 return (rx_npkts); 1887} 1888#endif /* DEVICE_POLLING */ 1889 1890 1891static void 1892arge_tx_locked(struct arge_softc *sc) 1893{ 1894 struct arge_txdesc *txd; 1895 struct arge_desc *cur_tx; 1896 struct ifnet *ifp; 1897 uint32_t ctrl; 1898 int cons, prod; 1899 1900 ARGE_LOCK_ASSERT(sc); 1901 1902 cons = sc->arge_cdata.arge_tx_cons; 1903 prod = sc->arge_cdata.arge_tx_prod; 1904 1905 ARGEDEBUG(sc, ARGE_DBG_TX, "%s: cons=%d, prod=%d\n", __func__, cons, 1906 prod); 1907 1908 if (cons == prod) 1909 return; 1910 1911 bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag, 1912 sc->arge_cdata.arge_tx_ring_map, 1913 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1914 1915 ifp = sc->arge_ifp; 1916 /* 1917 * Go through our tx list and free mbufs for those 1918 * frames that have been transmitted. 
1919 */ 1920 for (; cons != prod; ARGE_INC(cons, ARGE_TX_RING_COUNT)) { 1921 cur_tx = &sc->arge_rdata.arge_tx_ring[cons]; 1922 ctrl = cur_tx->packet_ctrl; 1923 /* Check if descriptor has "finished" flag */ 1924 if ((ctrl & ARGE_DESC_EMPTY) == 0) 1925 break; 1926 1927 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT); 1928 1929 sc->arge_cdata.arge_tx_cnt--; 1930 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1931 1932 txd = &sc->arge_cdata.arge_txdesc[cons]; 1933 1934 ifp->if_opackets++; 1935 1936 bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap, 1937 BUS_DMASYNC_POSTWRITE); 1938 bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap); 1939 1940 /* Free only if it's first descriptor in list */ 1941 if (txd->tx_m) 1942 m_freem(txd->tx_m); 1943 txd->tx_m = NULL; 1944 1945 /* reset descriptor */ 1946 cur_tx->packet_addr = 0; 1947 } 1948 1949 sc->arge_cdata.arge_tx_cons = cons; 1950 1951 bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag, 1952 sc->arge_cdata.arge_tx_ring_map, BUS_DMASYNC_PREWRITE); 1953} 1954 1955 1956static int 1957arge_rx_locked(struct arge_softc *sc) 1958{ 1959 struct arge_rxdesc *rxd; 1960 struct ifnet *ifp = sc->arge_ifp; 1961 int cons, prog, packet_len, i; 1962 struct arge_desc *cur_rx; 1963 struct mbuf *m; 1964 int rx_npkts = 0; 1965 1966 ARGE_LOCK_ASSERT(sc); 1967 1968 cons = sc->arge_cdata.arge_rx_cons; 1969 1970 bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag, 1971 sc->arge_cdata.arge_rx_ring_map, 1972 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1973 1974 for (prog = 0; prog < ARGE_RX_RING_COUNT; 1975 ARGE_INC(cons, ARGE_RX_RING_COUNT)) { 1976 cur_rx = &sc->arge_rdata.arge_rx_ring[cons]; 1977 rxd = &sc->arge_cdata.arge_rxdesc[cons]; 1978 m = rxd->rx_m; 1979 1980 if ((cur_rx->packet_ctrl & ARGE_DESC_EMPTY) != 0) 1981 break; 1982 1983 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD); 1984 1985 prog++; 1986 1987 packet_len = ARGE_DMASIZE(cur_rx->packet_ctrl); 1988 bus_dmamap_sync(sc->arge_cdata.arge_rx_tag, 
rxd->rx_dmamap, 1989 BUS_DMASYNC_POSTREAD); 1990 m = rxd->rx_m; 1991 1992 arge_fixup_rx(m); 1993 m->m_pkthdr.rcvif = ifp; 1994 /* Skip 4 bytes of CRC */ 1995 m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN; 1996 ifp->if_ipackets++; 1997 rx_npkts++; 1998 1999 ARGE_UNLOCK(sc); 2000 (*ifp->if_input)(ifp, m); 2001 ARGE_LOCK(sc); 2002 cur_rx->packet_addr = 0; 2003 } 2004 2005 if (prog > 0) { 2006 2007 i = sc->arge_cdata.arge_rx_cons; 2008 for (; prog > 0 ; prog--) { 2009 if (arge_newbuf(sc, i) != 0) { 2010 device_printf(sc->arge_dev, 2011 "Failed to allocate buffer\n"); 2012 break; 2013 } 2014 ARGE_INC(i, ARGE_RX_RING_COUNT); 2015 } 2016 2017 bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag, 2018 sc->arge_cdata.arge_rx_ring_map, 2019 BUS_DMASYNC_PREWRITE); 2020 2021 sc->arge_cdata.arge_rx_cons = cons; 2022 } 2023 2024 return (rx_npkts); 2025} 2026 2027static int 2028arge_intr_filter(void *arg) 2029{ 2030 struct arge_softc *sc = arg; 2031 uint32_t status, ints; 2032 2033 status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS); 2034 ints = ARGE_READ(sc, AR71XX_DMA_INTR); 2035 2036 ARGEDEBUG(sc, ARGE_DBG_INTR, "int mask(filter) = %b\n", ints, 2037 "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD" 2038 "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT"); 2039 ARGEDEBUG(sc, ARGE_DBG_INTR, "status(filter) = %b\n", status, 2040 "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD" 2041 "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT"); 2042 2043 if (status & DMA_INTR_ALL) { 2044 sc->arge_intr_status |= status; 2045 ARGE_WRITE(sc, AR71XX_DMA_INTR, 0); 2046 return (FILTER_SCHEDULE_THREAD); 2047 } 2048 2049 sc->arge_intr_status = 0; 2050 return (FILTER_STRAY); 2051} 2052 2053static void 2054arge_intr(void *arg) 2055{ 2056 struct arge_softc *sc = arg; 2057 uint32_t status; 2058 struct ifnet *ifp = sc->arge_ifp; 2059 2060 status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS); 2061 status |= sc->arge_intr_status; 2062 2063 ARGEDEBUG(sc, ARGE_DBG_INTR, "int status(intr) = %b\n", status, 2064 
"\20\10\7RX_OVERFLOW\5RX_PKT_RCVD" 2065 "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT"); 2066 2067 /* 2068 * Is it our interrupt at all? 2069 */ 2070 if (status == 0) 2071 return; 2072 2073 if (status & DMA_INTR_RX_BUS_ERROR) { 2074 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_BUS_ERROR); 2075 device_printf(sc->arge_dev, "RX bus error"); 2076 return; 2077 } 2078 2079 if (status & DMA_INTR_TX_BUS_ERROR) { 2080 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_BUS_ERROR); 2081 device_printf(sc->arge_dev, "TX bus error"); 2082 return; 2083 } 2084 2085 ARGE_LOCK(sc); 2086 2087 if (status & DMA_INTR_RX_PKT_RCVD) 2088 arge_rx_locked(sc); 2089 2090 /* 2091 * RX overrun disables the receiver. 2092 * Clear indication and re-enable rx. 2093 */ 2094 if ( status & DMA_INTR_RX_OVERFLOW) { 2095 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_OVERFLOW); 2096 ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN); 2097 sc->stats.rx_overflow++; 2098 } 2099 2100 if (status & DMA_INTR_TX_PKT_SENT) 2101 arge_tx_locked(sc); 2102 /* 2103 * Underrun turns off TX. Clear underrun indication. 2104 * If there's anything left in the ring, reactivate the tx. 2105 */ 2106 if (status & DMA_INTR_TX_UNDERRUN) { 2107 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_UNDERRUN); 2108 sc->stats.tx_underflow++; 2109 ARGEDEBUG(sc, ARGE_DBG_TX, "%s: TX underrun; tx_cnt=%d\n", 2110 __func__, sc->arge_cdata.arge_tx_cnt); 2111 if (sc->arge_cdata.arge_tx_cnt > 0 ) { 2112 ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, 2113 DMA_TX_CONTROL_EN); 2114 } 2115 } 2116 2117 /* 2118 * If we've finished TXing and there's space for more packets 2119 * to be queued for TX, do so. Otherwise we may end up in a 2120 * situation where the interface send queue was filled 2121 * whilst the hardware queue was full, then the hardware 2122 * queue was drained by the interface send queue wasn't, 2123 * and thus if_start() is never called to kick-start 2124 * the send process (and all subsequent packets are simply 2125 * discarded. 
2126 * 2127 * XXX TODO: make sure that the hardware deals nicely 2128 * with the possibility of the queue being enabled above 2129 * after a TX underrun, then having the hardware queue added 2130 * to below. 2131 */ 2132 if (status & (DMA_INTR_TX_PKT_SENT | DMA_INTR_TX_UNDERRUN) && 2133 (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) { 2134 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 2135 arge_start_locked(ifp); 2136 } 2137 2138 /* 2139 * We handled all bits, clear status 2140 */ 2141 sc->arge_intr_status = 0; 2142 ARGE_UNLOCK(sc); 2143 /* 2144 * re-enable all interrupts 2145 */ 2146 ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL); 2147} 2148 2149 2150static void 2151arge_tick(void *xsc) 2152{ 2153 struct arge_softc *sc = xsc; 2154 struct mii_data *mii; 2155 2156 ARGE_LOCK_ASSERT(sc); 2157 2158 if (sc->arge_miibus) { 2159 mii = device_get_softc(sc->arge_miibus); 2160 mii_tick(mii); 2161 callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc); 2162 } 2163} 2164 2165int 2166arge_multiphy_mediachange(struct ifnet *ifp) 2167{ 2168 struct arge_softc *sc = ifp->if_softc; 2169 struct ifmedia *ifm = &sc->arge_ifmedia; 2170 struct ifmedia_entry *ife = ifm->ifm_cur; 2171 2172 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2173 return (EINVAL); 2174 2175 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) { 2176 device_printf(sc->arge_dev, 2177 "AUTO is not supported for multiphy MAC"); 2178 return (EINVAL); 2179 } 2180 2181 /* 2182 * Ignore everything 2183 */ 2184 return (0); 2185} 2186 2187void 2188arge_multiphy_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 2189{ 2190 struct arge_softc *sc = ifp->if_softc; 2191 2192 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; 2193 ifmr->ifm_active = IFM_ETHER | sc->arge_media_type | 2194 sc->arge_duplex_mode; 2195} 2196 2197#if defined(ARGE_MDIO) 2198static int 2199argemdio_probe(device_t dev) 2200{ 2201 device_set_desc(dev, "Atheros AR71xx built-in ethernet interface, MDIO controller"); 2202 return (0); 2203} 2204 2205static int 
2206argemdio_attach(device_t dev) 2207{ 2208 struct arge_softc *sc; 2209 int error = 0; 2210 2211 sc = device_get_softc(dev); 2212 sc->arge_dev = dev; 2213 sc->arge_mac_unit = device_get_unit(dev); 2214 sc->arge_rid = 0; 2215 sc->arge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 2216 &sc->arge_rid, RF_ACTIVE | RF_SHAREABLE); 2217 if (sc->arge_res == NULL) { 2218 device_printf(dev, "couldn't map memory\n"); 2219 error = ENXIO; 2220 goto fail; 2221 } 2222 2223 /* Reset MAC - required for AR71xx MDIO to successfully occur */ 2224 arge_reset_mac(sc); 2225 /* Reset MII bus */ 2226 arge_reset_miibus(sc); 2227 2228 bus_generic_probe(dev); 2229 bus_enumerate_hinted_children(dev); 2230 error = bus_generic_attach(dev); 2231fail: 2232 return (error); 2233} 2234 2235static int 2236argemdio_detach(device_t dev) 2237{ 2238 return (0); 2239} 2240 2241#endif 2242