if_em_netmap.h revision 270252
1/* 2 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 */ 25 26/* 27 * $FreeBSD: stable/10/sys/dev/netmap/if_em_netmap.h 270252 2014-08-20 23:34:36Z luigi $ 28 * 29 * netmap support for: em. 30 * 31 * For more details on netmap support please see ixgbe_netmap.h 32 */ 33 34 35#include <net/netmap.h> 36#include <sys/selinfo.h> 37#include <vm/vm.h> 38#include <vm/pmap.h> /* vtophys ? */ 39#include <dev/netmap/netmap_kern.h> 40 41 42// XXX do we need to block/unblock the tasks ? 43static void 44em_netmap_block_tasks(struct adapter *adapter) 45{ 46 if (adapter->msix > 1) { /* MSIX */ 47 int i; 48 struct tx_ring *txr = adapter->tx_rings; 49 struct rx_ring *rxr = adapter->rx_rings; 50 51 for (i = 0; i < adapter->num_queues; i++, txr++, rxr++) { 52 taskqueue_block(txr->tq); 53 taskqueue_drain(txr->tq, &txr->tx_task); 54 taskqueue_block(rxr->tq); 55 taskqueue_drain(rxr->tq, &rxr->rx_task); 56 } 57 } else { /* legacy */ 58 taskqueue_block(adapter->tq); 59 taskqueue_drain(adapter->tq, &adapter->link_task); 60 taskqueue_drain(adapter->tq, &adapter->que_task); 61 } 62} 63 64 65static void 66em_netmap_unblock_tasks(struct adapter *adapter) 67{ 68 if (adapter->msix > 1) { 69 struct tx_ring *txr = adapter->tx_rings; 70 struct rx_ring *rxr = adapter->rx_rings; 71 int i; 72 73 for (i = 0; i < adapter->num_queues; i++) { 74 taskqueue_unblock(txr->tq); 75 taskqueue_unblock(rxr->tq); 76 } 77 } else { /* legacy */ 78 taskqueue_unblock(adapter->tq); 79 } 80} 81 82 83/* 84 * Register/unregister. We are already under netmap lock. 85 */ 86static int 87em_netmap_reg(struct netmap_adapter *na, int onoff) 88{ 89 struct ifnet *ifp = na->ifp; 90 struct adapter *adapter = ifp->if_softc; 91 92 EM_CORE_LOCK(adapter); 93 em_disable_intr(adapter); 94 95 /* Tell the stack that the interface is no longer active */ 96 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 97 98 em_netmap_block_tasks(adapter); 99 /* enable or disable flags and callbacks in na and ifp */ 100 if (onoff) { 101 nm_set_native_flags(na); 102 } else { 103 nm_clear_native_flags(na); 104 } 105 em_init_locked(adapter); /* also enable intr */ 106 em_netmap_unblock_tasks(adapter); 107 EM_CORE_UNLOCK(adapter); 108 return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1); 109} 110 111 112/* 113 * Reconcile kernel and user view of the transmit ring. 114 */ 115static int 116em_netmap_txsync(struct netmap_kring *kring, int flags) 117{ 118 struct netmap_adapter *na = kring->na; 119 struct ifnet *ifp = na->ifp; 120 struct netmap_ring *ring = kring->ring; 121 u_int nm_i; /* index into the netmap ring */ 122 u_int nic_i; /* index into the NIC ring */ 123 u_int n; 124 u_int const lim = kring->nkr_num_slots - 1; 125 u_int const head = kring->rhead; 126 /* generate an interrupt approximately every half ring */ 127 u_int report_frequency = kring->nkr_num_slots >> 1; 128 129 /* device-specific */ 130 struct adapter *adapter = ifp->if_softc; 131 struct tx_ring *txr = &adapter->tx_rings[kring->ring_id]; 132 133 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 134 BUS_DMASYNC_POSTREAD); 135 136 /* 137 * First part: process new packets to send. 138 */ 139 140 nm_i = kring->nr_hwcur; 141 if (nm_i != head) { /* we have new packets to send */ 142 nic_i = netmap_idx_k2n(kring, nm_i); 143 for (n = 0; nm_i != head; n++) { 144 struct netmap_slot *slot = &ring->slot[nm_i]; 145 u_int len = slot->len; 146 uint64_t paddr; 147 void *addr = PNMB(na, slot, &paddr); 148 149 /* device-specific */ 150 struct e1000_tx_desc *curr = &txr->tx_base[nic_i]; 151 struct em_buffer *txbuf = &txr->tx_buffers[nic_i]; 152 int flags = (slot->flags & NS_REPORT || 153 nic_i == 0 || nic_i == report_frequency) ? 154 E1000_TXD_CMD_RS : 0; 155 156 NM_CHECK_ADDR_LEN(na, addr, len); 157 158 if (slot->flags & NS_BUF_CHANGED) { 159 curr->buffer_addr = htole64(paddr); 160 /* buffer has changed, reload map */ 161 netmap_reload_map(na, txr->txtag, txbuf->map, addr); 162 } 163 slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED); 164 165 /* Fill the slot in the NIC ring. */ 166 curr->upper.data = 0; 167 curr->lower.data = htole32(adapter->txd_cmd | len | 168 (E1000_TXD_CMD_EOP | flags) ); 169 bus_dmamap_sync(txr->txtag, txbuf->map, 170 BUS_DMASYNC_PREWRITE); 171 172 nm_i = nm_next(nm_i, lim); 173 nic_i = nm_next(nic_i, lim); 174 } 175 kring->nr_hwcur = head; 176 177 /* synchronize the NIC ring */ 178 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 179 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 180 181 /* (re)start the tx unit up to slot nic_i (excluded) */ 182 E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), nic_i); 183 } 184 185 /* 186 * Second part: reclaim buffers for completed transmissions. 187 */ 188 if (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) { 189 /* record completed transmissions using TDH */ 190 nic_i = E1000_READ_REG(&adapter->hw, E1000_TDH(kring->ring_id)); 191 if (nic_i >= kring->nkr_num_slots) { /* XXX can it happen ? */ 192 D("TDH wrap %d", nic_i); 193 nic_i -= kring->nkr_num_slots; 194 } 195 if (nic_i != txr->next_to_clean) { 196 txr->next_to_clean = nic_i; 197 kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim); 198 } 199 } 200 201 nm_txsync_finalize(kring); 202 203 return 0; 204} 205 206 207/* 208 * Reconcile kernel and user view of the receive ring. 209 */ 210static int 211em_netmap_rxsync(struct netmap_kring *kring, int flags) 212{ 213 struct netmap_adapter *na = kring->na; 214 struct ifnet *ifp = na->ifp; 215 struct netmap_ring *ring = kring->ring; 216 u_int nm_i; /* index into the netmap ring */ 217 u_int nic_i; /* index into the NIC ring */ 218 u_int n; 219 u_int const lim = kring->nkr_num_slots - 1; 220 u_int const head = nm_rxsync_prologue(kring); 221 int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; 222 223 /* device-specific */ 224 struct adapter *adapter = ifp->if_softc; 225 struct rx_ring *rxr = &adapter->rx_rings[kring->ring_id]; 226 227 if (head > lim) 228 return netmap_ring_reinit(kring); 229 230 /* XXX check sync modes */ 231 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 232 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 233 234 /* 235 * First part: import newly received packets. 236 */ 237 if (netmap_no_pendintr || force_update) { 238 uint16_t slot_flags = kring->nkr_slot_flags; 239 240 nic_i = rxr->next_to_check; 241 nm_i = netmap_idx_n2k(kring, nic_i); 242 243 for (n = 0; ; n++) { // XXX no need to count 244 struct e1000_rx_desc *curr = &rxr->rx_base[nic_i]; 245 uint32_t staterr = le32toh(curr->status); 246 247 if ((staterr & E1000_RXD_STAT_DD) == 0) 248 break; 249 ring->slot[nm_i].len = le16toh(curr->length); 250 ring->slot[nm_i].flags = slot_flags; 251 bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[nic_i].map, 252 BUS_DMASYNC_POSTREAD); 253 nm_i = nm_next(nm_i, lim); 254 /* make sure next_to_refresh follows next_to_check */ 255 rxr->next_to_refresh = nic_i; // XXX 256 nic_i = nm_next(nic_i, lim); 257 } 258 if (n) { /* update the state variables */ 259 rxr->next_to_check = nic_i; 260 kring->nr_hwtail = nm_i; 261 } 262 kring->nr_kflags &= ~NKR_PENDINTR; 263 } 264 265 /* 266 * Second part: skip past packets that userspace has released. 267 */ 268 nm_i = kring->nr_hwcur; 269 if (nm_i != head) { 270 nic_i = netmap_idx_k2n(kring, nm_i); 271 for (n = 0; nm_i != head; n++) { 272 struct netmap_slot *slot = &ring->slot[nm_i]; 273 uint64_t paddr; 274 void *addr = PNMB(na, slot, &paddr); 275 276 struct e1000_rx_desc *curr = &rxr->rx_base[nic_i]; 277 struct em_buffer *rxbuf = &rxr->rx_buffers[nic_i]; 278 279 if (addr == NETMAP_BUF_BASE(na)) /* bad buf */ 280 goto ring_reset; 281 282 if (slot->flags & NS_BUF_CHANGED) { 283 /* buffer has changed, reload map */ 284 curr->buffer_addr = htole64(paddr); 285 netmap_reload_map(na, rxr->rxtag, rxbuf->map, addr); 286 slot->flags &= ~NS_BUF_CHANGED; 287 } 288 curr->status = 0; 289 bus_dmamap_sync(rxr->rxtag, rxbuf->map, 290 BUS_DMASYNC_PREREAD); 291 nm_i = nm_next(nm_i, lim); 292 nic_i = nm_next(nic_i, lim); 293 } 294 kring->nr_hwcur = head; 295 296 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 297 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 298 /* 299 * IMPORTANT: we must leave one free slot in the ring, 300 * so move nic_i back by one unit 301 */ 302 nic_i = nm_prev(nic_i, lim); 303 E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), nic_i); 304 } 305 306 /* tell userspace that there might be new packets */ 307 nm_rxsync_finalize(kring); 308 309 return 0; 310 311ring_reset: 312 return netmap_ring_reinit(kring); 313} 314 315 316static void 317em_netmap_attach(struct adapter *adapter) 318{ 319 struct netmap_adapter na; 320 321 bzero(&na, sizeof(na)); 322 323 na.ifp = adapter->ifp; 324 na.na_flags = NAF_BDG_MAYSLEEP; 325 na.num_tx_desc = adapter->num_tx_desc; 326 na.num_rx_desc = adapter->num_rx_desc; 327 na.nm_txsync = em_netmap_txsync; 328 na.nm_rxsync = em_netmap_rxsync; 329 na.nm_register = em_netmap_reg; 330 na.num_tx_rings = na.num_rx_rings = adapter->num_queues; 331 netmap_attach(&na); 332} 333 334/* end of file */ 335