t4_cpl_io.c revision 330897
1/*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2012, 2015 Chelsio Communications, Inc. 5 * All rights reserved. 6 * Written by: Navdeep Parhar <np@FreeBSD.org> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30#include <sys/cdefs.h> 31__FBSDID("$FreeBSD: stable/11/sys/dev/cxgbe/tom/t4_cpl_io.c 330897 2018-03-14 03:19:51Z eadler $"); 32 33#include "opt_inet.h" 34 35#ifdef TCP_OFFLOAD 36#include <sys/param.h> 37#include <sys/aio.h> 38#include <sys/file.h> 39#include <sys/kernel.h> 40#include <sys/ktr.h> 41#include <sys/module.h> 42#include <sys/proc.h> 43#include <sys/protosw.h> 44#include <sys/domain.h> 45#include <sys/socket.h> 46#include <sys/socketvar.h> 47#include <sys/sglist.h> 48#include <sys/taskqueue.h> 49#include <netinet/in.h> 50#include <netinet/in_pcb.h> 51#include <netinet/ip.h> 52#include <netinet/ip6.h> 53#define TCPSTATES 54#include <netinet/tcp_fsm.h> 55#include <netinet/tcp_seq.h> 56#include <netinet/tcp_var.h> 57#include <netinet/toecore.h> 58 59#include <security/mac/mac_framework.h> 60 61#include <vm/vm.h> 62#include <vm/vm_extern.h> 63#include <vm/pmap.h> 64#include <vm/vm_map.h> 65#include <vm/vm_page.h> 66 67#include "common/common.h" 68#include "common/t4_msg.h" 69#include "common/t4_regs.h" 70#include "common/t4_tcb.h" 71#include "tom/t4_tom_l2t.h" 72#include "tom/t4_tom.h" 73 74VNET_DECLARE(int, tcp_do_autosndbuf); 75#define V_tcp_do_autosndbuf VNET(tcp_do_autosndbuf) 76VNET_DECLARE(int, tcp_autosndbuf_inc); 77#define V_tcp_autosndbuf_inc VNET(tcp_autosndbuf_inc) 78VNET_DECLARE(int, tcp_autosndbuf_max); 79#define V_tcp_autosndbuf_max VNET(tcp_autosndbuf_max) 80VNET_DECLARE(int, tcp_do_autorcvbuf); 81#define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf) 82VNET_DECLARE(int, tcp_autorcvbuf_inc); 83#define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc) 84VNET_DECLARE(int, tcp_autorcvbuf_max); 85#define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max) 86 87#define IS_AIOTX_MBUF(m) \ 88 ((m)->m_flags & M_EXT && (m)->m_ext.ext_flags & EXT_FLAG_AIOTX) 89 90static void t4_aiotx_cancel(struct kaiocb *job); 91static void t4_aiotx_queue_toep(struct toepcb *toep); 92 93static size_t 94aiotx_mbuf_pgoff(struct mbuf *m) 95{ 96 struct aiotx_buffer *ab; 97 98 MPASS(IS_AIOTX_MBUF(m)); 99 ab = m->m_ext.ext_arg1; 100 return ((ab->ps.offset + (uintptr_t)m->m_ext.ext_arg2) % PAGE_SIZE); 101} 102 103static vm_page_t * 104aiotx_mbuf_pages(struct mbuf *m) 105{ 106 struct aiotx_buffer *ab; 107 int npages; 108 109 MPASS(IS_AIOTX_MBUF(m)); 110 ab = m->m_ext.ext_arg1; 111 npages = (ab->ps.offset + (uintptr_t)m->m_ext.ext_arg2) / PAGE_SIZE; 112 return (ab->ps.pages + npages); 113} 114 115void 116send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp) 117{ 118 struct wrqe *wr; 119 struct fw_flowc_wr *flowc; 120 unsigned int nparams = ftxp ? 8 : 6, flowclen; 121 struct vi_info *vi = toep->vi; 122 struct port_info *pi = vi->pi; 123 struct adapter *sc = pi->adapter; 124 unsigned int pfvf = G_FW_VIID_PFN(vi->viid) << S_FW_VIID_PFN; 125 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; 126 127 KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT), 128 ("%s: flowc for tid %u sent already", __func__, toep->tid)); 129 130 flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval); 131 132 wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq); 133 if (wr == NULL) { 134 /* XXX */ 135 panic("%s: allocation failure.", __func__); 136 } 137 flowc = wrtod(wr); 138 memset(flowc, 0, wr->wr_len); 139 140 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | 141 V_FW_FLOWC_WR_NPARAMS(nparams)); 142 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) | 143 V_FW_WR_FLOWID(toep->tid)); 144 145 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 146 flowc->mnemval[0].val = htobe32(pfvf); 147 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 148 flowc->mnemval[1].val = htobe32(pi->tx_chan); 149 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 150 flowc->mnemval[2].val = htobe32(pi->tx_chan); 151 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 152 flowc->mnemval[3].val = htobe32(toep->ofld_rxq->iq.abs_id); 153 if (ftxp) { 154 uint32_t sndbuf = min(ftxp->snd_space, sc->tt.sndbuf); 155 156 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; 157 flowc->mnemval[4].val = htobe32(ftxp->snd_nxt); 158 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; 159 flowc->mnemval[5].val = htobe32(ftxp->rcv_nxt); 160 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; 161 flowc->mnemval[6].val = htobe32(sndbuf); 162 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; 163 flowc->mnemval[7].val = htobe32(ftxp->mss); 164 165 CTR6(KTR_CXGBE, 166 "%s: tid %u, mss %u, sndbuf %u, snd_nxt 0x%x, rcv_nxt 0x%x", 167 __func__, toep->tid, ftxp->mss, sndbuf, ftxp->snd_nxt, 168 ftxp->rcv_nxt); 169 } else { 170 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF; 171 flowc->mnemval[4].val = htobe32(512); 172 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_MSS; 173 flowc->mnemval[5].val = htobe32(512); 174 175 CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid); 176 } 177 178 txsd->tx_credits = howmany(flowclen, 16); 179 txsd->plen = 0; 180 KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0, 181 ("%s: not enough credits (%d)", __func__, toep->tx_credits)); 182 toep->tx_credits -= txsd->tx_credits; 183 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) 184 toep->txsd_pidx = 0; 185 toep->txsd_avail--; 186 187 toep->flags |= TPF_FLOWC_WR_SENT; 188 t4_wrq_tx(sc, wr); 189} 190 191void 192send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt) 193{ 194 struct wrqe *wr; 195 struct cpl_abort_req *req; 196 int tid = toep->tid; 197 struct inpcb *inp = toep->inp; 198 struct tcpcb *tp = intotcpcb(inp); /* don't use if INP_DROPPED */ 199 200 INP_WLOCK_ASSERT(inp); 201 202 CTR6(KTR_CXGBE, "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x%s", 203 __func__, toep->tid, 204 inp->inp_flags & INP_DROPPED ? "inp dropped" : 205 tcpstates[tp->t_state], 206 toep->flags, inp->inp_flags, 207 toep->flags & TPF_ABORT_SHUTDOWN ? 208 " (abort already in progress)" : ""); 209 210 if (toep->flags & TPF_ABORT_SHUTDOWN) 211 return; /* abort already in progress */ 212 213 toep->flags |= TPF_ABORT_SHUTDOWN; 214 215 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 216 ("%s: flowc_wr not sent for tid %d.", __func__, tid)); 217 218 wr = alloc_wrqe(sizeof(*req), toep->ofld_txq); 219 if (wr == NULL) { 220 /* XXX */ 221 panic("%s: allocation failure.", __func__); 222 } 223 req = wrtod(wr); 224 225 INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid); 226 if (inp->inp_flags & INP_DROPPED) 227 req->rsvd0 = htobe32(snd_nxt); 228 else 229 req->rsvd0 = htobe32(tp->snd_nxt); 230 req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT); 231 req->cmd = CPL_ABORT_SEND_RST; 232 233 /* 234 * XXX: What's the correct way to tell that the inp hasn't been detached 235 * from its socket? Should I even be flushing the snd buffer here? 236 */ 237 if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) { 238 struct socket *so = inp->inp_socket; 239 240 if (so != NULL) /* because I'm not sure. See comment above */ 241 sbflush(&so->so_snd); 242 } 243 244 t4_l2t_send(sc, wr, toep->l2te); 245} 246 247/* 248 * Called when a connection is established to translate the TCP options 249 * reported by HW to FreeBSD's native format. 250 */ 251static void 252assign_rxopt(struct tcpcb *tp, unsigned int opt) 253{ 254 struct toepcb *toep = tp->t_toe; 255 struct inpcb *inp = tp->t_inpcb; 256 struct adapter *sc = td_adapter(toep->td); 257 int n; 258 259 INP_LOCK_ASSERT(inp); 260 261 if (inp->inp_inc.inc_flags & INC_ISIPV6) 262 n = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 263 else 264 n = sizeof(struct ip) + sizeof(struct tcphdr); 265 tp->t_maxseg = sc->params.mtus[G_TCPOPT_MSS(opt)] - n; 266 267 CTR4(KTR_CXGBE, "%s: tid %d, mtu_idx %u (%u)", __func__, toep->tid, 268 G_TCPOPT_MSS(opt), sc->params.mtus[G_TCPOPT_MSS(opt)]); 269 270 if (G_TCPOPT_TSTAMP(opt)) { 271 tp->t_flags |= TF_RCVD_TSTMP; /* timestamps ok */ 272 tp->ts_recent = 0; /* hmmm */ 273 tp->ts_recent_age = tcp_ts_getticks(); 274 } 275 276 if (G_TCPOPT_SACK(opt)) 277 tp->t_flags |= TF_SACK_PERMIT; /* should already be set */ 278 else 279 tp->t_flags &= ~TF_SACK_PERMIT; /* sack disallowed by peer */ 280 281 if (G_TCPOPT_WSCALE_OK(opt)) 282 tp->t_flags |= TF_RCVD_SCALE; 283 284 /* Doing window scaling? */ 285 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 286 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 287 tp->rcv_scale = tp->request_r_scale; 288 tp->snd_scale = G_TCPOPT_SND_WSCALE(opt); 289 } 290} 291 292/* 293 * Completes some final bits of initialization for just established connections 294 * and changes their state to TCPS_ESTABLISHED. 295 * 296 * The ISNs are from after the exchange of SYNs. i.e., the true ISN + 1. 297 */ 298void 299make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn, 300 uint16_t opt) 301{ 302 struct inpcb *inp = toep->inp; 303 struct socket *so = inp->inp_socket; 304 struct tcpcb *tp = intotcpcb(inp); 305 long bufsize; 306 uint32_t iss = be32toh(snd_isn) - 1; /* true ISS */ 307 uint32_t irs = be32toh(rcv_isn) - 1; /* true IRS */ 308 uint16_t tcpopt = be16toh(opt); 309 struct flowc_tx_params ftxp; 310 311 INP_WLOCK_ASSERT(inp); 312 KASSERT(tp->t_state == TCPS_SYN_SENT || 313 tp->t_state == TCPS_SYN_RECEIVED, 314 ("%s: TCP state %s", __func__, tcpstates[tp->t_state])); 315 316 CTR6(KTR_CXGBE, "%s: tid %d, so %p, inp %p, tp %p, toep %p", 317 __func__, toep->tid, so, inp, tp, toep); 318 319 tp->t_state = TCPS_ESTABLISHED; 320 tp->t_starttime = ticks; 321 TCPSTAT_INC(tcps_connects); 322 323 tp->irs = irs; 324 tcp_rcvseqinit(tp); 325 tp->rcv_wnd = toep->rx_credits << 10; 326 tp->rcv_adv += tp->rcv_wnd; 327 tp->last_ack_sent = tp->rcv_nxt; 328 329 /* 330 * If we were unable to send all rx credits via opt0, save the remainder 331 * in rx_credits so that they can be handed over with the next credit 332 * update. 333 */ 334 SOCKBUF_LOCK(&so->so_rcv); 335 bufsize = select_rcv_wnd(so); 336 SOCKBUF_UNLOCK(&so->so_rcv); 337 toep->rx_credits = bufsize - tp->rcv_wnd; 338 339 tp->iss = iss; 340 tcp_sendseqinit(tp); 341 tp->snd_una = iss + 1; 342 tp->snd_nxt = iss + 1; 343 tp->snd_max = iss + 1; 344 345 assign_rxopt(tp, tcpopt); 346 347 SOCKBUF_LOCK(&so->so_snd); 348 if (so->so_snd.sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf) 349 bufsize = V_tcp_autosndbuf_max; 350 else 351 bufsize = sbspace(&so->so_snd); 352 SOCKBUF_UNLOCK(&so->so_snd); 353 354 ftxp.snd_nxt = tp->snd_nxt; 355 ftxp.rcv_nxt = tp->rcv_nxt; 356 ftxp.snd_space = bufsize; 357 ftxp.mss = tp->t_maxseg; 358 send_flowc_wr(toep, &ftxp); 359 360 soisconnected(so); 361} 362 363static int 364send_rx_credits(struct adapter *sc, struct toepcb *toep, int credits) 365{ 366 struct wrqe *wr; 367 struct cpl_rx_data_ack *req; 368 uint32_t dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1); 369 370 KASSERT(credits >= 0, ("%s: %d credits", __func__, credits)); 371 372 wr = alloc_wrqe(sizeof(*req), toep->ctrlq); 373 if (wr == NULL) 374 return (0); 375 req = wrtod(wr); 376 377 INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid); 378 req->credit_dack = htobe32(dack | V_RX_CREDITS(credits)); 379 380 t4_wrq_tx(sc, wr); 381 return (credits); 382} 383 384void 385t4_rcvd_locked(struct toedev *tod, struct tcpcb *tp) 386{ 387 struct adapter *sc = tod->tod_softc; 388 struct inpcb *inp = tp->t_inpcb; 389 struct socket *so = inp->inp_socket; 390 struct sockbuf *sb = &so->so_rcv; 391 struct toepcb *toep = tp->t_toe; 392 int credits; 393 394 INP_WLOCK_ASSERT(inp); 395 396 SOCKBUF_LOCK_ASSERT(sb); 397 KASSERT(toep->sb_cc >= sbused(sb), 398 ("%s: sb %p has more data (%d) than last time (%d).", 399 __func__, sb, sbused(sb), toep->sb_cc)); 400 401 toep->rx_credits += toep->sb_cc - sbused(sb); 402 toep->sb_cc = sbused(sb); 403 404 if (toep->rx_credits > 0 && 405 (tp->rcv_wnd <= 32 * 1024 || toep->rx_credits >= 64 * 1024 || 406 (toep->rx_credits >= 16 * 1024 && tp->rcv_wnd <= 128 * 1024) || 407 toep->sb_cc + tp->rcv_wnd < sb->sb_lowat)) { 408 409 credits = send_rx_credits(sc, toep, toep->rx_credits); 410 toep->rx_credits -= credits; 411 tp->rcv_wnd += credits; 412 tp->rcv_adv += credits; 413 } 414} 415 416void 417t4_rcvd(struct toedev *tod, struct tcpcb *tp) 418{ 419 struct inpcb *inp = tp->t_inpcb; 420 struct socket *so = inp->inp_socket; 421 struct sockbuf *sb = &so->so_rcv; 422 423 SOCKBUF_LOCK(sb); 424 t4_rcvd_locked(tod, tp); 425 SOCKBUF_UNLOCK(sb); 426} 427 428/* 429 * Close a connection by sending a CPL_CLOSE_CON_REQ message. 430 */ 431static int 432close_conn(struct adapter *sc, struct toepcb *toep) 433{ 434 struct wrqe *wr; 435 struct cpl_close_con_req *req; 436 unsigned int tid = toep->tid; 437 438 CTR3(KTR_CXGBE, "%s: tid %u%s", __func__, toep->tid, 439 toep->flags & TPF_FIN_SENT ? ", IGNORED" : ""); 440 441 if (toep->flags & TPF_FIN_SENT) 442 return (0); 443 444 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 445 ("%s: flowc_wr not sent for tid %u.", __func__, tid)); 446 447 wr = alloc_wrqe(sizeof(*req), toep->ofld_txq); 448 if (wr == NULL) { 449 /* XXX */ 450 panic("%s: allocation failure.", __func__); 451 } 452 req = wrtod(wr); 453 454 req->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) | 455 V_FW_WR_IMMDLEN(sizeof(*req) - sizeof(req->wr))); 456 req->wr.wr_mid = htonl(V_FW_WR_LEN16(howmany(sizeof(*req), 16)) | 457 V_FW_WR_FLOWID(tid)); 458 req->wr.wr_lo = cpu_to_be64(0); 459 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); 460 req->rsvd = 0; 461 462 toep->flags |= TPF_FIN_SENT; 463 toep->flags &= ~TPF_SEND_FIN; 464 t4_l2t_send(sc, wr, toep->l2te); 465 466 return (0); 467} 468 469#define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16) 470#define MIN_OFLD_TX_CREDITS (howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16)) 471 472/* Maximum amount of immediate data we could stuff in a WR */ 473static inline int 474max_imm_payload(int tx_credits) 475{ 476 const int n = 2; /* Use only up to 2 desc for imm. data WR */ 477 478 KASSERT(tx_credits >= 0 && 479 tx_credits <= MAX_OFLD_TX_CREDITS, 480 ("%s: %d credits", __func__, tx_credits)); 481 482 if (tx_credits < MIN_OFLD_TX_CREDITS) 483 return (0); 484 485 if (tx_credits >= (n * EQ_ESIZE) / 16) 486 return ((n * EQ_ESIZE) - sizeof(struct fw_ofld_tx_data_wr)); 487 else 488 return (tx_credits * 16 - sizeof(struct fw_ofld_tx_data_wr)); 489} 490 491/* Maximum number of SGL entries we could stuff in a WR */ 492static inline int 493max_dsgl_nsegs(int tx_credits) 494{ 495 int nseg = 1; /* ulptx_sgl has room for 1, rest ulp_tx_sge_pair */ 496 int sge_pair_credits = tx_credits - MIN_OFLD_TX_CREDITS; 497 498 KASSERT(tx_credits >= 0 && 499 tx_credits <= MAX_OFLD_TX_CREDITS, 500 ("%s: %d credits", __func__, tx_credits)); 501 502 if (tx_credits < MIN_OFLD_TX_CREDITS) 503 return (0); 504 505 nseg += 2 * (sge_pair_credits * 16 / 24); 506 if ((sge_pair_credits * 16) % 24 == 16) 507 nseg++; 508 509 return (nseg); 510} 511 512static inline void 513write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen, 514 unsigned int plen, uint8_t credits, int shove, int ulp_submode, int txalign) 515{ 516 struct fw_ofld_tx_data_wr *txwr = dst; 517 518 txwr->op_to_immdlen = htobe32(V_WR_OP(FW_OFLD_TX_DATA_WR) | 519 V_FW_WR_IMMDLEN(immdlen)); 520 txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) | 521 V_FW_WR_LEN16(credits)); 522 txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(toep->ulp_mode) | 523 V_TX_ULP_SUBMODE(ulp_submode) | V_TX_URG(0) | V_TX_SHOVE(shove)); 524 txwr->plen = htobe32(plen); 525 526 if (txalign > 0) { 527 struct tcpcb *tp = intotcpcb(toep->inp); 528 529 if (plen < 2 * tp->t_maxseg || is_10G_port(toep->vi->pi)) 530 txwr->lsodisable_to_flags |= 531 htobe32(F_FW_OFLD_TX_DATA_WR_LSODISABLE); 532 else 533 txwr->lsodisable_to_flags |= 534 htobe32(F_FW_OFLD_TX_DATA_WR_ALIGNPLD | 535 (tp->t_flags & TF_NODELAY ? 0 : 536 F_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE)); 537 } 538} 539 540/* 541 * Generate a DSGL from a starting mbuf. The total number of segments and the 542 * maximum segments in any one mbuf are provided. 543 */ 544static void 545write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n) 546{ 547 struct mbuf *m; 548 struct ulptx_sgl *usgl = dst; 549 int i, j, rc; 550 struct sglist sg; 551 struct sglist_seg segs[n]; 552 553 KASSERT(nsegs > 0, ("%s: nsegs 0", __func__)); 554 555 sglist_init(&sg, n, segs); 556 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 557 V_ULPTX_NSGE(nsegs)); 558 559 i = -1; 560 for (m = start; m != stop; m = m->m_next) { 561 if (IS_AIOTX_MBUF(m)) 562 rc = sglist_append_vmpages(&sg, aiotx_mbuf_pages(m), 563 aiotx_mbuf_pgoff(m), m->m_len); 564 else 565 rc = sglist_append(&sg, mtod(m, void *), m->m_len); 566 if (__predict_false(rc != 0)) 567 panic("%s: sglist_append %d", __func__, rc); 568 569 for (j = 0; j < sg.sg_nseg; i++, j++) { 570 if (i < 0) { 571 usgl->len0 = htobe32(segs[j].ss_len); 572 usgl->addr0 = htobe64(segs[j].ss_paddr); 573 } else { 574 usgl->sge[i / 2].len[i & 1] = 575 htobe32(segs[j].ss_len); 576 usgl->sge[i / 2].addr[i & 1] = 577 htobe64(segs[j].ss_paddr); 578 } 579#ifdef INVARIANTS 580 nsegs--; 581#endif 582 } 583 sglist_reset(&sg); 584 } 585 if (i & 1) 586 usgl->sge[i / 2].len[1] = htobe32(0); 587 KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, stop %p", 588 __func__, nsegs, start, stop)); 589} 590 591/* 592 * Max number of SGL entries an offload tx work request can have. This is 41 593 * (1 + 40) for a full 512B work request. 594 * fw_ofld_tx_data_wr(16B) + ulptx_sgl(16B, 1) + ulptx_sge_pair(480B, 40) 595 */ 596#define OFLD_SGL_LEN (41) 597 598/* 599 * Send data and/or a FIN to the peer. 600 * 601 * The socket's so_snd buffer consists of a stream of data starting with sb_mb 602 * and linked together with m_next. sb_sndptr, if set, is the last mbuf that 603 * was transmitted. 604 * 605 * drop indicates the number of bytes that should be dropped from the head of 606 * the send buffer. It is an optimization that lets do_fw4_ack avoid creating 607 * contention on the send buffer lock (before this change it used to do 608 * sowwakeup and then t4_push_frames right after that when recovering from tx 609 * stalls). When drop is set this function MUST drop the bytes and wake up any 610 * writers. 611 */ 612void 613t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop) 614{ 615 struct mbuf *sndptr, *m, *sb_sndptr; 616 struct fw_ofld_tx_data_wr *txwr; 617 struct wrqe *wr; 618 u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf; 619 struct inpcb *inp = toep->inp; 620 struct tcpcb *tp = intotcpcb(inp); 621 struct socket *so = inp->inp_socket; 622 struct sockbuf *sb = &so->so_snd; 623 int tx_credits, shove, compl, sowwakeup; 624 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; 625 bool aiotx_mbuf_seen; 626 627 INP_WLOCK_ASSERT(inp); 628 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 629 ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid)); 630 631 KASSERT(toep->ulp_mode == ULP_MODE_NONE || 632 toep->ulp_mode == ULP_MODE_TCPDDP || 633 toep->ulp_mode == ULP_MODE_RDMA, 634 ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep)); 635 636#ifdef VERBOSE_TRACES 637 CTR4(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d", 638 __func__, toep->tid, toep->flags, tp->t_flags); 639#endif 640 if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) 641 return; 642 643 /* 644 * This function doesn't resume by itself. Someone else must clear the 645 * flag and call this function. 646 */ 647 if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) { 648 KASSERT(drop == 0, 649 ("%s: drop (%d) != 0 but tx is suspended", __func__, drop)); 650 return; 651 } 652 653 do { 654 tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS); 655 max_imm = max_imm_payload(tx_credits); 656 max_nsegs = max_dsgl_nsegs(tx_credits); 657 658 SOCKBUF_LOCK(sb); 659 sowwakeup = drop; 660 if (drop) { 661 sbdrop_locked(sb, drop); 662 drop = 0; 663 } 664 sb_sndptr = sb->sb_sndptr; 665 sndptr = sb_sndptr ? sb_sndptr->m_next : sb->sb_mb; 666 plen = 0; 667 nsegs = 0; 668 max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */ 669 aiotx_mbuf_seen = false; 670 for (m = sndptr; m != NULL; m = m->m_next) { 671 int n; 672 673 if (IS_AIOTX_MBUF(m)) 674 n = sglist_count_vmpages(aiotx_mbuf_pages(m), 675 aiotx_mbuf_pgoff(m), m->m_len); 676 else 677 n = sglist_count(mtod(m, void *), m->m_len); 678 679 nsegs += n; 680 plen += m->m_len; 681 682 /* This mbuf sent us _over_ the nsegs limit, back out */ 683 if (plen > max_imm && nsegs > max_nsegs) { 684 nsegs -= n; 685 plen -= m->m_len; 686 if (plen == 0) { 687 /* Too few credits */ 688 toep->flags |= TPF_TX_SUSPENDED; 689 if (sowwakeup) { 690 if (!TAILQ_EMPTY( 691 &toep->aiotx_jobq)) 692 t4_aiotx_queue_toep( 693 toep); 694 sowwakeup_locked(so); 695 } else 696 SOCKBUF_UNLOCK(sb); 697 SOCKBUF_UNLOCK_ASSERT(sb); 698 return; 699 } 700 break; 701 } 702 703 if (IS_AIOTX_MBUF(m)) 704 aiotx_mbuf_seen = true; 705 if (max_nsegs_1mbuf < n) 706 max_nsegs_1mbuf = n; 707 sb_sndptr = m; /* new sb->sb_sndptr if all goes well */ 708 709 /* This mbuf put us right at the max_nsegs limit */ 710 if (plen > max_imm && nsegs == max_nsegs) { 711 m = m->m_next; 712 break; 713 } 714 } 715 716 if (sbused(sb) > sb->sb_hiwat * 5 / 8 && 717 toep->plen_nocompl + plen >= sb->sb_hiwat / 4) 718 compl = 1; 719 else 720 compl = 0; 721 722 if (sb->sb_flags & SB_AUTOSIZE && 723 V_tcp_do_autosndbuf && 724 sb->sb_hiwat < V_tcp_autosndbuf_max && 725 sbused(sb) >= sb->sb_hiwat * 7 / 8) { 726 int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc, 727 V_tcp_autosndbuf_max); 728 729 if (!sbreserve_locked(sb, newsize, so, NULL)) 730 sb->sb_flags &= ~SB_AUTOSIZE; 731 else 732 sowwakeup = 1; /* room available */ 733 } 734 if (sowwakeup) { 735 if (!TAILQ_EMPTY(&toep->aiotx_jobq)) 736 t4_aiotx_queue_toep(toep); 737 sowwakeup_locked(so); 738 } else 739 SOCKBUF_UNLOCK(sb); 740 SOCKBUF_UNLOCK_ASSERT(sb); 741 742 /* nothing to send */ 743 if (plen == 0) { 744 KASSERT(m == NULL, 745 ("%s: nothing to send, but m != NULL", __func__)); 746 break; 747 } 748 749 if (__predict_false(toep->flags & TPF_FIN_SENT)) 750 panic("%s: excess tx.", __func__); 751 752 shove = m == NULL && !(tp->t_flags & TF_MORETOCOME); 753 if (plen <= max_imm && !aiotx_mbuf_seen) { 754 755 /* Immediate data tx */ 756 757 wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16), 758 toep->ofld_txq); 759 if (wr == NULL) { 760 /* XXX: how will we recover from this? */ 761 toep->flags |= TPF_TX_SUSPENDED; 762 return; 763 } 764 txwr = wrtod(wr); 765 credits = howmany(wr->wr_len, 16); 766 write_tx_wr(txwr, toep, plen, plen, credits, shove, 0, 767 sc->tt.tx_align); 768 m_copydata(sndptr, 0, plen, (void *)(txwr + 1)); 769 nsegs = 0; 770 } else { 771 int wr_len; 772 773 /* DSGL tx */ 774 775 wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) + 776 ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8; 777 wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq); 778 if (wr == NULL) { 779 /* XXX: how will we recover from this? */ 780 toep->flags |= TPF_TX_SUSPENDED; 781 return; 782 } 783 txwr = wrtod(wr); 784 credits = howmany(wr_len, 16); 785 write_tx_wr(txwr, toep, 0, plen, credits, shove, 0, 786 sc->tt.tx_align); 787 write_tx_sgl(txwr + 1, sndptr, m, nsegs, 788 max_nsegs_1mbuf); 789 if (wr_len & 0xf) { 790 uint64_t *pad = (uint64_t *) 791 ((uintptr_t)txwr + wr_len); 792 *pad = 0; 793 } 794 } 795 796 KASSERT(toep->tx_credits >= credits, 797 ("%s: not enough credits", __func__)); 798 799 toep->tx_credits -= credits; 800 toep->tx_nocompl += credits; 801 toep->plen_nocompl += plen; 802 if (toep->tx_credits <= toep->tx_total * 3 / 8 && 803 toep->tx_nocompl >= toep->tx_total / 4) 804 compl = 1; 805 806 if (compl || toep->ulp_mode == ULP_MODE_RDMA) { 807 txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL); 808 toep->tx_nocompl = 0; 809 toep->plen_nocompl = 0; 810 } 811 812 tp->snd_nxt += plen; 813 tp->snd_max += plen; 814 815 SOCKBUF_LOCK(sb); 816 KASSERT(sb_sndptr, ("%s: sb_sndptr is NULL", __func__)); 817 sb->sb_sndptr = sb_sndptr; 818 SOCKBUF_UNLOCK(sb); 819 820 toep->flags |= TPF_TX_DATA_SENT; 821 if (toep->tx_credits < MIN_OFLD_TX_CREDITS) 822 toep->flags |= TPF_TX_SUSPENDED; 823 824 KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__)); 825 txsd->plen = plen; 826 txsd->tx_credits = credits; 827 txsd++; 828 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) { 829 toep->txsd_pidx = 0; 830 txsd = &toep->txsd[0]; 831 } 832 toep->txsd_avail--; 833 834 t4_l2t_send(sc, wr, toep->l2te); 835 } while (m != NULL); 836 837 /* Send a FIN if requested, but only if there's no more data to send */ 838 if (m == NULL && toep->flags & TPF_SEND_FIN) 839 close_conn(sc, toep); 840} 841 842static inline void 843rqdrop_locked(struct mbufq *q, int plen) 844{ 845 struct mbuf *m; 846 847 while (plen > 0) { 848 m = mbufq_dequeue(q); 849 850 /* Too many credits. */ 851 MPASS(m != NULL); 852 M_ASSERTPKTHDR(m); 853 854 /* Partial credits. */ 855 MPASS(plen >= m->m_pkthdr.len); 856 857 plen -= m->m_pkthdr.len; 858 m_freem(m); 859 } 860} 861 862void 863t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop) 864{ 865 struct mbuf *sndptr, *m; 866 struct fw_ofld_tx_data_wr *txwr; 867 struct wrqe *wr; 868 u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf; 869 u_int adjusted_plen, ulp_submode; 870 struct inpcb *inp = toep->inp; 871 struct tcpcb *tp = intotcpcb(inp); 872 int tx_credits, shove; 873 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; 874 struct mbufq *pduq = &toep->ulp_pduq; 875 static const u_int ulp_extra_len[] = {0, 4, 4, 8}; 876 877 INP_WLOCK_ASSERT(inp); 878 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 879 ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid)); 880 KASSERT(toep->ulp_mode == ULP_MODE_ISCSI, 881 ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep)); 882 883 if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) 884 return; 885 886 /* 887 * This function doesn't resume by itself. Someone else must clear the 888 * flag and call this function. 889 */ 890 if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) { 891 KASSERT(drop == 0, 892 ("%s: drop (%d) != 0 but tx is suspended", __func__, drop)); 893 return; 894 } 895 896 if (drop) 897 rqdrop_locked(&toep->ulp_pdu_reclaimq, drop); 898 899 while ((sndptr = mbufq_first(pduq)) != NULL) { 900 M_ASSERTPKTHDR(sndptr); 901 902 tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS); 903 max_imm = max_imm_payload(tx_credits); 904 max_nsegs = max_dsgl_nsegs(tx_credits); 905 906 plen = 0; 907 nsegs = 0; 908 max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */ 909 for (m = sndptr; m != NULL; m = m->m_next) { 910 int n = sglist_count(mtod(m, void *), m->m_len); 911 912 nsegs += n; 913 plen += m->m_len; 914 915 /* 916 * This mbuf would send us _over_ the nsegs limit. 917 * Suspend tx because the PDU can't be sent out. 918 */ 919 if (plen > max_imm && nsegs > max_nsegs) { 920 toep->flags |= TPF_TX_SUSPENDED; 921 return; 922 } 923 924 if (max_nsegs_1mbuf < n) 925 max_nsegs_1mbuf = n; 926 } 927 928 if (__predict_false(toep->flags & TPF_FIN_SENT)) 929 panic("%s: excess tx.", __func__); 930 931 /* 932 * We have a PDU to send. All of it goes out in one WR so 'm' 933 * is NULL. A PDU's length is always a multiple of 4. 934 */ 935 MPASS(m == NULL); 936 MPASS((plen & 3) == 0); 937 MPASS(sndptr->m_pkthdr.len == plen); 938 939 shove = !(tp->t_flags & TF_MORETOCOME); 940 ulp_submode = mbuf_ulp_submode(sndptr); 941 MPASS(ulp_submode < nitems(ulp_extra_len)); 942 943 /* 944 * plen doesn't include header and data digests, which are 945 * generated and inserted in the right places by the TOE, but 946 * they do occupy TCP sequence space and need to be accounted 947 * for. 948 */ 949 adjusted_plen = plen + ulp_extra_len[ulp_submode]; 950 if (plen <= max_imm) { 951 952 /* Immediate data tx */ 953 954 wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16), 955 toep->ofld_txq); 956 if (wr == NULL) { 957 /* XXX: how will we recover from this? */ 958 toep->flags |= TPF_TX_SUSPENDED; 959 return; 960 } 961 txwr = wrtod(wr); 962 credits = howmany(wr->wr_len, 16); 963 write_tx_wr(txwr, toep, plen, adjusted_plen, credits, 964 shove, ulp_submode, sc->tt.tx_align); 965 m_copydata(sndptr, 0, plen, (void *)(txwr + 1)); 966 nsegs = 0; 967 } else { 968 int wr_len; 969 970 /* DSGL tx */ 971 wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) + 972 ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8; 973 wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq); 974 if (wr == NULL) { 975 /* XXX: how will we recover from this? */ 976 toep->flags |= TPF_TX_SUSPENDED; 977 return; 978 } 979 txwr = wrtod(wr); 980 credits = howmany(wr_len, 16); 981 write_tx_wr(txwr, toep, 0, adjusted_plen, credits, 982 shove, ulp_submode, sc->tt.tx_align); 983 write_tx_sgl(txwr + 1, sndptr, m, nsegs, 984 max_nsegs_1mbuf); 985 if (wr_len & 0xf) { 986 uint64_t *pad = (uint64_t *) 987 ((uintptr_t)txwr + wr_len); 988 *pad = 0; 989 } 990 } 991 992 KASSERT(toep->tx_credits >= credits, 993 ("%s: not enough credits", __func__)); 994 995 m = mbufq_dequeue(pduq); 996 MPASS(m == sndptr); 997 mbufq_enqueue(&toep->ulp_pdu_reclaimq, m); 998 999 toep->tx_credits -= credits; 1000 toep->tx_nocompl += credits; 1001 toep->plen_nocompl += plen; 1002 if (toep->tx_credits <= toep->tx_total * 3 / 8 && 1003 toep->tx_nocompl >= toep->tx_total / 4) { 1004 txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL); 1005 toep->tx_nocompl = 0; 1006 toep->plen_nocompl = 0; 1007 } 1008 1009 tp->snd_nxt += adjusted_plen; 1010 tp->snd_max += adjusted_plen; 1011 1012 toep->flags |= TPF_TX_DATA_SENT; 1013 if (toep->tx_credits < MIN_OFLD_TX_CREDITS) 1014 toep->flags |= TPF_TX_SUSPENDED; 1015 1016 KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__)); 1017 txsd->plen = plen; 1018 txsd->tx_credits = credits; 1019 txsd++; 1020 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) { 1021 toep->txsd_pidx = 0; 1022 txsd = &toep->txsd[0]; 1023 } 1024 toep->txsd_avail--; 1025 1026 t4_l2t_send(sc, wr, toep->l2te); 1027 } 1028 1029 /* Send a FIN if requested, but only if there are no more PDUs to send */ 1030 if (mbufq_first(pduq) == NULL && toep->flags & TPF_SEND_FIN) 1031 close_conn(sc, toep); 1032} 1033 1034int 1035t4_tod_output(struct toedev *tod, struct tcpcb *tp) 1036{ 1037 struct adapter *sc = tod->tod_softc; 1038#ifdef INVARIANTS 1039 struct inpcb *inp = tp->t_inpcb; 1040#endif 1041 struct toepcb *toep = tp->t_toe; 1042 1043 INP_WLOCK_ASSERT(inp); 1044 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 1045 ("%s: inp %p dropped.", __func__, inp)); 1046 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 1047 1048 if (toep->ulp_mode == ULP_MODE_ISCSI) 1049 t4_push_pdus(sc, toep, 0); 1050 else 1051 t4_push_frames(sc, toep, 0); 1052 1053 return (0); 1054} 1055 1056int 1057t4_send_fin(struct toedev *tod, struct tcpcb *tp) 1058{ 1059 struct adapter *sc = tod->tod_softc; 1060#ifdef INVARIANTS 1061 struct inpcb *inp = tp->t_inpcb; 1062#endif 1063 struct toepcb *toep = tp->t_toe; 1064 1065 INP_WLOCK_ASSERT(inp); 1066 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 1067 ("%s: inp %p dropped.", __func__, inp)); 1068 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 1069 1070 toep->flags |= TPF_SEND_FIN; 1071 if (tp->t_state >= TCPS_ESTABLISHED) { 1072 if (toep->ulp_mode == ULP_MODE_ISCSI) 1073 t4_push_pdus(sc, toep, 0); 1074 else 1075 t4_push_frames(sc, toep, 0); 1076 } 1077 1078 return (0); 1079} 1080 1081int 1082t4_send_rst(struct toedev *tod, struct tcpcb *tp) 1083{ 1084 struct adapter *sc = tod->tod_softc; 1085#if defined(INVARIANTS) 1086 struct inpcb *inp = tp->t_inpcb; 1087#endif 1088 struct toepcb *toep = tp->t_toe; 1089 1090 INP_WLOCK_ASSERT(inp); 1091 KASSERT((inp->inp_flags & INP_DROPPED) == 0, 1092 ("%s: inp %p dropped.", __func__, inp)); 1093 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 1094 1095 /* hmmmm */ 1096 KASSERT(toep->flags & TPF_FLOWC_WR_SENT, 1097 ("%s: flowc for tid %u [%s] not sent already", 1098 __func__, toep->tid, tcpstates[tp->t_state])); 1099 1100 send_reset(sc, toep, 0); 1101 return (0); 1102} 1103 1104/* 1105 * Peer has sent us a FIN. 1106 */ 1107static int 1108do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1109{ 1110 struct adapter *sc = iq->adapter; 1111 const struct cpl_peer_close *cpl = (const void *)(rss + 1); 1112 unsigned int tid = GET_TID(cpl); 1113 struct toepcb *toep = lookup_tid(sc, tid); 1114 struct inpcb *inp = toep->inp; 1115 struct tcpcb *tp = NULL; 1116 struct socket *so; 1117#ifdef INVARIANTS 1118 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1119#endif 1120 1121 KASSERT(opcode == CPL_PEER_CLOSE, 1122 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1123 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1124 1125 if (__predict_false(toep->flags & TPF_SYNQE)) { 1126#ifdef INVARIANTS 1127 struct synq_entry *synqe = (void *)toep; 1128 1129 INP_WLOCK(synqe->lctx->inp); 1130 if (synqe->flags & TPF_SYNQE_HAS_L2TE) { 1131 KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN, 1132 ("%s: listen socket closed but tid %u not aborted.", 1133 __func__, tid)); 1134 } else { 1135 /* 1136 * do_pass_accept_req is still running and will 1137 * eventually take care of this tid. 1138 */ 1139 } 1140 INP_WUNLOCK(synqe->lctx->inp); 1141#endif 1142 CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid, 1143 toep, toep->flags); 1144 return (0); 1145 } 1146 1147 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1148 1149 CURVNET_SET(toep->vnet); 1150 INP_INFO_RLOCK(&V_tcbinfo); 1151 INP_WLOCK(inp); 1152 tp = intotcpcb(inp); 1153 1154 CTR5(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x, inp %p", __func__, 1155 tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, inp); 1156 1157 if (toep->flags & TPF_ABORT_SHUTDOWN) 1158 goto done; 1159 1160 tp->rcv_nxt++; /* FIN */ 1161 1162 so = inp->inp_socket; 1163 if (toep->ulp_mode == ULP_MODE_TCPDDP) { 1164 DDP_LOCK(toep); 1165 if (__predict_false(toep->ddp_flags & 1166 (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE))) 1167 handle_ddp_close(toep, tp, cpl->rcv_nxt); 1168 DDP_UNLOCK(toep); 1169 } 1170 socantrcvmore(so); 1171 1172 if (toep->ulp_mode != ULP_MODE_RDMA) { 1173 KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt), 1174 ("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt, 1175 be32toh(cpl->rcv_nxt))); 1176 } 1177 1178 switch (tp->t_state) { 1179 case TCPS_SYN_RECEIVED: 1180 tp->t_starttime = ticks; 1181 /* FALLTHROUGH */ 1182 1183 case TCPS_ESTABLISHED: 1184 tp->t_state = TCPS_CLOSE_WAIT; 1185 break; 1186 1187 case TCPS_FIN_WAIT_1: 1188 tp->t_state = TCPS_CLOSING; 1189 break; 1190 1191 case TCPS_FIN_WAIT_2: 1192 tcp_twstart(tp); 1193 INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */ 1194 INP_INFO_RUNLOCK(&V_tcbinfo); 1195 CURVNET_RESTORE(); 1196 1197 INP_WLOCK(inp); 1198 final_cpl_received(toep); 1199 return (0); 1200 1201 default: 1202 log(LOG_ERR, "%s: TID %u received CPL_PEER_CLOSE in state %d\n", 1203 __func__, tid, tp->t_state); 1204 } 1205done: 1206 INP_WUNLOCK(inp); 1207 INP_INFO_RUNLOCK(&V_tcbinfo); 1208 CURVNET_RESTORE(); 1209 return (0); 1210} 1211 1212/* 1213 * Peer has ACK'd our FIN. 1214 */ 1215static int 1216do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss, 1217 struct mbuf *m) 1218{ 1219 struct adapter *sc = iq->adapter; 1220 const struct cpl_close_con_rpl *cpl = (const void *)(rss + 1); 1221 unsigned int tid = GET_TID(cpl); 1222 struct toepcb *toep = lookup_tid(sc, tid); 1223 struct inpcb *inp = toep->inp; 1224 struct tcpcb *tp = NULL; 1225 struct socket *so = NULL; 1226#ifdef INVARIANTS 1227 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1228#endif 1229 1230 KASSERT(opcode == CPL_CLOSE_CON_RPL, 1231 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1232 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1233 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1234 1235 CURVNET_SET(toep->vnet); 1236 INP_INFO_RLOCK(&V_tcbinfo); 1237 INP_WLOCK(inp); 1238 tp = intotcpcb(inp); 1239 1240 CTR4(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x", 1241 __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags); 1242 1243 if (toep->flags & TPF_ABORT_SHUTDOWN) 1244 goto done; 1245 1246 so = inp->inp_socket; 1247 tp->snd_una = be32toh(cpl->snd_nxt) - 1; /* exclude FIN */ 1248 1249 switch (tp->t_state) { 1250 case TCPS_CLOSING: /* see TCPS_FIN_WAIT_2 in do_peer_close too */ 1251 tcp_twstart(tp); 1252release: 1253 INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */ 1254 INP_INFO_RUNLOCK(&V_tcbinfo); 1255 CURVNET_RESTORE(); 1256 1257 INP_WLOCK(inp); 1258 final_cpl_received(toep); /* no more CPLs expected */ 1259 1260 return (0); 1261 case TCPS_LAST_ACK: 1262 if (tcp_close(tp)) 1263 INP_WUNLOCK(inp); 1264 goto release; 1265 1266 case TCPS_FIN_WAIT_1: 1267 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 1268 soisdisconnected(so); 1269 tp->t_state = TCPS_FIN_WAIT_2; 1270 break; 1271 1272 default: 1273 log(LOG_ERR, 1274 "%s: TID %u received CPL_CLOSE_CON_RPL in state %s\n", 1275 __func__, tid, tcpstates[tp->t_state]); 1276 } 1277done: 1278 INP_WUNLOCK(inp); 1279 INP_INFO_RUNLOCK(&V_tcbinfo); 1280 CURVNET_RESTORE(); 1281 return (0); 1282} 1283 1284void 1285send_abort_rpl(struct adapter *sc, struct sge_wrq *ofld_txq, int tid, 1286 int rst_status) 1287{ 1288 struct wrqe *wr; 1289 struct cpl_abort_rpl *cpl; 1290 1291 wr = alloc_wrqe(sizeof(*cpl), ofld_txq); 1292 if (wr == NULL) { 1293 /* XXX */ 1294 panic("%s: allocation failure.", __func__); 1295 } 1296 cpl = wrtod(wr); 1297 1298 INIT_TP_WR_MIT_CPL(cpl, CPL_ABORT_RPL, tid); 1299 cpl->cmd = rst_status; 1300 1301 t4_wrq_tx(sc, wr); 1302} 1303 1304static int 1305abort_status_to_errno(struct tcpcb *tp, unsigned int abort_reason) 1306{ 1307 switch (abort_reason) { 1308 case CPL_ERR_BAD_SYN: 1309 case CPL_ERR_CONN_RESET: 1310 return (tp->t_state == TCPS_CLOSE_WAIT ? EPIPE : ECONNRESET); 1311 case CPL_ERR_XMIT_TIMEDOUT: 1312 case CPL_ERR_PERSIST_TIMEDOUT: 1313 case CPL_ERR_FINWAIT2_TIMEDOUT: 1314 case CPL_ERR_KEEPALIVE_TIMEDOUT: 1315 return (ETIMEDOUT); 1316 default: 1317 return (EIO); 1318 } 1319} 1320 1321/* 1322 * TCP RST from the peer, timeout, or some other such critical error. 1323 */ 1324static int 1325do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1326{ 1327 struct adapter *sc = iq->adapter; 1328 const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1); 1329 unsigned int tid = GET_TID(cpl); 1330 struct toepcb *toep = lookup_tid(sc, tid); 1331 struct sge_wrq *ofld_txq = toep->ofld_txq; 1332 struct inpcb *inp; 1333 struct tcpcb *tp; 1334#ifdef INVARIANTS 1335 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1336#endif 1337 1338 KASSERT(opcode == CPL_ABORT_REQ_RSS, 1339 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1340 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1341 1342 if (toep->flags & TPF_SYNQE) 1343 return (do_abort_req_synqe(iq, rss, m)); 1344 1345 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1346 1347 if (negative_advice(cpl->status)) { 1348 CTR4(KTR_CXGBE, "%s: negative advice %d for tid %d (0x%x)", 1349 __func__, cpl->status, tid, toep->flags); 1350 return (0); /* Ignore negative advice */ 1351 } 1352 1353 inp = toep->inp; 1354 CURVNET_SET(toep->vnet); 1355 INP_INFO_RLOCK(&V_tcbinfo); /* for tcp_close */ 1356 INP_WLOCK(inp); 1357 1358 tp = intotcpcb(inp); 1359 1360 CTR6(KTR_CXGBE, 1361 "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x, status %d", 1362 __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, 1363 inp->inp_flags, cpl->status); 1364 1365 /* 1366 * If we'd initiated an abort earlier the reply to it is responsible for 1367 * cleaning up resources. Otherwise we tear everything down right here 1368 * right now. We owe the T4 a CPL_ABORT_RPL no matter what. 1369 */ 1370 if (toep->flags & TPF_ABORT_SHUTDOWN) { 1371 INP_WUNLOCK(inp); 1372 goto done; 1373 } 1374 toep->flags |= TPF_ABORT_SHUTDOWN; 1375 1376 if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) { 1377 struct socket *so = inp->inp_socket; 1378 1379 if (so != NULL) 1380 so_error_set(so, abort_status_to_errno(tp, 1381 cpl->status)); 1382 tp = tcp_close(tp); 1383 if (tp == NULL) 1384 INP_WLOCK(inp); /* re-acquire */ 1385 } 1386 1387 final_cpl_received(toep); 1388done: 1389 INP_INFO_RUNLOCK(&V_tcbinfo); 1390 CURVNET_RESTORE(); 1391 send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST); 1392 return (0); 1393} 1394 1395/* 1396 * Reply to the CPL_ABORT_REQ (send_reset) 1397 */ 1398static int 1399do_abort_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1400{ 1401 struct adapter *sc = iq->adapter; 1402 const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1); 1403 unsigned int tid = GET_TID(cpl); 1404 struct toepcb *toep = lookup_tid(sc, tid); 1405 struct inpcb *inp = toep->inp; 1406#ifdef INVARIANTS 1407 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1408#endif 1409 1410 KASSERT(opcode == CPL_ABORT_RPL_RSS, 1411 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1412 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1413 1414 if (toep->flags & TPF_SYNQE) 1415 return (do_abort_rpl_synqe(iq, rss, m)); 1416 1417 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1418 1419 CTR5(KTR_CXGBE, "%s: tid %u, toep %p, inp %p, status %d", 1420 __func__, tid, toep, inp, cpl->status); 1421 1422 KASSERT(toep->flags & TPF_ABORT_SHUTDOWN, 1423 ("%s: wasn't expecting abort reply", __func__)); 1424 1425 INP_WLOCK(inp); 1426 final_cpl_received(toep); 1427 1428 return (0); 1429} 1430 1431static int 1432do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1433{ 1434 struct adapter *sc = iq->adapter; 1435 const struct cpl_rx_data *cpl = mtod(m, const void *); 1436 unsigned int tid = GET_TID(cpl); 1437 struct toepcb *toep = lookup_tid(sc, tid); 1438 struct inpcb *inp = toep->inp; 1439 struct tcpcb *tp; 1440 struct socket *so; 1441 struct sockbuf *sb; 1442 int len; 1443 uint32_t ddp_placed = 0; 1444 1445 if (__predict_false(toep->flags & TPF_SYNQE)) { 1446#ifdef INVARIANTS 1447 struct synq_entry *synqe = (void *)toep; 1448 1449 INP_WLOCK(synqe->lctx->inp); 1450 if (synqe->flags & TPF_SYNQE_HAS_L2TE) { 1451 KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN, 1452 ("%s: listen socket closed but tid %u not aborted.", 1453 __func__, tid)); 1454 } else { 1455 /* 1456 * do_pass_accept_req is still running and will 1457 * eventually take care of this tid. 1458 */ 1459 } 1460 INP_WUNLOCK(synqe->lctx->inp); 1461#endif 1462 CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid, 1463 toep, toep->flags); 1464 m_freem(m); 1465 return (0); 1466 } 1467 1468 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1469 1470 /* strip off CPL header */ 1471 m_adj(m, sizeof(*cpl)); 1472 len = m->m_pkthdr.len; 1473 1474 INP_WLOCK(inp); 1475 if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) { 1476 CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x", 1477 __func__, tid, len, inp->inp_flags); 1478 INP_WUNLOCK(inp); 1479 m_freem(m); 1480 return (0); 1481 } 1482 1483 tp = intotcpcb(inp); 1484 1485 if (__predict_false(tp->rcv_nxt != be32toh(cpl->seq))) 1486 ddp_placed = be32toh(cpl->seq) - tp->rcv_nxt; 1487 1488 tp->rcv_nxt += len; 1489 if (tp->rcv_wnd < len) { 1490 KASSERT(toep->ulp_mode == ULP_MODE_RDMA, 1491 ("%s: negative window size", __func__)); 1492 } 1493 1494 tp->rcv_wnd -= len; 1495 tp->t_rcvtime = ticks; 1496 1497 if (toep->ulp_mode == ULP_MODE_TCPDDP) 1498 DDP_LOCK(toep); 1499 so = inp_inpcbtosocket(inp); 1500 sb = &so->so_rcv; 1501 SOCKBUF_LOCK(sb); 1502 1503 if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) { 1504 CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)", 1505 __func__, tid, len); 1506 m_freem(m); 1507 SOCKBUF_UNLOCK(sb); 1508 if (toep->ulp_mode == ULP_MODE_TCPDDP) 1509 DDP_UNLOCK(toep); 1510 INP_WUNLOCK(inp); 1511 1512 CURVNET_SET(toep->vnet); 1513 INP_INFO_RLOCK(&V_tcbinfo); 1514 INP_WLOCK(inp); 1515 tp = tcp_drop(tp, ECONNRESET); 1516 if (tp) 1517 INP_WUNLOCK(inp); 1518 INP_INFO_RUNLOCK(&V_tcbinfo); 1519 CURVNET_RESTORE(); 1520 1521 return (0); 1522 } 1523 1524 /* receive buffer autosize */ 1525 MPASS(toep->vnet == so->so_vnet); 1526 CURVNET_SET(toep->vnet); 1527 if (sb->sb_flags & SB_AUTOSIZE && 1528 V_tcp_do_autorcvbuf && 1529 sb->sb_hiwat < V_tcp_autorcvbuf_max && 1530 len > (sbspace(sb) / 8 * 7)) { 1531 unsigned int hiwat = sb->sb_hiwat; 1532 unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc, 1533 V_tcp_autorcvbuf_max); 1534 1535 if (!sbreserve_locked(sb, newsize, so, NULL)) 1536 sb->sb_flags &= ~SB_AUTOSIZE; 1537 else 1538 toep->rx_credits += newsize - hiwat; 1539 } 1540 1541 if (toep->ddp_waiting_count != 0 || toep->ddp_active_count != 0) 1542 CTR3(KTR_CXGBE, "%s: tid %u, non-ddp rx (%d bytes)", __func__, 1543 tid, len); 1544 1545 if (toep->ulp_mode == ULP_MODE_TCPDDP) { 1546 int changed = !(toep->ddp_flags & DDP_ON) ^ cpl->ddp_off; 1547 1548 if (changed) { 1549 if (toep->ddp_flags & DDP_SC_REQ) 1550 toep->ddp_flags ^= DDP_ON | DDP_SC_REQ; 1551 else { 1552 KASSERT(cpl->ddp_off == 1, 1553 ("%s: DDP switched on by itself.", 1554 __func__)); 1555 1556 /* Fell out of DDP mode */ 1557 toep->ddp_flags &= ~DDP_ON; 1558 CTR1(KTR_CXGBE, "%s: fell out of DDP mode", 1559 __func__); 1560 1561 insert_ddp_data(toep, ddp_placed); 1562 } 1563 } 1564 1565 if (toep->ddp_flags & DDP_ON) { 1566 /* 1567 * CPL_RX_DATA with DDP on can only be an indicate. 1568 * Start posting queued AIO requests via DDP. The 1569 * payload that arrived in this indicate is appended 1570 * to the socket buffer as usual. 1571 */ 1572 handle_ddp_indicate(toep); 1573 } 1574 } 1575 1576 KASSERT(toep->sb_cc >= sbused(sb), 1577 ("%s: sb %p has more data (%d) than last time (%d).", 1578 __func__, sb, sbused(sb), toep->sb_cc)); 1579 toep->rx_credits += toep->sb_cc - sbused(sb); 1580 sbappendstream_locked(sb, m, 0); 1581 toep->sb_cc = sbused(sb); 1582 if (toep->rx_credits > 0 && toep->sb_cc + tp->rcv_wnd < sb->sb_lowat) { 1583 int credits; 1584 1585 credits = send_rx_credits(sc, toep, toep->rx_credits); 1586 toep->rx_credits -= credits; 1587 tp->rcv_wnd += credits; 1588 tp->rcv_adv += credits; 1589 } 1590 1591 if (toep->ddp_waiting_count > 0 && sbavail(sb) != 0) { 1592 CTR2(KTR_CXGBE, "%s: tid %u queueing AIO task", __func__, 1593 tid); 1594 ddp_queue_toep(toep); 1595 } 1596 sorwakeup_locked(so); 1597 SOCKBUF_UNLOCK_ASSERT(sb); 1598 if (toep->ulp_mode == ULP_MODE_TCPDDP) 1599 DDP_UNLOCK(toep); 1600 1601 INP_WUNLOCK(inp); 1602 CURVNET_RESTORE(); 1603 return (0); 1604} 1605 1606#define S_CPL_FW4_ACK_OPCODE 24 1607#define M_CPL_FW4_ACK_OPCODE 0xff 1608#define V_CPL_FW4_ACK_OPCODE(x) ((x) << S_CPL_FW4_ACK_OPCODE) 1609#define G_CPL_FW4_ACK_OPCODE(x) \ 1610 (((x) >> S_CPL_FW4_ACK_OPCODE) & M_CPL_FW4_ACK_OPCODE) 1611 1612#define S_CPL_FW4_ACK_FLOWID 0 1613#define M_CPL_FW4_ACK_FLOWID 0xffffff 1614#define V_CPL_FW4_ACK_FLOWID(x) ((x) << S_CPL_FW4_ACK_FLOWID) 1615#define G_CPL_FW4_ACK_FLOWID(x) \ 1616 (((x) >> S_CPL_FW4_ACK_FLOWID) & M_CPL_FW4_ACK_FLOWID) 1617 1618#define S_CPL_FW4_ACK_CR 24 1619#define M_CPL_FW4_ACK_CR 0xff 1620#define V_CPL_FW4_ACK_CR(x) ((x) << S_CPL_FW4_ACK_CR) 1621#define G_CPL_FW4_ACK_CR(x) (((x) >> S_CPL_FW4_ACK_CR) & M_CPL_FW4_ACK_CR) 1622 1623#define S_CPL_FW4_ACK_SEQVAL 0 1624#define M_CPL_FW4_ACK_SEQVAL 0x1 1625#define V_CPL_FW4_ACK_SEQVAL(x) ((x) << S_CPL_FW4_ACK_SEQVAL) 1626#define G_CPL_FW4_ACK_SEQVAL(x) \ 1627 (((x) >> S_CPL_FW4_ACK_SEQVAL) & M_CPL_FW4_ACK_SEQVAL) 1628#define F_CPL_FW4_ACK_SEQVAL V_CPL_FW4_ACK_SEQVAL(1U) 1629 1630static int 1631do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1632{ 1633 struct adapter *sc = iq->adapter; 1634 const struct cpl_fw4_ack *cpl = (const void *)(rss + 1); 1635 unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl))); 1636 struct toepcb *toep = lookup_tid(sc, tid); 1637 struct inpcb *inp; 1638 struct tcpcb *tp; 1639 struct socket *so; 1640 uint8_t credits = cpl->credits; 1641 struct ofld_tx_sdesc *txsd; 1642 int plen; 1643#ifdef INVARIANTS 1644 unsigned int opcode = G_CPL_FW4_ACK_OPCODE(be32toh(OPCODE_TID(cpl))); 1645#endif 1646 1647 /* 1648 * Very unusual case: we'd sent a flowc + abort_req for a synq entry and 1649 * now this comes back carrying the credits for the flowc. 1650 */ 1651 if (__predict_false(toep->flags & TPF_SYNQE)) { 1652 KASSERT(toep->flags & TPF_ABORT_SHUTDOWN, 1653 ("%s: credits for a synq entry %p", __func__, toep)); 1654 return (0); 1655 } 1656 1657 inp = toep->inp; 1658 1659 KASSERT(opcode == CPL_FW4_ACK, 1660 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1661 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1662 KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__)); 1663 1664 INP_WLOCK(inp); 1665 1666 if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) { 1667 INP_WUNLOCK(inp); 1668 return (0); 1669 } 1670 1671 KASSERT((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0, 1672 ("%s: inp_flags 0x%x", __func__, inp->inp_flags)); 1673 1674 tp = intotcpcb(inp); 1675 1676 if (cpl->flags & CPL_FW4_ACK_FLAGS_SEQVAL) { 1677 tcp_seq snd_una = be32toh(cpl->snd_una); 1678 1679#ifdef INVARIANTS 1680 if (__predict_false(SEQ_LT(snd_una, tp->snd_una))) { 1681 log(LOG_ERR, 1682 "%s: unexpected seq# %x for TID %u, snd_una %x\n", 1683 __func__, snd_una, toep->tid, tp->snd_una); 1684 } 1685#endif 1686 1687 if (tp->snd_una != snd_una) { 1688 tp->snd_una = snd_una; 1689 tp->ts_recent_age = tcp_ts_getticks(); 1690 } 1691 } 1692 1693#ifdef VERBOSE_TRACES 1694 CTR3(KTR_CXGBE, "%s: tid %d credits %u", __func__, tid, credits); 1695#endif 1696 so = inp->inp_socket; 1697 txsd = &toep->txsd[toep->txsd_cidx]; 1698 plen = 0; 1699 while (credits) { 1700 KASSERT(credits >= txsd->tx_credits, 1701 ("%s: too many (or partial) credits", __func__)); 1702 credits -= txsd->tx_credits; 1703 toep->tx_credits += txsd->tx_credits; 1704 plen += txsd->plen; 1705 txsd++; 1706 toep->txsd_avail++; 1707 KASSERT(toep->txsd_avail <= toep->txsd_total, 1708 ("%s: txsd avail > total", __func__)); 1709 if (__predict_false(++toep->txsd_cidx == toep->txsd_total)) { 1710 txsd = &toep->txsd[0]; 1711 toep->txsd_cidx = 0; 1712 } 1713 } 1714 1715 if (toep->tx_credits == toep->tx_total) { 1716 toep->tx_nocompl = 0; 1717 toep->plen_nocompl = 0; 1718 } 1719 1720 if (toep->flags & TPF_TX_SUSPENDED && 1721 toep->tx_credits >= toep->tx_total / 4) { 1722#ifdef VERBOSE_TRACES 1723 CTR2(KTR_CXGBE, "%s: tid %d calling t4_push_frames", __func__, 1724 tid); 1725#endif 1726 toep->flags &= ~TPF_TX_SUSPENDED; 1727 CURVNET_SET(toep->vnet); 1728 if (toep->ulp_mode == ULP_MODE_ISCSI) 1729 t4_push_pdus(sc, toep, plen); 1730 else 1731 t4_push_frames(sc, toep, plen); 1732 CURVNET_RESTORE(); 1733 } else if (plen > 0) { 1734 struct sockbuf *sb = &so->so_snd; 1735 int sbu; 1736 1737 SOCKBUF_LOCK(sb); 1738 sbu = sbused(sb); 1739 if (toep->ulp_mode == ULP_MODE_ISCSI) { 1740 1741 if (__predict_false(sbu > 0)) { 1742 /* 1743 * The data trasmitted before the tid's ULP mode 1744 * changed to ISCSI is still in so_snd. 1745 * Incoming credits should account for so_snd 1746 * first. 1747 */ 1748 sbdrop_locked(sb, min(sbu, plen)); 1749 plen -= min(sbu, plen); 1750 } 1751 sowwakeup_locked(so); /* unlocks so_snd */ 1752 rqdrop_locked(&toep->ulp_pdu_reclaimq, plen); 1753 } else { 1754#ifdef VERBOSE_TRACES 1755 CTR3(KTR_CXGBE, "%s: tid %d dropped %d bytes", __func__, 1756 tid, plen); 1757#endif 1758 sbdrop_locked(sb, plen); 1759 if (!TAILQ_EMPTY(&toep->aiotx_jobq)) 1760 t4_aiotx_queue_toep(toep); 1761 sowwakeup_locked(so); /* unlocks so_snd */ 1762 } 1763 SOCKBUF_UNLOCK_ASSERT(sb); 1764 } 1765 1766 INP_WUNLOCK(inp); 1767 1768 return (0); 1769} 1770 1771int 1772do_set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1773{ 1774 struct adapter *sc = iq->adapter; 1775 const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1); 1776 unsigned int tid = GET_TID(cpl); 1777 struct toepcb *toep; 1778#ifdef INVARIANTS 1779 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1780#endif 1781 1782 KASSERT(opcode == CPL_SET_TCB_RPL, 1783 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1784 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1785 MPASS(iq != &sc->sge.fwq); 1786 1787 toep = lookup_tid(sc, tid); 1788 if (toep->ulp_mode == ULP_MODE_TCPDDP) { 1789 handle_ddp_tcb_rpl(toep, cpl); 1790 return (0); 1791 } 1792 1793 /* 1794 * TOM and/or other ULPs don't request replies for CPL_SET_TCB or 1795 * CPL_SET_TCB_FIELD requests. This can easily change and when it does 1796 * the dispatch code will go here. 1797 */ 1798#ifdef INVARIANTS 1799 panic("%s: Unexpected CPL_SET_TCB_RPL for tid %u on iq %p", __func__, 1800 tid, iq); 1801#else 1802 log(LOG_ERR, "%s: Unexpected CPL_SET_TCB_RPL for tid %u on iq %p\n", 1803 __func__, tid, iq); 1804#endif 1805 1806 return (0); 1807} 1808 1809void 1810t4_set_tcb_field(struct adapter *sc, struct sge_wrq *wrq, int tid, 1811 uint16_t word, uint64_t mask, uint64_t val, int reply, int cookie, int iqid) 1812{ 1813 struct wrqe *wr; 1814 struct cpl_set_tcb_field *req; 1815 1816 MPASS((cookie & ~M_COOKIE) == 0); 1817 MPASS((iqid & ~M_QUEUENO) == 0); 1818 1819 wr = alloc_wrqe(sizeof(*req), wrq); 1820 if (wr == NULL) { 1821 /* XXX */ 1822 panic("%s: allocation failure.", __func__); 1823 } 1824 req = wrtod(wr); 1825 1826 INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid); 1827 req->reply_ctrl = htobe16(V_QUEUENO(iqid)); 1828 if (reply == 0) 1829 req->reply_ctrl |= htobe16(F_NO_REPLY); 1830 req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie)); 1831 req->mask = htobe64(mask); 1832 req->val = htobe64(val); 1833 1834 t4_wrq_tx(sc, wr); 1835} 1836 1837void 1838t4_init_cpl_io_handlers(void) 1839{ 1840 1841 t4_register_cpl_handler(CPL_PEER_CLOSE, do_peer_close); 1842 t4_register_cpl_handler(CPL_CLOSE_CON_RPL, do_close_con_rpl); 1843 t4_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req); 1844 t4_register_cpl_handler(CPL_ABORT_RPL_RSS, do_abort_rpl); 1845 t4_register_cpl_handler(CPL_RX_DATA, do_rx_data); 1846 t4_register_cpl_handler(CPL_FW4_ACK, do_fw4_ack); 1847} 1848 1849void 1850t4_uninit_cpl_io_handlers(void) 1851{ 1852 1853 t4_register_cpl_handler(CPL_PEER_CLOSE, NULL); 1854 t4_register_cpl_handler(CPL_CLOSE_CON_RPL, NULL); 1855 t4_register_cpl_handler(CPL_ABORT_REQ_RSS, NULL); 1856 t4_register_cpl_handler(CPL_ABORT_RPL_RSS, NULL); 1857 t4_register_cpl_handler(CPL_RX_DATA, NULL); 1858 t4_register_cpl_handler(CPL_FW4_ACK, NULL); 1859} 1860 1861/* 1862 * Use the 'backend3' field in AIO jobs to store the amount of data 1863 * sent by the AIO job so far and the 'backend4' field to hold an 1864 * error that should be reported when the job is completed. 1865 */ 1866#define aio_sent backend3 1867#define aio_error backend4 1868 1869#define jobtotid(job) \ 1870 (((struct toepcb *)(so_sototcpcb((job)->fd_file->f_data)->t_toe))->tid) 1871 1872static void 1873free_aiotx_buffer(struct aiotx_buffer *ab) 1874{ 1875 struct kaiocb *job; 1876 long status; 1877 int error; 1878 1879 if (refcount_release(&ab->refcount) == 0) 1880 return; 1881 1882 job = ab->job; 1883 error = job->aio_error; 1884 status = job->aio_sent; 1885 vm_page_unhold_pages(ab->ps.pages, ab->ps.npages); 1886 free(ab, M_CXGBE); 1887#ifdef VERBOSE_TRACES 1888 CTR5(KTR_CXGBE, "%s: tid %d completed %p len %ld, error %d", __func__, 1889 jobtotid(job), job, status, error); 1890#endif 1891 if (error == ECANCELED && status != 0) 1892 error = 0; 1893 if (error == ECANCELED) 1894 aio_cancel(job); 1895 else if (error) 1896 aio_complete(job, -1, error); 1897 else 1898 aio_complete(job, status, 0); 1899} 1900 1901static void 1902t4_aiotx_mbuf_free(struct mbuf *m, void *buffer, void *arg) 1903{ 1904 struct aiotx_buffer *ab = buffer; 1905 1906#ifdef VERBOSE_TRACES 1907 CTR3(KTR_CXGBE, "%s: completed %d bytes for tid %d", __func__, 1908 m->m_len, jobtotid(ab->job)); 1909#endif 1910 free_aiotx_buffer(ab); 1911} 1912 1913/* 1914 * Hold the buffer backing an AIO request and return an AIO transmit 1915 * buffer. 1916 */ 1917static int 1918hold_aio(struct kaiocb *job) 1919{ 1920 struct aiotx_buffer *ab; 1921 struct vmspace *vm; 1922 vm_map_t map; 1923 vm_offset_t start, end, pgoff; 1924 int n; 1925 1926 MPASS(job->backend1 == NULL); 1927 1928 /* 1929 * The AIO subsystem will cancel and drain all requests before 1930 * permitting a process to exit or exec, so p_vmspace should 1931 * be stable here. 1932 */ 1933 vm = job->userproc->p_vmspace; 1934 map = &vm->vm_map; 1935 start = (uintptr_t)job->uaiocb.aio_buf; 1936 pgoff = start & PAGE_MASK; 1937 end = round_page(start + job->uaiocb.aio_nbytes); 1938 start = trunc_page(start); 1939 n = atop(end - start); 1940 1941 ab = malloc(sizeof(*ab) + n * sizeof(vm_page_t), M_CXGBE, M_WAITOK | 1942 M_ZERO); 1943 refcount_init(&ab->refcount, 1); 1944 ab->ps.pages = (vm_page_t *)(ab + 1); 1945 ab->ps.npages = vm_fault_quick_hold_pages(map, start, end - start, 1946 VM_PROT_WRITE, ab->ps.pages, n); 1947 if (ab->ps.npages < 0) { 1948 free(ab, M_CXGBE); 1949 return (EFAULT); 1950 } 1951 1952 KASSERT(ab->ps.npages == n, 1953 ("hold_aio: page count mismatch: %d vs %d", ab->ps.npages, n)); 1954 1955 ab->ps.offset = pgoff; 1956 ab->ps.len = job->uaiocb.aio_nbytes; 1957 ab->job = job; 1958 job->backend1 = ab; 1959#ifdef VERBOSE_TRACES 1960 CTR5(KTR_CXGBE, "%s: tid %d, new pageset %p for job %p, npages %d", 1961 __func__, jobtotid(job), &ab->ps, job, ab->ps.npages); 1962#endif 1963 return (0); 1964} 1965 1966static void 1967t4_aiotx_process_job(struct toepcb *toep, struct socket *so, struct kaiocb *job) 1968{ 1969 struct adapter *sc; 1970 struct sockbuf *sb; 1971 struct file *fp; 1972 struct aiotx_buffer *ab; 1973 struct inpcb *inp; 1974 struct tcpcb *tp; 1975 struct mbuf *m; 1976 int error; 1977 bool moretocome, sendmore; 1978 1979 sc = td_adapter(toep->td); 1980 sb = &so->so_snd; 1981 SOCKBUF_UNLOCK(sb); 1982 fp = job->fd_file; 1983 ab = job->backend1; 1984 m = NULL; 1985 1986#ifdef MAC 1987 error = mac_socket_check_send(fp->f_cred, so); 1988 if (error != 0) 1989 goto out; 1990#endif 1991 1992 if (ab == NULL) { 1993 error = hold_aio(job); 1994 if (error != 0) 1995 goto out; 1996 ab = job->backend1; 1997 } 1998 1999 /* Inline sosend_generic(). */ 2000 2001 job->msgsnd = 1; 2002 2003 error = sblock(sb, SBL_WAIT); 2004 MPASS(error == 0); 2005 2006sendanother: 2007 m = m_get(M_WAITOK, MT_DATA); 2008 2009 SOCKBUF_LOCK(sb); 2010 if (so->so_snd.sb_state & SBS_CANTSENDMORE) { 2011 SOCKBUF_UNLOCK(sb); 2012 sbunlock(sb); 2013 if ((so->so_options & SO_NOSIGPIPE) == 0) { 2014 PROC_LOCK(job->userproc); 2015 kern_psignal(job->userproc, SIGPIPE); 2016 PROC_UNLOCK(job->userproc); 2017 } 2018 error = EPIPE; 2019 goto out; 2020 } 2021 if (so->so_error) { 2022 error = so->so_error; 2023 so->so_error = 0; 2024 SOCKBUF_UNLOCK(sb); 2025 sbunlock(sb); 2026 goto out; 2027 } 2028 if ((so->so_state & SS_ISCONNECTED) == 0) { 2029 SOCKBUF_UNLOCK(sb); 2030 sbunlock(sb); 2031 error = ENOTCONN; 2032 goto out; 2033 } 2034 if (sbspace(sb) < sb->sb_lowat) { 2035 MPASS(job->aio_sent == 0 || !(so->so_state & SS_NBIO)); 2036 2037 /* 2038 * Don't block if there is too little room in the socket 2039 * buffer. Instead, requeue the request. 2040 */ 2041 if (!aio_set_cancel_function(job, t4_aiotx_cancel)) { 2042 SOCKBUF_UNLOCK(sb); 2043 sbunlock(sb); 2044 error = ECANCELED; 2045 goto out; 2046 } 2047 TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list); 2048 SOCKBUF_UNLOCK(sb); 2049 sbunlock(sb); 2050 goto out; 2051 } 2052 2053 /* 2054 * Write as much data as the socket permits, but no more than a 2055 * a single sndbuf at a time. 2056 */ 2057 m->m_len = sbspace(sb); 2058 if (m->m_len > ab->ps.len - job->aio_sent) { 2059 m->m_len = ab->ps.len - job->aio_sent; 2060 moretocome = false; 2061 } else 2062 moretocome = true; 2063 if (m->m_len > sc->tt.sndbuf) { 2064 m->m_len = sc->tt.sndbuf; 2065 sendmore = true; 2066 } else 2067 sendmore = false; 2068 2069 if (!TAILQ_EMPTY(&toep->aiotx_jobq)) 2070 moretocome = true; 2071 SOCKBUF_UNLOCK(sb); 2072 MPASS(m->m_len != 0); 2073 2074 /* Inlined tcp_usr_send(). */ 2075 2076 inp = toep->inp; 2077 INP_WLOCK(inp); 2078 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { 2079 INP_WUNLOCK(inp); 2080 sbunlock(sb); 2081 error = ECONNRESET; 2082 goto out; 2083 } 2084 2085 refcount_acquire(&ab->refcount); 2086 m_extadd(m, NULL, ab->ps.len, t4_aiotx_mbuf_free, ab, 2087 (void *)(uintptr_t)job->aio_sent, 0, EXT_NET_DRV); 2088 m->m_ext.ext_flags |= EXT_FLAG_AIOTX; 2089 job->aio_sent += m->m_len; 2090 2091 sbappendstream(sb, m, 0); 2092 m = NULL; 2093 2094 if (!(inp->inp_flags & INP_DROPPED)) { 2095 tp = intotcpcb(inp); 2096 if (moretocome) 2097 tp->t_flags |= TF_MORETOCOME; 2098 error = tp->t_fb->tfb_tcp_output(tp); 2099 if (moretocome) 2100 tp->t_flags &= ~TF_MORETOCOME; 2101 } 2102 2103 INP_WUNLOCK(inp); 2104 if (sendmore) 2105 goto sendanother; 2106 sbunlock(sb); 2107 2108 if (error) 2109 goto out; 2110 2111 /* 2112 * If this is a non-blocking socket and the request has not 2113 * been fully completed, requeue it until the socket is ready 2114 * again. 2115 */ 2116 if (job->aio_sent < job->uaiocb.aio_nbytes && 2117 !(so->so_state & SS_NBIO)) { 2118 SOCKBUF_LOCK(sb); 2119 if (!aio_set_cancel_function(job, t4_aiotx_cancel)) { 2120 SOCKBUF_UNLOCK(sb); 2121 error = ECANCELED; 2122 goto out; 2123 } 2124 TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list); 2125 return; 2126 } 2127 2128 /* 2129 * If the request will not be requeued, drop a reference on 2130 * the the aiotx buffer. Any mbufs in flight should still 2131 * contain a reference, but this drops the reference that the 2132 * job owns while it is waiting to queue mbufs to the socket. 2133 */ 2134 free_aiotx_buffer(ab); 2135 2136out: 2137 if (error) { 2138 if (ab != NULL) { 2139 job->aio_error = error; 2140 free_aiotx_buffer(ab); 2141 } else { 2142 MPASS(job->aio_sent == 0); 2143 aio_complete(job, -1, error); 2144 } 2145 } 2146 if (m != NULL) 2147 m_free(m); 2148 SOCKBUF_LOCK(sb); 2149} 2150 2151static void 2152t4_aiotx_task(void *context, int pending) 2153{ 2154 struct toepcb *toep = context; 2155 struct inpcb *inp = toep->inp; 2156 struct socket *so = inp->inp_socket; 2157 struct kaiocb *job; 2158 2159 CURVNET_SET(toep->vnet); 2160 SOCKBUF_LOCK(&so->so_snd); 2161 while (!TAILQ_EMPTY(&toep->aiotx_jobq) && sowriteable(so)) { 2162 job = TAILQ_FIRST(&toep->aiotx_jobq); 2163 TAILQ_REMOVE(&toep->aiotx_jobq, job, list); 2164 if (!aio_clear_cancel_function(job)) 2165 continue; 2166 2167 t4_aiotx_process_job(toep, so, job); 2168 } 2169 toep->aiotx_task_active = false; 2170 SOCKBUF_UNLOCK(&so->so_snd); 2171 CURVNET_RESTORE(); 2172 2173 free_toepcb(toep); 2174} 2175 2176static void 2177t4_aiotx_queue_toep(struct toepcb *toep) 2178{ 2179 2180 SOCKBUF_LOCK_ASSERT(&toep->inp->inp_socket->so_snd); 2181#ifdef VERBOSE_TRACES 2182 CTR3(KTR_CXGBE, "%s: queueing aiotx task for tid %d, active = %s", 2183 __func__, toep->tid, toep->aiotx_task_active ? "true" : "false"); 2184#endif 2185 if (toep->aiotx_task_active) 2186 return; 2187 toep->aiotx_task_active = true; 2188 hold_toepcb(toep); 2189 soaio_enqueue(&toep->aiotx_task); 2190} 2191 2192static void 2193t4_aiotx_cancel(struct kaiocb *job) 2194{ 2195 struct aiotx_buffer *ab; 2196 struct socket *so; 2197 struct sockbuf *sb; 2198 struct tcpcb *tp; 2199 struct toepcb *toep; 2200 2201 so = job->fd_file->f_data; 2202 tp = so_sototcpcb(so); 2203 toep = tp->t_toe; 2204 MPASS(job->uaiocb.aio_lio_opcode == LIO_WRITE); 2205 sb = &so->so_snd; 2206 2207 SOCKBUF_LOCK(sb); 2208 if (!aio_cancel_cleared(job)) 2209 TAILQ_REMOVE(&toep->aiotx_jobq, job, list); 2210 SOCKBUF_UNLOCK(sb); 2211 2212 ab = job->backend1; 2213 if (ab != NULL) 2214 free_aiotx_buffer(ab); 2215 else 2216 aio_cancel(job); 2217} 2218 2219int 2220t4_aio_queue_aiotx(struct socket *so, struct kaiocb *job) 2221{ 2222 struct tcpcb *tp = so_sototcpcb(so); 2223 struct toepcb *toep = tp->t_toe; 2224 struct adapter *sc = td_adapter(toep->td); 2225 2226 /* This only handles writes. */ 2227 if (job->uaiocb.aio_lio_opcode != LIO_WRITE) 2228 return (EOPNOTSUPP); 2229 2230 if (!sc->tt.tx_zcopy) 2231 return (EOPNOTSUPP); 2232 2233 SOCKBUF_LOCK(&so->so_snd); 2234#ifdef VERBOSE_TRACES 2235 CTR2(KTR_CXGBE, "%s: queueing %p", __func__, job); 2236#endif 2237 if (!aio_set_cancel_function(job, t4_aiotx_cancel)) 2238 panic("new job was cancelled"); 2239 TAILQ_INSERT_TAIL(&toep->aiotx_jobq, job, list); 2240 if (sowriteable(so)) 2241 t4_aiotx_queue_toep(toep); 2242 SOCKBUF_UNLOCK(&so->so_snd); 2243 return (0); 2244} 2245 2246void 2247aiotx_init_toep(struct toepcb *toep) 2248{ 2249 2250 TAILQ_INIT(&toep->aiotx_jobq); 2251 TASK_INIT(&toep->aiotx_task, 0, t4_aiotx_task, toep); 2252} 2253#endif 2254