/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/iw_cxgbe/qp.c 331719 2018-03-29 01:20:58Z np $");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>
#include <netinet/in.h>
#include <net/route.h>

#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp.h>
#include <netinet/tcpip.h>

#include <netinet/toecore.h>

struct sge_iq;
struct rss_header;
#include <linux/types.h>
#include "offload.h"
#include "tom/t4_tom.h"

#include "iw_cxgbe.h"
#include "user.h"

static int creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize);

static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
	unsigned long flag;
	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);
}

static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{

	contigfree(sq->queue, sq->memsize, M_DEVBUF);
}

static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{

	dealloc_host_sq(rdev, sq);
}

static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	sq->queue = contigmalloc(sq->memsize, M_DEVBUF, M_NOWAIT, 0ul, ~0ul,
	    4096, 0);

	if (sq->queue)
		sq->dma_addr = vtophys(sq->queue);
	else
		return -ENOMEM;
	sq->phys_addr = vtophys(sq->queue);
	pci_unmap_addr_set(sq, mapping, sq->dma_addr);
	CTR4(KTR_IW_CXGBE, "%s sq %p dma_addr %p phys_addr %p", __func__,
	    sq->queue, sq->dma_addr, sq->phys_addr);
	return 0;
}
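
/*
 * Editor's note (descriptive, not in the original): destroy_qp() releases
 * everything create_qp() set up -- the RQ and SQ memory, the RQT pool
 * entries and both qids -- mirroring the error-unwind path at the bottom
 * of create_qp().
 */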
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	contigfree(wq->rq.queue, wq->rq.memsize, M_DEVBUF);
	dealloc_sq(rdev, &wq->sq);
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	kfree(wq->rq.sw_rq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return 0;
}

static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct adapter *sc = rdev->adap;
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	int ret;
	int eqsize;
	struct wrqe *wr;
	const int spg_ndesc = sc->params.sge.spg_len / EQ_ESIZE;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid)
		goto err1;

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
		    GFP_KERNEL);
		if (!wq->sq.sw_sq)
			goto err2;

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
		    GFP_KERNEL);
		if (!wq->rq.sw_rq)
			goto err3;
	}

	/* RQT must be a power of 2. */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr)
		goto err4;

	if (alloc_host_sq(rdev, &wq->sq))
		goto err5;

	memset(wq->sq.queue, 0, wq->sq.memsize);
	pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = contigmalloc(wq->rq.memsize,
	    M_DEVBUF, M_NOWAIT, 0ul, ~0ul, 4096, 0);
	if (wq->rq.queue)
		wq->rq.dma_addr = vtophys(wq->rq.queue);
	else
		goto err6;
	CTR5(KTR_IW_CXGBE,
	    "%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx", __func__,
	    wq->sq.queue, (unsigned long long)vtophys(wq->sq.queue),
	    wq->rq.queue, (unsigned long long)vtophys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = (void *)((unsigned long)rman_get_virtual(sc->regs_res) +
	    sc->sge_kdoorbell_reg);
	wq->gts = (void *)((unsigned long)rman_get_virtual(rdev->adap->regs_res)
	    + sc->sge_gts_reg);
	if (user) {
		wq->sq.udb = (u64)((char*)rman_get_virtual(rdev->adap->udbs_res) +
		    (wq->sq.qid << rdev->qpshift));
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)((char*)rman_get_virtual(rdev->adap->udbs_res) +
		    (wq->rq.qid << rdev->qpshift));
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
	if (wr == NULL)
		return (0);
	res_wr = wrtod(wr);

	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
	    V_FW_WR_OP(FW_RI_RES_WR) |
	    V_FW_RI_RES_WR_NRES(2) |
	    F_FW_WR_COMPL);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
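
	/*
	 * Editor's note (descriptive, not in the original): a single
	 * FW_RI_RES_WR carries two resource entries (NRES is 2); the SQ
	 * entry is filled in first, then res is advanced for the RQ entry.
	 */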
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/* eqsize is the number of 64B entries plus the status page size. */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + spg_ndesc;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
	    V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
	    V_FW_RI_RES_WR_CPRIO(0) |		/* don't keep in chip cache */
	    V_FW_RI_RES_WR_PCIECHN(0) |		/* set by uP at ri_init time */
	    V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
	    V_FW_RI_RES_WR_DCAEN(0) |
	    V_FW_RI_RES_WR_DCACPU(0) |
	    V_FW_RI_RES_WR_FBMIN(2) |
	    V_FW_RI_RES_WR_FBMAX(2) |
	    V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
	    V_FW_RI_RES_WR_CIDXFTHRESH(0) |
	    V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/* eqsize is the number of 64B entries plus the status page size. */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + spg_ndesc;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
	    V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
	    V_FW_RI_RES_WR_CPRIO(0) |		/* don't keep in chip cache */
	    V_FW_RI_RES_WR_PCIECHN(0) |		/* set by uP at ri_init time */
	    V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
	    V_FW_RI_RES_WR_DCAEN(0) |
	    V_FW_RI_RES_WR_DCACPU(0) |
	    V_FW_RI_RES_WR_FBMIN(2) |
	    V_FW_RI_RES_WR_FBMAX(2) |
	    V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
	    V_FW_RI_RES_WR_CIDXFTHRESH(0) |
	    V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	t4_wrq_tx(sc, wr);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto err7;

	CTR6(KTR_IW_CXGBE,
	    "%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx",
	    __func__, wq->sq.qid, wq->rq.qid, wq->db,
	    (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
err7:
	contigfree(wq->rq.queue, wq->rq.memsize, M_DEVBUF);
err6:
	dealloc_sq(rdev, &wq->sq);
err5:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
err4:
	kfree(wq->rq.sw_rq);
err3:
	kfree(wq->sq.sw_sq);
err2:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
err1:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return -ENOMEM;
}

static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      struct ib_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int rem, len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
			return -EMSGSIZE;
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		rem = wr->sg_list[i].length;
		while (rem) {
			if (dstp == (u8 *)&sq->queue[sq->size])
				dstp = (u8 *)sq->queue;
			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
				len = rem;
			else
				len = (u8 *)&sq->queue[sq->size] - dstp;
			memcpy(dstp, srcp, len);
			dstp += len;
			srcp += len;
			rem -= len;
		}
	}
	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->r1 = 0;
	immdp->r2 = 0;
	immdp->immdlen = cpu_to_be32(plen);
	*plenp = plen;
	return 0;
}
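
/*
 * Editor's note (descriptive, not in the original): build_isgl() writes
 * an immediate SGL -- one lkey/length flit and one address flit per SGE --
 * into the work queue, wrapping back to queue_start whenever a flit lands
 * on queue_end, since a WR may straddle the end of the ring.
 */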
static int build_isgl(__be64 *queue_start, __be64 *queue_end,
		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
		      int num_sge, u32 *plenp)

{
	int i;
	u32 plen = 0;
	__be64 *flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += sg_list[i].length;
		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
		    sg_list[i].length);
		if (++flitp == queue_end)
			flitp = queue_start;
		*flitp = cpu_to_be64(sg_list[i].addr);
		if (++flitp == queue_end)
			flitp = queue_start;
	}
	*flitp = (__force __be64)0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = cpu_to_be16(num_sge);
	isglp->r2 = 0;
	if (plenp)
		*plenp = plen;
	return 0;
}

static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
			    V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
			    V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
			    V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
			    V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;

	default:
		return -EINVAL;
	}

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
			    T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			    plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
			    (__be64 *)&sq->queue[sq->size],
			    wqe->send.u.isgl_src,
			    wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			    wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}
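
/*
 * Editor's note (descriptive, not in the original): build_rdma_write()
 * follows the same inline/SGL scheme as build_rdma_send(), with the
 * remote sink described by the rkey and remote address carried in the
 * work request.
 */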
static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	wqe->write.immd_data = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
			    T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			    plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
			    (__be64 *)&sq->queue[sq->size],
			    wqe->write.u.isgl_src,
			    wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			    wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	if (wr->num_sge) {
		wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
		    >> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
		    >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	return 0;
}

static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ib_recv_wr *wr, u8 *len16)
{
	int ret;

	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
	    (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
	    &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return ret;
	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
	    wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}

static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
			 struct ib_send_wr *wr, u8 *len16)
{

	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
	int rem;

	if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
		return -EINVAL;

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
	wqe->fr.len_hi = 0;
	wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
	wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
	wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
	    0xffffffff);
	WARN_ON(pbllen > T4_MAX_FR_IMMD);
	imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
	imdp->op = FW_RI_DATA_IMMD;
	imdp->r1 = 0;
	imdp->r2 = 0;
	imdp->immdlen = cpu_to_be32(pbllen);
	p = (__be64 *)(imdp + 1);
	rem = pbllen;
	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
		*p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
		rem -= sizeof *p;
		if (++p == (__be64 *)&sq->queue[sq->size])
			p = (__be64 *)sq->queue;
	}
	BUG_ON(rem < 0);
	while (rem) {
		*p = 0;
		rem -= sizeof *p;
		if (++p == (__be64 *)&sq->queue[sq->size])
			p = (__be64 *)sq->queue;
	}
	*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16);
	return 0;
}

static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
			  u8 *len16)
{
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->inv.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
	return 0;
}

void c4iw_qp_add_ref(struct ib_qp *qp)
{
	CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, qp);
	atomic_inc(&(to_c4iw_qp(qp)->refcnt));
}
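
/*
 * Editor's note (descriptive, not in the original): c4iw_qp_add_ref() and
 * c4iw_qp_rem_ref() pair up; the last reference wakes the waiter in
 * c4iw_destroy_qp(), which sleeps until refcnt drops to zero.
 */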
void
c4iw_qp_rem_ref(struct ib_qp *qp)
{
	CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, qp);
	if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
		wake_up(&(to_c4iw_qp(qp)->wait));
}

static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
{
	struct t4_cqe cqe = {};
	struct c4iw_cq *schp;
	unsigned long flag;
	struct t4_cq *cq;

	schp = to_c4iw_cq(qhp->ibqp.send_cq);
	cq = &schp->cq;

	PDBG("%s drain sq id %u\n", __func__, qhp->wq.sq.qid);
	cqe.u.drain_cookie = wr->wr_id;
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
	    V_CQE_OPCODE(C4IW_DRAIN_OPCODE) |
	    V_CQE_TYPE(1) |
	    V_CQE_SWCQE(1) |
	    V_CQE_QPID(qhp->wq.sq.qid));

	spin_lock_irqsave(&schp->lock, flag);
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
	spin_unlock_irqrestore(&schp->lock, flag);

	spin_lock_irqsave(&schp->comp_handler_lock, flag);
	(*schp->ibcq.comp_handler)(&schp->ibcq,
	    schp->ibcq.cq_context);
	spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
}

static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
{
	struct t4_cqe cqe = {};
	struct c4iw_cq *rchp;
	unsigned long flag;
	struct t4_cq *cq;

	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
	cq = &rchp->cq;

	PDBG("%s drain rq id %u\n", __func__, qhp->wq.sq.qid);
	cqe.u.drain_cookie = wr->wr_id;
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
	    V_CQE_OPCODE(C4IW_DRAIN_OPCODE) |
	    V_CQE_TYPE(0) |
	    V_CQE_SWCQE(1) |
	    V_CQE_QPID(qhp->wq.sq.qid));

	spin_lock_irqsave(&rchp->lock, flag);
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
	spin_unlock_irqrestore(&rchp->lock, flag);

	spin_lock_irqsave(&rchp->comp_handler_lock, flag);
	(*rchp->ibcq.comp_handler)(&rchp->ibcq,
	    rchp->ibcq.cq_context);
	spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
}
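
/*
 * Editor's note (descriptive, not in the original): once the WQ is marked
 * in error, a posted WR can no longer reach the hardware, so
 * c4iw_post_send()/c4iw_post_receive() complete it immediately as a
 * software drain CQE (T4_ERR_SWFLUSH) via the helpers above.
 */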
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		complete_sq_drain_wr(qhp, wr);
		return err;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
		    qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);

		fw_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
		case IB_WR_SEND:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
			else
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
			else
				fw_flags = 0;
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		case IB_WR_FAST_REG_MR:
			fw_opcode = FW_RI_FR_NSMR_WR;
			swsqe->opcode = FW_RI_FAST_REGISTER;
			err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			break;
		default:
			CTR2(KTR_IW_CXGBE, "%s post of type =%d TBD!", __func__,
			    wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
		    qhp->sq_sig_all;
		swsqe->wr_id = wr->wr_id;

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		CTR5(KTR_IW_CXGBE,
		    "%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u",
		    __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		    swsqe->opcode, swsqe->read_len);
		wr = wr->next;
		num_wrs--;
		t4_sq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	}

	t4_ring_sq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}

int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe;
	u32 num_wrs;
	u8 len16 = 0;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		complete_rq_drain_wr(qhp, wr);
		return err;
	}
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
		    qhp->wq.rq.wq_pidx *
		    T4_EQ_ENTRY_SIZE);
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		CTR3(KTR_IW_CXGBE, "%s cookie 0x%llx pidx %u", __func__,
		    (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
		wr = wr->next;
		num_wrs--;
	}

	t4_ring_rq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}

int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
{
	return -ENOSYS;
}
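
/*
 * Editor's note (descriptive, not in the original): build_term_codes()
 * maps a CQE error status to the terminate-message layer/etype and error
 * code that post_terminate() puts on the wire; with no error CQE it
 * presumably defaults to a local catastrophic error.
 */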
static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
				    u8 *ecode)
{
	int status;
	int tagged;
	int opcode;
	int rqtype;
	int send_inv;

	if (!err_cqe) {
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		return;
	}

	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
	    (opcode == FW_RI_SEND_WITH_SE_INV);
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
	    (rqtype && (opcode == FW_RI_READ_RESP));

	switch (status) {
	case T4_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case T4_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case T4_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case T4_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case T4_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case T4_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case T4_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case T4_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case T4_ERR_MSN:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case T4_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}
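
/*
 * Editor's note (descriptive, not in the original): post_terminate()
 * builds a FW_RI_WR TERMINATE message and posts it on the connection's
 * offload tx queue; the layer/etype and ecode come either from the QP
 * attributes (MPA-layer errors) or from build_term_codes().
 */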
static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
			   gfp_t gfp)
{
	int ret;
	struct fw_ri_wr *wqe;
	struct terminate_message *term;
	struct wrqe *wr;
	struct socket *so = qhp->ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;

	CTR4(KTR_IW_CXGBE, "%s qhp %p qid 0x%x tid %u", __func__, qhp,
	    qhp->wq.sq.qid, qhp->ep->hwtid);

	wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq);
	if (wr == NULL)
		return;
	wqe = wrtod(wr);

	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(V_FW_WR_OP(FW_RI_WR));
	wqe->flowid_len16 = cpu_to_be32(
	    V_FW_WR_FLOWID(qhp->ep->hwtid) |
	    V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
		term->layer_etype = qhp->attr.layer_etype;
		term->ecode = qhp->attr.ecode;
	} else
		build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	ret = creds(toep, inp, sizeof(*wqe));
	if (ret) {
		free_wrqe(wr);
		return;
	}
	t4_wrq_tx(qhp->rhp->rdev.adap, wr);
}
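
/*
 * Editor's note (descriptive, not in the original): two flush flavors
 * follow.  __flush_qp() drains HW CQEs into the SW queues for kernel
 * QPs, while flush_qp() simply marks user QPs (and their CQs) in error
 * and notifies the completion handlers.
 */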
/* Assumes qhp lock is held. */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
		       struct c4iw_cq *schp)
{
	int count;
	int flushed;
	unsigned long flag;

	CTR4(KTR_IW_CXGBE, "%s qhp %p rchp %p schp %p", __func__, qhp, rchp,
	    schp);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&rchp->cq);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);
	if (flushed && rchp->ibcq.comp_handler) {
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
	}

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&schp->cq);
	c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, flag);
	if (flushed && schp->ibcq.comp_handler) {
		spin_lock_irqsave(&schp->comp_handler_lock, flag);
		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
	}
}

static void flush_qp(struct c4iw_qp *qhp)
{
	struct c4iw_cq *rchp, *schp;
	unsigned long flag;

	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
	schp = get_chp(qhp->rhp, qhp->attr.scq);

	if (qhp->ibqp.uobject) {
		t4_set_wq_in_error(&qhp->wq);
		t4_set_cq_in_error(&rchp->cq);
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		if (schp != rchp) {
			t4_set_cq_in_error(&schp->cq);
			spin_lock_irqsave(&schp->comp_handler_lock, flag);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
			    schp->ibcq.cq_context);
			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
		}
		return;
	}
	__flush_qp(qhp, rchp, schp);
}

static int
rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, struct c4iw_ep *ep)
{
	struct c4iw_rdev *rdev = &rhp->rdev;
	struct adapter *sc = rdev->adap;
	struct fw_ri_wr *wqe;
	int ret;
	struct wrqe *wr;
	struct socket *so = ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;

	KASSERT(rhp == qhp->rhp && ep == qhp->ep, ("%s: EDOOFUS", __func__));

	CTR4(KTR_IW_CXGBE, "%s qhp %p qid 0x%x tid %u", __func__, qhp,
	    qhp->wq.sq.qid, ep->hwtid);

	wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq);
	if (wr == NULL)
		return (0);
	wqe = wrtod(wr);

	memset(wqe, 0, sizeof *wqe);

	wqe->op_compl = cpu_to_be32(V_FW_WR_OP(FW_RI_WR) | F_FW_WR_COMPL);
	wqe->flowid_len16 = cpu_to_be32(V_FW_WR_FLOWID(ep->hwtid) |
	    V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
	wqe->cookie = (unsigned long) &ep->com.wr_wait;
	wqe->u.fini.type = FW_RI_TYPE_FINI;

	c4iw_init_wr_wait(&ep->com.wr_wait);

	ret = creds(toep, inp, sizeof(*wqe));
	if (ret) {
		free_wrqe(wr);
		return ret;
	}
	t4_wrq_tx(sc, wr);

	ret = c4iw_wait_for_reply(rdev, &ep->com.wr_wait, ep->hwtid,
	    qhp->wq.sq.qid, __func__);
	return ret;
}

static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
	CTR2(KTR_IW_CXGBE, "%s p2p_type = %d", __func__, p2p_type);
	memset(&init->u, 0, sizeof init->u);
	switch (p2p_type) {
	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
		init->u.write.stag_sink = cpu_to_be32(1);
		init->u.write.to_sink = cpu_to_be64(1);
		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
		    sizeof(struct fw_ri_immd),
		    16);
		break;
	case FW_RI_INIT_P2PTYPE_READ_REQ:
		init->u.write.opcode = FW_RI_RDMA_READ_WR;
		init->u.read.stag_src = cpu_to_be32(1);
		init->u.read.to_src_lo = cpu_to_be32(1);
		init->u.read.stag_sink = cpu_to_be32(1);
		init->u.read.to_sink_lo = cpu_to_be32(1);
		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
		break;
	}
}
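
/*
 * Editor's note (descriptive, not in the original): creds() reserves tx
 * credits on the offload queue before a raw work request is handed to
 * t4_wrq_tx(); each credit appears to cover 16 bytes of WR, per
 * howmany(wrsize, 16) below.
 */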
static int
creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize)
{
	struct ofld_tx_sdesc *txsd;

	CTR3(KTR_IW_CXGBE, "%s:creB %p %u", __func__, toep, wrsize);
	INP_WLOCK(inp);
	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) != 0) {
		INP_WUNLOCK(inp);
		return (EINVAL);
	}
	txsd = &toep->txsd[toep->txsd_pidx];
	txsd->tx_credits = howmany(wrsize, 16);
	txsd->plen = 0;
	KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
	    ("%s: not enough credits (%d)", __func__, toep->tx_credits));
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;
	INP_WUNLOCK(inp);
	CTR5(KTR_IW_CXGBE, "%s:creE %p %u %u %u", __func__, toep,
	    txsd->tx_credits, toep->tx_credits, toep->txsd_pidx);
	return (0);
}

static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct wrqe *wr;
	struct c4iw_ep *ep = qhp->ep;
	struct c4iw_rdev *rdev = &qhp->rhp->rdev;
	struct adapter *sc = rdev->adap;
	struct socket *so = ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;

	CTR4(KTR_IW_CXGBE, "%s qhp %p qid 0x%x tid %u", __func__, qhp,
	    qhp->wq.sq.qid, ep->hwtid);

	wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq);
	if (wr == NULL)
		return (0);
	wqe = wrtod(wr);

	memset(wqe, 0, sizeof *wqe);

	wqe->op_compl = cpu_to_be32(
	    V_FW_WR_OP(FW_RI_WR) |
	    F_FW_WR_COMPL);
	wqe->flowid_len16 = cpu_to_be32(V_FW_WR_FLOWID(ep->hwtid) |
	    V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->cookie = (unsigned long) &ep->com.wr_wait;

	wqe->u.init.type = FW_RI_TYPE_INIT;
	wqe->u.init.mpareqbit_p2ptype =
	    V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
	    V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
	if (qhp->attr.mpa_attr.recv_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.xmit_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.crc_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
	    FW_RI_QP_RDMA_WRITE_ENABLE |
	    FW_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
		    FW_RI_QP_STAG0_ENABLE;
	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
	wqe->u.init.iss = cpu_to_be32(ep->snd_seq);
	wqe->u.init.irs = cpu_to_be32(ep->rcv_seq);
	wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
	wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
	    sc->vres.rq.start);
	if (qhp->attr.mpa_attr.initiator)
		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

	c4iw_init_wr_wait(&ep->com.wr_wait);

	ret = creds(toep, inp, sizeof(*wqe));
	if (ret) {
		free_wrqe(wr);
		return ret;
	}
	t4_wrq_tx(sc, wr);

	ret = c4iw_wait_for_reply(rdev, &ep->com.wr_wait, ep->hwtid,
	    qhp->wq.sq.qid, __func__);

	toep->ulp_mode = ULP_MODE_RDMA;

	return ret;
}
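
/*
 * Editor's note (descriptive, not in the original): the QP state machine.
 * Transitions are driven either by the verbs layer (internal == 0) or
 * internally by the driver (internal == 1); the err label disassociates
 * the LLP connection and moves the QP to ERROR.
 */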
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct c4iw_qp_attributes newattr = qhp->attr;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct c4iw_ep *ep = NULL;

	CTR5(KTR_IW_CXGBE, "%s qhp %p sqid 0x%x rqid 0x%x ep %p", __func__, qhp,
	    qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep);
	CTR3(KTR_IW_CXGBE, "%s state %d -> %d", __func__, qhp->attr.state,
	    (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	mutex_lock(&qhp->mutex);

	/* Process attr changes if in IDLE */
	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & C4IW_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & C4IW_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case C4IW_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_RTS:
			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			set_state(qhp, C4IW_QP_STATE_RTS);

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			c4iw_get_ep(&qhp->ep->com);
			ret = rdma_init(rhp, qhp);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			flush_qp(qhp);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			set_state(qhp, C4IW_QP_STATE_CLOSING);
			ep = qhp->ep;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				c4iw_get_ep(&qhp->ep->com);
			}
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			ret = rdma_fini(rhp, qhp, ep);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_TERMINATE:
			set_state(qhp, C4IW_QP_STATE_TERMINATE);
			qhp->attr.layer_etype = attrs->layer_etype;
			qhp->attr.ecode = attrs->ecode;
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			ep = qhp->ep;
			if (!internal)
				terminate = 1;
			disconnect = 1;
			c4iw_get_ep(&qhp->ep->com);
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				c4iw_get_ep(&qhp->ep->com);
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_CLOSING:

		/*
		 * Allow kernel users to move to ERROR for qp draining.
		 */
		if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
		    C4IW_QP_STATE_ERROR)) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case C4IW_QP_STATE_IDLE:
			flush_qp(qhp);
			set_state(qhp, C4IW_QP_STATE_IDLE);
			qhp->attr.llp_stream_handle = NULL;
			c4iw_put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case C4IW_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto err;
		}
		break;
	case C4IW_QP_STATE_ERROR:
		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}
		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
			ret = -EINVAL;
			goto out;
		}
		set_state(qhp, C4IW_QP_STATE_IDLE);
		break;
	case C4IW_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printf("%s in a bad state %d\n",
		    __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	CTR3(KTR_IW_CXGBE, "%s disassociating ep %p qpid 0x%x", __func__,
	    qhp->ep, qhp->wq.sq.qid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	if (!ep)
		ep = qhp->ep;
	qhp->ep = NULL;
	set_state(qhp, C4IW_QP_STATE_ERROR);
	free = 1;
	abort = 1;
	BUG_ON(!ep);
	flush_qp(qhp);
	wake_up(&qhp->wait);
out:
	mutex_unlock(&qhp->mutex);

	if (terminate)
		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
		    GFP_KERNEL);
		c4iw_put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s exit state %d", __func__, qhp->attr.state);
	return ret;
}
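
/*
 * Editor's note (descriptive, not in the original): destroying a QP first
 * drives it to ERROR through the state machine, then waits for the
 * endpoint to be disassociated and for the last reference to drop before
 * freeing the hardware queues.
 */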
int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_qp_attributes attrs;
	struct c4iw_ucontext *ucontext;

	CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, ib_qp);
	qhp = to_c4iw_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = C4IW_QP_STATE_ERROR;
	if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	else
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	spin_lock_irq(&rhp->lock);
	remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid);
	spin_unlock_irq(&rhp->lock);
	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ?
	    to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
	destroy_qp(&rhp->rdev, &qhp->wq,
	    ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	CTR3(KTR_IW_CXGBE, "%s ib_qp %p qpid 0x%0x", __func__, ib_qp,
	    qhp->wq.sq.qid);
	kfree(qhp);
	return 0;
}

struct ib_qp *
c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
    struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_pd *php;
	struct c4iw_cq *schp;
	struct c4iw_cq *rchp;
	struct c4iw_create_qp_resp uresp;
	int sqsize, rqsize;
	struct c4iw_ucontext *ucontext;
	int ret, spg_ndesc;
	struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);

	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
		return ERR_PTR(-EINVAL);

	spg_ndesc = rhp->rdev.adap->params.sge.spg_len / EQ_ESIZE;
	rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
	if (rqsize > T4_MAX_RQ_SIZE(spg_ndesc))
		return ERR_PTR(-E2BIG);

	sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
	if (sqsize > T4_MAX_SQ_SIZE(spg_ndesc))
		return ERR_PTR(-E2BIG);

	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.sq.size = sqsize;
	qhp->wq.sq.memsize = (sqsize + spg_ndesc) * sizeof *qhp->wq.sq.queue +
	    16 * sizeof(__be64);
	qhp->wq.rq.size = rqsize;
	qhp->wq.rq.memsize = (rqsize + spg_ndesc) * sizeof *qhp->wq.rq.queue;

	if (ucontext) {
		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
		qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
	}

	CTR5(KTR_IW_CXGBE, "%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu",
	    __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
	    ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize - 1;
	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = C4IW_QP_STATE_IDLE;
	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;
	qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
	spin_lock_init(&qhp->lock);
	mutex_init(&qhp->mutex);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	spin_lock_irq(&rhp->lock);
	ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
	spin_unlock_irq(&rhp->lock);
	if (ret)
		goto err2;
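
	/*
	 * Editor's note (descriptive, not in the original): for user-mode
	 * QPs, hand back mmap keys so the library can map the SQ and RQ
	 * memory and the corresponding user doorbells into its address
	 * space; insert_mmap() registers each key/address pair.
	 */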
	if (udata) {
		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			ret = -ENOMEM;
			goto err3;
		}
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			ret = -ENOMEM;
			goto err4;
		}
		mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
		if (!mm3) {
			ret = -ENOMEM;
			goto err5;
		}
		mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
		if (!mm4) {
			ret = -ENOMEM;
			goto err6;
		}
		uresp.flags = 0;
		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.sqid = qhp->wq.sq.qid;
		uresp.sq_size = qhp->wq.sq.size;
		uresp.sq_memsize = qhp->wq.sq.memsize;
		uresp.rqid = qhp->wq.rq.qid;
		uresp.rq_size = qhp->wq.rq.size;
		uresp.rq_memsize = qhp->wq.rq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.sq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.sq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err7;
		mm1->key = uresp.sq_key;
		mm1->addr = qhp->wq.sq.phys_addr;
		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		CTR4(KTR_IW_CXGBE, "%s mm1 %x, %x, %d", __func__, mm1->key,
		    mm1->addr, mm1->len);
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.rq_key;
		mm2->addr = vtophys(qhp->wq.rq.queue);
		mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
		CTR4(KTR_IW_CXGBE, "%s mm2 %x, %x, %d", __func__, mm2->key,
		    mm2->addr, mm2->len);
		insert_mmap(ucontext, mm2);
		mm3->key = uresp.sq_db_gts_key;
		mm3->addr = qhp->wq.sq.udb;
		mm3->len = PAGE_SIZE;
		CTR4(KTR_IW_CXGBE, "%s mm3 %x, %x, %d", __func__, mm3->key,
		    mm3->addr, mm3->len);
		insert_mmap(ucontext, mm3);
		mm4->key = uresp.rq_db_gts_key;
		mm4->addr = qhp->wq.rq.udb;
		mm4->len = PAGE_SIZE;
		CTR4(KTR_IW_CXGBE, "%s mm4 %x, %x, %d", __func__, mm4->key,
		    mm4->addr, mm4->len);
		insert_mmap(ucontext, mm4);
	}
	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	init_timer(&(qhp->timer));
	CTR5(KTR_IW_CXGBE,
	    "%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x",
	    __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	    qhp->wq.sq.qid);
	return &qhp->ibqp;
err7:
	kfree(mm4);
err6:
	kfree(mm3);
err5:
	kfree(mm2);
err4:
	kfree(mm1);
err3:
	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
	destroy_qp(&rhp->rdev, &qhp->wq,
	    ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(qhp);
	return ERR_PTR(ret);
}
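
/*
 * Editor's note (descriptive, not in the original): c4iw_ib_modify_qp()
 * translates the verbs modify-QP attributes into c4iw attributes and
 * masks; the RTR state is dropped up front because iWARP has no RTR.
 */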
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
	    IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
	    IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
	    (C4IW_QP_ATTR_ENABLE_RDMA_READ |
	    C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
	    C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}

struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	CTR3(KTR_IW_CXGBE, "%s ib_dev %p qpn 0x%x", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}

int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);

	memset(attr, 0, sizeof *attr);
	memset(init_attr, 0, sizeof *init_attr);
	attr->qp_state = to_ib_qp_state(qhp->attr.state);
	init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
	init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
	init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
	init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
	init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
	init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
	return 0;
}
#endif