/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/iw_cxgbe/qp.c 309450 2016-12-03 00:18:38Z jhb $");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>
#include <netinet/in.h>
#include <net/route.h>

#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp.h>
#include <netinet/tcpip.h>

#include <netinet/toecore.h>

struct sge_iq;
struct rss_header;
#include <linux/types.h>
#include "offload.h"
#include "tom/t4_tom.h"

#include "iw_cxgbe.h"
#include "user.h"

static void creds(struct toepcb *toep, size_t wrsize);

static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
	unsigned long flag;
	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);
}

static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{

	contigfree(sq->queue, sq->memsize, M_DEVBUF);
}

static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{

	dealloc_host_sq(rdev, sq);
}

static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	sq->queue = contigmalloc(sq->memsize, M_DEVBUF, M_NOWAIT, 0ul, ~0ul,
	    4096, 0);

	if (sq->queue)
		sq->dma_addr = vtophys(sq->queue);
	else
		return -ENOMEM;
	sq->phys_addr = vtophys(sq->queue);
	pci_unmap_addr_set(sq, mapping, sq->dma_addr);
	CTR4(KTR_IW_CXGBE, "%s sq %p dma_addr %p phys_addr %p", __func__,
	    sq->queue, sq->dma_addr, sq->phys_addr);
	return 0;
}
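
/*
 * Undo create_qp(): free the RQ and SQ queue memory, return the RQT
 * entries and the qids, and release the kernel-mode software queues
 * (never allocated for user QPs; kfree(NULL) is a no-op).
 */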
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
    struct c4iw_dev_ucontext *uctx)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	contigfree(wq->rq.queue, wq->rq.memsize, M_DEVBUF);
	dealloc_sq(rdev, &wq->sq);
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	kfree(wq->rq.sw_rq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return 0;
}
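
/*
 * Allocate the host-side resources for a QP (qids, software queues,
 * RQT entries, contiguous queue memory), then program both egress
 * queue contexts with a single FW_RI_RES_WR carrying two resources,
 * and wait for the firmware to acknowledge it.
 */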
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
    struct t4_cq *rcq, struct t4_cq *scq,
    struct c4iw_dev_ucontext *uctx)
{
	struct adapter *sc = rdev->adap;
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	int ret;
	int eqsize;
	struct wrqe *wr;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid)
		goto err1;

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
		    GFP_KERNEL);
		if (!wq->sq.sw_sq)
			goto err2;

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
		    GFP_KERNEL);
		if (!wq->rq.sw_rq)
			goto err3;
	}

	/* RQT must be a power of 2. */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr)
		goto err4;

	if (alloc_host_sq(rdev, &wq->sq))
		goto err5;

	memset(wq->sq.queue, 0, wq->sq.memsize);
	pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = contigmalloc(wq->rq.memsize,
	    M_DEVBUF, M_NOWAIT, 0ul, ~0ul, 4096, 0);
	if (wq->rq.queue)
		wq->rq.dma_addr = vtophys(wq->rq.queue);
	else
		goto err6;
	CTR5(KTR_IW_CXGBE,
	    "%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx", __func__,
	    wq->sq.queue, (unsigned long long)vtophys(wq->sq.queue),
	    wq->rq.queue, (unsigned long long)vtophys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = (void *)((unsigned long)rman_get_virtual(sc->regs_res) +
	    sc->sge_kdoorbell_reg);
	wq->gts = (void *)((unsigned long)rman_get_virtual(rdev->adap->regs_res)
	    + sc->sge_gts_reg);
	if (user) {
		wq->sq.udb = (u64)((char*)rman_get_virtual(rdev->adap->udbs_res) +
		    (wq->sq.qid << rdev->qpshift));
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)((char*)rman_get_virtual(rdev->adap->udbs_res) +
		    (wq->rq.qid << rdev->qpshift));
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
	if (wr == NULL)
		goto err7;	/* unwind allocations, don't report success */
	res_wr = wrtod(wr);

	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
	    V_FW_WR_OP(FW_RI_RES_WR) |
	    V_FW_RI_RES_WR_NRES(2) |
	    F_FW_WR_COMPL);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/* eqsize is the number of 64B entries plus the status page size. */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
	    (sc->params.sge.spg_len / EQ_ESIZE);

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
	    V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
	    V_FW_RI_RES_WR_CPRIO(0) |		/* don't keep in chip cache */
	    V_FW_RI_RES_WR_PCIECHN(0) |		/* set by uP at ri_init time */
	    V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
	    V_FW_RI_RES_WR_DCAEN(0) |
	    V_FW_RI_RES_WR_DCACPU(0) |
	    V_FW_RI_RES_WR_FBMIN(2) |
	    V_FW_RI_RES_WR_FBMAX(2) |
	    V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
	    V_FW_RI_RES_WR_CIDXFTHRESH(0) |
	    V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/* eqsize is the number of 64B entries plus the status page size. */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
	    (sc->params.sge.spg_len / EQ_ESIZE);
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
	    V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
	    V_FW_RI_RES_WR_CPRIO(0) |		/* don't keep in chip cache */
	    V_FW_RI_RES_WR_PCIECHN(0) |		/* set by uP at ri_init time */
	    V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
	    V_FW_RI_RES_WR_DCAEN(0) |
	    V_FW_RI_RES_WR_DCACPU(0) |
	    V_FW_RI_RES_WR_FBMIN(2) |
	    V_FW_RI_RES_WR_FBMAX(2) |
	    V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
	    V_FW_RI_RES_WR_CIDXFTHRESH(0) |
	    V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	t4_wrq_tx(sc, wr);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto err7;

	CTR6(KTR_IW_CXGBE,
	    "%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx",
	    __func__, wq->sq.qid, wq->rq.qid, wq->db,
	    (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
err7:
	contigfree(wq->rq.queue, wq->rq.memsize, M_DEVBUF);
err6:
	dealloc_sq(rdev, &wq->sq);
err5:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
err4:
	kfree(wq->rq.sw_rq);
err3:
	kfree(wq->sq.sw_sq);
err2:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
err1:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return -ENOMEM;
}

static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
    struct ib_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int rem, len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
			return -EMSGSIZE;
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		rem = wr->sg_list[i].length;
		while (rem) {
			if (dstp == (u8 *)&sq->queue[sq->size])
				dstp = (u8 *)sq->queue;
			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
				len = rem;
			else
				len = (u8 *)&sq->queue[sq->size] - dstp;
			memcpy(dstp, srcp, len);
			dstp += len;
			srcp += len;
			rem -= len;
		}
	}
	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->r1 = 0;
	immdp->r2 = 0;
	immdp->immdlen = cpu_to_be32(plen);
	*plenp = plen;
	return 0;
}
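
/*
 * Write an immediate scatter/gather list into the work queue.  Each
 * SGE becomes two big-endian flits, {lkey | length, addr}; the
 * destination pointer wraps from queue_end back to queue_start because
 * a WR may straddle the end of the ring.
 */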
static int build_isgl(__be64 *queue_start, __be64 *queue_end,
    struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
    int num_sge, u32 *plenp)
{
	int i;
	u32 plen = 0;
	__be64 *flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += sg_list[i].length;
		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
		    sg_list[i].length);
		if (++flitp == queue_end)
			flitp = queue_start;
		*flitp = cpu_to_be64(sg_list[i].addr);
		if (++flitp == queue_end)
			flitp = queue_start;
	}
	*flitp = (__force __be64)0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = cpu_to_be16(num_sge);
	isglp->r2 = 0;
	if (plenp)
		*plenp = plen;
	return 0;
}

static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
    struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
			    V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
			    V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
			    V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
			    V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;

	default:
		return -EINVAL;
	}

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
			    T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			    plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
			    (__be64 *)&sq->queue[sq->size],
			    wqe->send.u.isgl_src,
			    wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			    wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}
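
/*
 * RDMA WRITE: the sink stag and TO come from the work request itself;
 * the source is either immediate (inline) data or an ISGL, exactly as
 * in build_rdma_send() above.
 */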
static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
    struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	wqe->write.r2 = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
			    T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			    plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
			    (__be64 *)&sq->queue[sq->size],
			    wqe->write.u.isgl_src,
			    wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			    wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	if (wr->num_sge) {
		wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
		    >> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
		    >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	return 0;
}

static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
    struct ib_recv_wr *wr, u8 *len16)
{
	int ret;

	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
	    (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
	    &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return ret;
	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
	    wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}

static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
    struct ib_send_wr *wr, u8 *len16)
{
	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
	int rem;

	if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
		return -EINVAL;

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
	wqe->fr.len_hi = 0;
	wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
	wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
	wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
	    0xffffffff);
	WARN_ON(pbllen > T4_MAX_FR_IMMD);
	imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
	imdp->op = FW_RI_DATA_IMMD;
	imdp->r1 = 0;
	imdp->r2 = 0;
	imdp->immdlen = cpu_to_be32(pbllen);
	p = (__be64 *)(imdp + 1);
	rem = pbllen;
	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
		*p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
		rem -= sizeof *p;
		if (++p == (__be64 *)&sq->queue[sq->size])
			p = (__be64 *)sq->queue;
	}
	BUG_ON(rem < 0);
	while (rem) {
		*p = 0;
		rem -= sizeof *p;
		if (++p == (__be64 *)&sq->queue[sq->size])
			p = (__be64 *)sq->queue;
	}
	*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16);
	return 0;
}

static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
    u8 *len16)
{
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->inv.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
	return 0;
}
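
/*
 * QP reference counting: c4iw_destroy_qp() sleeps until the refcnt
 * drops to zero, so whoever releases the last reference must wake the
 * waiter.
 */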
void c4iw_qp_add_ref(struct ib_qp *qp)
{
	CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, qp);
	atomic_inc(&(to_c4iw_qp(qp)->refcnt));
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
	CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, qp);
	if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
		wake_up(&(to_c4iw_qp(qp)->wait));
}

int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
    struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
		    qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);

		fw_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
		case IB_WR_SEND:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
			else
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
			else
				fw_flags = 0;
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		case IB_WR_FAST_REG_MR:
			fw_opcode = FW_RI_FR_NSMR_WR;
			swsqe->opcode = FW_RI_FAST_REGISTER;
			err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			break;
		default:
			CTR2(KTR_IW_CXGBE, "%s post of type =%d TBD!", __func__,
			    wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
		    qhp->sq_sig_all;
		swsqe->wr_id = wr->wr_id;

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		CTR5(KTR_IW_CXGBE,
		    "%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u",
		    __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		    swsqe->opcode, swsqe->read_len);
		wr = wr->next;
		num_wrs--;
		t4_sq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	}

	t4_ring_sq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}
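
/*
 * Post a chain of receive work requests.  Mirrors c4iw_post_send():
 * bail out early if the WQ is in error, stop at the first WR that
 * fails to build or does not fit, and ring the doorbell once for
 * everything that was queued.
 */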
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
    struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe;
	u32 num_wrs;
	u8 len16 = 0;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
		    qhp->wq.rq.wq_pidx * T4_EQ_ENTRY_SIZE);
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		CTR3(KTR_IW_CXGBE, "%s cookie 0x%llx pidx %u", __func__,
		    (unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
		wr = wr->next;
		num_wrs--;
	}

	t4_ring_rq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}

int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
{
	return -ENOSYS;
}
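
/*
 * Map the status of an error CQE to the layer/etype and error code
 * carried in the TERMINATE message.  Without a CQE to inspect, report
 * a local catastrophic RDMAP error.
 */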
static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
    u8 *ecode)
{
	int status;
	int tagged;
	int opcode;
	int rqtype;
	int send_inv;

	if (!err_cqe) {
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		return;
	}

	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
	    (opcode == FW_RI_SEND_WITH_SE_INV);
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
	    (rqtype && (opcode == FW_RI_READ_RESP));

	switch (status) {
	case T4_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case T4_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case T4_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case T4_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case T4_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case T4_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case T4_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case T4_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case T4_ERR_MSN:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case T4_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}

static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
    gfp_t gfp)
{
	struct fw_ri_wr *wqe;
	struct terminate_message *term;
	struct wrqe *wr;
	struct socket *so = qhp->ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;

	CTR4(KTR_IW_CXGBE, "%s qhp %p qid 0x%x tid %u", __func__, qhp,
	    qhp->wq.sq.qid, qhp->ep->hwtid);

	wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq);
	if (wr == NULL)
		return;
	wqe = wrtod(wr);

	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(V_FW_WR_OP(FW_RI_WR));
	wqe->flowid_len16 = cpu_to_be32(
	    V_FW_WR_FLOWID(qhp->ep->hwtid) |
	    V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
		term->layer_etype = qhp->attr.layer_etype;
		term->ecode = qhp->attr.ecode;
	} else
		build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	creds(toep, sizeof(*wqe));
	t4_wrq_tx(qhp->rhp->rdev.adap, wr);
}
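
/*
 * Flush both work queues into their CQs.  Kernel QPs drain the
 * hardware CQs and insert flushed completions here; user QPs (see
 * flush_qp() below) are only marked in error and their CQs notified.
 */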
/* Assumes qhp lock is held. */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
    struct c4iw_cq *schp)
{
	int count;
	int flushed;
	unsigned long flag;

	CTR4(KTR_IW_CXGBE, "%s qhp %p rchp %p schp %p", __func__, qhp, rchp,
	    schp);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&rchp->cq);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);
	if (flushed && rchp->ibcq.comp_handler) {
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
	}

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&schp->cq);
	c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, flag);
	if (flushed && schp->ibcq.comp_handler) {
		spin_lock_irqsave(&schp->comp_handler_lock, flag);
		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
	}
}

static void flush_qp(struct c4iw_qp *qhp)
{
	struct c4iw_cq *rchp, *schp;
	unsigned long flag;

	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
	schp = get_chp(qhp->rhp, qhp->attr.scq);

	if (qhp->ibqp.uobject) {
		t4_set_wq_in_error(&qhp->wq);
		t4_set_cq_in_error(&rchp->cq);
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		if (schp != rchp) {
			t4_set_cq_in_error(&schp->cq);
			spin_lock_irqsave(&schp->comp_handler_lock, flag);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
			    schp->ibcq.cq_context);
			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
		}
		return;
	}
	__flush_qp(qhp, rchp, schp);
}

static int
rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, struct c4iw_ep *ep)
{
	struct c4iw_rdev *rdev = &rhp->rdev;
	struct adapter *sc = rdev->adap;
	struct fw_ri_wr *wqe;
	int ret;
	struct wrqe *wr;
	struct socket *so = ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;

	KASSERT(rhp == qhp->rhp && ep == qhp->ep, ("%s: EDOOFUS", __func__));

	CTR4(KTR_IW_CXGBE, "%s qhp %p qid 0x%x tid %u", __func__, qhp,
	    qhp->wq.sq.qid, ep->hwtid);

	wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq);
	if (wr == NULL)
		return (-ENOMEM);
	wqe = wrtod(wr);

	memset(wqe, 0, sizeof *wqe);

	wqe->op_compl = cpu_to_be32(V_FW_WR_OP(FW_RI_WR) | F_FW_WR_COMPL);
	wqe->flowid_len16 = cpu_to_be32(V_FW_WR_FLOWID(ep->hwtid) |
	    V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
	wqe->cookie = (unsigned long) &ep->com.wr_wait;
	wqe->u.fini.type = FW_RI_TYPE_FINI;

	c4iw_init_wr_wait(&ep->com.wr_wait);

	creds(toep, sizeof(*wqe));
	t4_wrq_tx(sc, wr);

	ret = c4iw_wait_for_reply(rdev, &ep->com.wr_wait, ep->hwtid,
	    qhp->wq.sq.qid, __func__);
	return ret;
}
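
/*
 * Build the dummy RDMA operation (a zero-length WRITE or a READ REQ)
 * that the initiator issues as the MPA peer-to-peer "ready to receive"
 * message for the negotiated p2p_type.
 */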
static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
	CTR2(KTR_IW_CXGBE, "%s p2p_type = %d", __func__, p2p_type);
	memset(&init->u, 0, sizeof init->u);
	switch (p2p_type) {
	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
		init->u.write.stag_sink = cpu_to_be32(1);
		init->u.write.to_sink = cpu_to_be64(1);
		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
		    sizeof(struct fw_ri_immd), 16);
		break;
	case FW_RI_INIT_P2PTYPE_READ_REQ:
		init->u.write.opcode = FW_RI_RDMA_READ_WR;
		init->u.read.stag_src = cpu_to_be32(1);
		init->u.read.to_src_lo = cpu_to_be32(1);
		init->u.read.stag_sink = cpu_to_be32(1);
		init->u.read.to_sink_lo = cpu_to_be32(1);
		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
		break;
	}
}

static void
creds(struct toepcb *toep, size_t wrsize)
{
	struct ofld_tx_sdesc *txsd;

	CTR3(KTR_IW_CXGBE, "%s:creB %p %u", __func__, toep, wrsize);
	INP_WLOCK(toep->inp);
	txsd = &toep->txsd[toep->txsd_pidx];
	txsd->tx_credits = howmany(wrsize, 16);
	txsd->plen = 0;
	KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
	    ("%s: not enough credits (%d)", __func__, toep->tx_credits));
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;
	INP_WUNLOCK(toep->inp);
	CTR5(KTR_IW_CXGBE, "%s:creE %p %u %u %u", __func__, toep,
	    txsd->tx_credits, toep->tx_credits, toep->txsd_pidx);
}
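
/*
 * Post a FW_RI_WR of type INIT to move the connection into RDMA mode:
 * the MPA/marker/CRC attributes, QP capabilities, queue ids, and TCP
 * iss/irs are handed to the firmware, and we wait for its reply.
 */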
static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct wrqe *wr;
	struct c4iw_ep *ep = qhp->ep;
	struct c4iw_rdev *rdev = &qhp->rhp->rdev;
	struct adapter *sc = rdev->adap;
	struct socket *so = ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;

	CTR4(KTR_IW_CXGBE, "%s qhp %p qid 0x%x tid %u", __func__, qhp,
	    qhp->wq.sq.qid, ep->hwtid);

	wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq);
	if (wr == NULL)
		return (-ENOMEM);
	wqe = wrtod(wr);

	memset(wqe, 0, sizeof *wqe);

	wqe->op_compl = cpu_to_be32(
	    V_FW_WR_OP(FW_RI_WR) |
	    F_FW_WR_COMPL);
	wqe->flowid_len16 = cpu_to_be32(V_FW_WR_FLOWID(ep->hwtid) |
	    V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->cookie = (unsigned long) &ep->com.wr_wait;

	wqe->u.init.type = FW_RI_TYPE_INIT;
	wqe->u.init.mpareqbit_p2ptype =
	    V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
	    V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
	if (qhp->attr.mpa_attr.recv_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.xmit_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.crc_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
	    FW_RI_QP_RDMA_WRITE_ENABLE |
	    FW_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
		    FW_RI_QP_STAG0_ENABLE;
	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
	wqe->u.init.iss = cpu_to_be32(ep->snd_seq);
	wqe->u.init.irs = cpu_to_be32(ep->rcv_seq);
	wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
	wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
	    sc->vres.rq.start);
	if (qhp->attr.mpa_attr.initiator)
		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

	c4iw_init_wr_wait(&ep->com.wr_wait);

	creds(toep, sizeof(*wqe));
	t4_wrq_tx(sc, wr);

	ret = c4iw_wait_for_reply(rdev, &ep->com.wr_wait, ep->hwtid,
	    qhp->wq.sq.qid, __func__);

	toep->ulp_mode = ULP_MODE_RDMA;

	return ret;
}
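
/*
 * The QP state machine.  IDLE->RTS posts the RI INIT, RTS->CLOSING
 * posts a FINI and triggers a normal disconnect, and RTS->TERMINATE/
 * ERROR are the abnormal paths.  "internal" marks transitions driven
 * by the driver itself rather than by a verbs consumer.
 */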
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
    enum c4iw_qp_attr_mask mask,
    struct c4iw_qp_attributes *attrs,
    int internal)
{
	int ret = 0;
	struct c4iw_qp_attributes newattr = qhp->attr;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct c4iw_ep *ep = NULL;

	CTR5(KTR_IW_CXGBE, "%s qhp %p sqid 0x%x rqid 0x%x ep %p", __func__, qhp,
	    qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep);
	CTR3(KTR_IW_CXGBE, "%s state %d -> %d", __func__, qhp->attr.state,
	    (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	mutex_lock(&qhp->mutex);

	/* Process attr changes if in IDLE */
	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & C4IW_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & C4IW_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case C4IW_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_RTS:
			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			set_state(qhp, C4IW_QP_STATE_RTS);

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			c4iw_get_ep(&qhp->ep->com);
			ret = rdma_init(rhp, qhp);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			flush_qp(qhp);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			set_state(qhp, C4IW_QP_STATE_CLOSING);
			ep = qhp->ep;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				c4iw_get_ep(&qhp->ep->com);
			}
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			ret = rdma_fini(rhp, qhp, ep);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_TERMINATE:
			set_state(qhp, C4IW_QP_STATE_TERMINATE);
			qhp->attr.layer_etype = attrs->layer_etype;
			qhp->attr.ecode = attrs->ecode;
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			ep = qhp->ep;
			if (!internal)
				terminate = 1;
			disconnect = 1;
			c4iw_get_ep(&qhp->ep->com);
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				c4iw_get_ep(&qhp->ep->com);
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_CLOSING:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case C4IW_QP_STATE_IDLE:
			flush_qp(qhp);
			set_state(qhp, C4IW_QP_STATE_IDLE);
			qhp->attr.llp_stream_handle = NULL;
			c4iw_put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case C4IW_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto err;
		}
		break;
	case C4IW_QP_STATE_ERROR:
		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}
		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
			ret = -EINVAL;
			goto out;
		}
		set_state(qhp, C4IW_QP_STATE_IDLE);
		break;
	case C4IW_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printf("%s in a bad state %d\n", __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	CTR3(KTR_IW_CXGBE, "%s disassociating ep %p qpid 0x%x", __func__,
	    qhp->ep, qhp->wq.sq.qid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	if (!ep)
		ep = qhp->ep;
	qhp->ep = NULL;
	set_state(qhp, C4IW_QP_STATE_ERROR);
	free = 1;
	BUG_ON(!ep);
	flush_qp(qhp);
	wake_up(&qhp->wait);
out:
	mutex_unlock(&qhp->mutex);

	if (terminate)
		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
		    GFP_KERNEL);
		c4iw_put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s exit state %d", __func__, qhp->attr.state);
	return ret;
}
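
/*
 * Destroy a QP: force it to ERROR (terminating or flushing as needed),
 * wait for the endpoint to be disassociated, remove the qpid from the
 * idr, and wait for the last reference to go away before freeing.
 */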
int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_qp_attributes attrs;
	struct c4iw_ucontext *ucontext;

	CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, ib_qp);
	qhp = to_c4iw_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = C4IW_QP_STATE_ERROR;
	if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	else
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	spin_lock_irq(&rhp->lock);
	remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid);
	spin_unlock_irq(&rhp->lock);
	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ?
	    to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
	destroy_qp(&rhp->rdev, &qhp->wq,
	    ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	CTR3(KTR_IW_CXGBE, "%s ib_qp %p qpid 0x%0x", __func__, ib_qp,
	    qhp->wq.sq.qid);
	kfree(qhp);
	return 0;
}
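
/*
 * Create a QP.  For user QPs the response carries four mmap keys: the
 * SQ and RQ queue memory plus a doorbell/GTS page for each, which the
 * userspace library maps so it can ring doorbells directly.
 */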
struct ib_qp *
c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
    struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_pd *php;
	struct c4iw_cq *schp;
	struct c4iw_cq *rchp;
	struct c4iw_create_qp_resp uresp;
	int sqsize, rqsize;
	struct c4iw_ucontext *ucontext;
	int ret;
	struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);

	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
		return ERR_PTR(-EINVAL);

	rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
	if (rqsize > T4_MAX_RQ_SIZE)
		return ERR_PTR(-E2BIG);

	sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
	if (sqsize > T4_MAX_SQ_SIZE)
		return ERR_PTR(-E2BIG);

	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.sq.size = sqsize;
	qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
	qhp->wq.rq.size = rqsize;
	qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;

	if (ucontext) {
		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
		qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
	}

	CTR5(KTR_IW_CXGBE, "%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu",
	    __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
	    ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize - 1;
	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = C4IW_QP_STATE_IDLE;
	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;
	qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
	spin_lock_init(&qhp->lock);
	mutex_init(&qhp->mutex);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	spin_lock_irq(&rhp->lock);
	ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
	spin_unlock_irq(&rhp->lock);
	if (ret)
		goto err2;

	if (udata) {
		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			ret = -ENOMEM;
			goto err3;
		}
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			ret = -ENOMEM;
			goto err4;
		}
		mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
		if (!mm3) {
			ret = -ENOMEM;
			goto err5;
		}
		mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
		if (!mm4) {
			ret = -ENOMEM;
			goto err6;
		}
		uresp.flags = 0;
		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.sqid = qhp->wq.sq.qid;
		uresp.sq_size = qhp->wq.sq.size;
		uresp.sq_memsize = qhp->wq.sq.memsize;
		uresp.rqid = qhp->wq.rq.qid;
		uresp.rq_size = qhp->wq.rq.size;
		uresp.rq_memsize = qhp->wq.rq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.sq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.sq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err7;
		mm1->key = uresp.sq_key;
		mm1->addr = qhp->wq.sq.phys_addr;
		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		CTR4(KTR_IW_CXGBE, "%s mm1 %x, %x, %d", __func__, mm1->key,
		    mm1->addr, mm1->len);
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.rq_key;
		mm2->addr = vtophys(qhp->wq.rq.queue);
		mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
		CTR4(KTR_IW_CXGBE, "%s mm2 %x, %x, %d", __func__, mm2->key,
		    mm2->addr, mm2->len);
		insert_mmap(ucontext, mm2);
		mm3->key = uresp.sq_db_gts_key;
		mm3->addr = qhp->wq.sq.udb;
		mm3->len = PAGE_SIZE;
		CTR4(KTR_IW_CXGBE, "%s mm3 %x, %x, %d", __func__, mm3->key,
		    mm3->addr, mm3->len);
		insert_mmap(ucontext, mm3);
		mm4->key = uresp.rq_db_gts_key;
		mm4->addr = qhp->wq.rq.udb;
		mm4->len = PAGE_SIZE;
		CTR4(KTR_IW_CXGBE, "%s mm4 %x, %x, %d", __func__, mm4->key,
		    mm4->addr, mm4->len);
		insert_mmap(ucontext, mm4);
	}
	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	init_timer(&(qhp->timer));
	CTR5(KTR_IW_CXGBE,
	    "%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x",
	    __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	    qhp->wq.sq.qid);
	return &qhp->ibqp;
err7:
	kfree(mm4);
err6:
	kfree(mm3);
err5:
	kfree(mm2);
err4:
	kfree(mm1);
err3:
	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
	destroy_qp(&rhp->rdev, &qhp->wq,
	    ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(qhp);
	return ERR_PTR(ret);
}

int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
    int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
	    IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
	    IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
	    (C4IW_QP_ATTR_ENABLE_RDMA_READ |
	     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
	     C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}

struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	CTR3(KTR_IW_CXGBE, "%s ib_dev %p qpn 0x%x", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}

int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
    int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);

	memset(attr, 0, sizeof *attr);
	memset(init_attr, 0, sizeof *init_attr);
	attr->qp_state = to_ib_qp_state(qhp->attr.state);
	init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
	init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
	init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
	init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
	init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
	init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
	return 0;
}
#endif