qp.c revision 319255
/*
 * Copyright (c) 2006-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#if HAVE_CONFIG_H
# include <config.h>
#endif /* HAVE_CONFIG_H */

#include <assert.h>
#include <stdlib.h>
#include <pthread.h>
#include <string.h>
#include <stdio.h>
#include <netinet/in.h>
#include "libcxgb4.h"

#ifdef STATS
struct c4iw_stats c4iw_stats;
#endif

static void copy_wr_to_sq(struct t4_wq *wq, union t4_wr *wqe, u8 len16)
{
	void *src, *dst;
	uintptr_t end;
	int total, len;

	src = &wqe->flits[0];
	dst = &wq->sq.queue->flits[wq->sq.wq_pidx *
	    (T4_EQ_ENTRY_SIZE / sizeof(__be64))];
	if (t4_sq_onchip(wq)) {
		len16 = align(len16, 4);
		wc_wmb();
	}

	total = len16 * 16;
	end = (uintptr_t)&wq->sq.queue[wq->sq.size];
	if (__predict_true((uintptr_t)dst + total <= end)) {
		/* Won't wrap around. */
		memcpy(dst, src, total);
	} else {
		len = end - (uintptr_t)dst;
		memcpy(dst, src, len);
		memcpy(wq->sq.queue, src + len, total - len);
	}
}

static void copy_wr_to_rq(struct t4_wq *wq, union t4_recv_wr *wqe, u8 len16)
{
	void *src, *dst;
	uintptr_t end;
	int total, len;

	src = &wqe->flits[0];
	dst = &wq->rq.queue->flits[wq->rq.wq_pidx *
	    (T4_EQ_ENTRY_SIZE / sizeof(__be64))];

	total = len16 * 16;
	end = (uintptr_t)&wq->rq.queue[wq->rq.size];
	if (__predict_true((uintptr_t)dst + total <= end)) {
		/* Won't wrap around. */
		memcpy(dst, src, total);
	} else {
		len = end - (uintptr_t)dst;
		memcpy(dst, src, len);
		memcpy(wq->rq.queue, src + len, total - len);
	}
}
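/*
 * WQE payload builders.  build_immd() copies the caller's SGE buffers
 * straight into the WQE as immediate data (the IBV_SEND_INLINE path) and
 * zero-pads to the next 16-byte boundary; it fails with -EMSGSIZE when
 * the data exceeds 'max'.  build_isgl() instead emits an ISGL of
 * {lkey, length, address} entries for the hardware to DMA from, failing
 * with -EMSGSIZE if the total length overflows.
 */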
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      struct ibv_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
			return -EMSGSIZE;
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		len = wr->sg_list[i].length;
		memcpy(dstp, srcp, len);
		dstp += len;
		srcp += len;
	}
	len = ROUND_UP(plen + 8, 16) - (plen + 8);
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->r1 = 0;
	immdp->r2 = 0;
	immdp->immdlen = cpu_to_be32(plen);
	*plenp = plen;
	return 0;
}

static int build_isgl(struct fw_ri_isgl *isglp, struct ibv_sge *sg_list,
		      int num_sge, u32 *plenp)
{
	int i;
	u32 plen = 0;
	__be64 *flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += sg_list[i].length;
		*flitp++ = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
				       sg_list[i].length);
		*flitp++ = cpu_to_be64(sg_list[i].addr);
	}
	*flitp = 0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = cpu_to_be16(num_sge);
	isglp->r2 = 0;
	if (plenp)
		*plenp = plen;
	return 0;
}

static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   struct ibv_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	if (wr->send_flags & IBV_SEND_SOLICITED)
		wqe->send.sendop_pkd = cpu_to_be32(
			V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
	else
		wqe->send.sendop_pkd = cpu_to_be32(
			V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
	wqe->send.stag_inv = 0;
	wqe->send.r3 = 0;
	wqe->send.r4 = 0;

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IBV_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
					 T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl(wqe->send.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    struct ibv_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	wqe->write.r2 = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IBV_SEND_INLINE) {
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
					 T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl(wqe->write.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}
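/*
 * build_rdma_read() formats an FW_RI_RDMA_READ_WR.  Only a single SGE is
 * accepted; with no SGE at all a zero-length read is built with both
 * STAGs set to 2.  The local sink buffer comes from sg_list[0] and the
 * remote source from wr->wr.rdma.
 */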
static int build_rdma_read(union t4_wr *wqe, struct ibv_send_wr *wr, u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	if (wr->num_sge) {
		wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr >> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	return 0;
}

static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ibv_recv_wr *wr, u8 *len16)
{
	int ret;

	ret = build_isgl(&wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return ret;
	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}

void dump_wqe(void *arg)
{
	u64 *p = arg;
	int len16;

	len16 = be64_to_cpu(*p) & 0xff;
	while (len16--) {
		printf("%02x: %016lx ", (u8)(unsigned long)p, be64_to_cpu(*p));
		p++;
		printf("%016lx\n", be64_to_cpu(*p));
		p++;
	}
}

static void ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 idx)
{
	struct ibv_modify_qp cmd;
	struct ibv_qp_attr attr;
	int mask;
	int ret;

	wc_wmb();
	if (qid == qhp->wq.sq.qid) {
		attr.sq_psn = idx;
		mask = IBV_QP_SQ_PSN;
	} else {
		attr.rq_psn = idx;
		mask = IBV_QP_RQ_PSN;
	}
	ret = ibv_cmd_modify_qp(&qhp->ibv_qp, &attr, mask, &cmd, sizeof cmd);
	assert(!ret);
}

int c4iw_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
		   struct ibv_send_wr **bad_wr)
{
	int err = 0;
	u8 len16;
	enum fw_wr_opcodes fw_opcode;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe, lwqe;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	pthread_spin_lock(&qhp->lock);
	if (t4_wq_in_error(&qhp->wq)) {
		pthread_spin_unlock(&qhp->lock);
		return -EINVAL;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		pthread_spin_unlock(&qhp->lock);
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe = &lwqe;
		fw_flags = 0;
		if (wr->send_flags & IBV_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IBV_SEND_SIGNALED || qhp->sq_sig_all)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IBV_WR_SEND:
			INC_STAT(send);
			if (wr->send_flags & IBV_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			swsqe->opcode = FW_RI_SEND;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IBV_WR_RDMA_WRITE:
			INC_STAT(write);
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IBV_WR_RDMA_READ:
			INC_STAT(read);
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			fw_flags = 0;
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list ? wr->sg_list[0].length : 0;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IBV_SEND_SIGNALED) ||
				  qhp->sq_sig_all;
		swsqe->flushed = 0;
		swsqe->wr_id = wr->wr_id;

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
		PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x\n",
		     __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		     swsqe->opcode);
		wr = wr->next;
		num_wrs--;
		copy_wr_to_sq(&qhp->wq, wqe, len16);
		t4_sq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	}

	t4_ring_sq_db(&qhp->wq, idx, dev_is_t4(qhp->rhp),
		      len16, wqe);
	qhp->wq.sq.queue[qhp->wq.sq.size].status.host_wq_pidx = \
			(qhp->wq.sq.wq_pidx);
	pthread_spin_unlock(&qhp->lock);
	return err;
}
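/*
 * Receive side of the posting path.  As in c4iw_post_send() above, each
 * WR is built in a local WQE (lwqe) under the QP spinlock, copied into
 * the ring with copy_wr_to_rq(), and the doorbell is rung once for the
 * whole chain via t4_ring_rq_db().
 */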
int c4iw_post_receive(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
		      struct ibv_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe, lwqe;
	u32 num_wrs;
	u8 len16 = 0;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	pthread_spin_lock(&qhp->lock);
	if (t4_wq_in_error(&qhp->wq)) {
		pthread_spin_unlock(&qhp->lock);
		return -EINVAL;
	}
	INC_STAT(recv);
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		pthread_spin_unlock(&qhp->lock);
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = &lwqe;
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		PDBG("%s cookie 0x%llx pidx %u\n", __func__,
		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
		copy_wr_to_rq(&qhp->wq, wqe, len16);
		t4_rq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
		wr = wr->next;
		num_wrs--;
	}

	t4_ring_rq_db(&qhp->wq, idx, dev_is_t4(qhp->rhp),
		      len16, wqe);
	qhp->wq.rq.queue[qhp->wq.rq.size].status.host_wq_pidx = \
			(qhp->wq.rq.wq_pidx);
	pthread_spin_unlock(&qhp->lock);
	return err;
}
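/*
 * update_qp_state() re-queries the kernel so the cached ibv_qp.state
 * reflects the QP's current state; c4iw_flush_qp() calls it before
 * flushing so the user-visible QP state is up to date.
 */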
static void update_qp_state(struct c4iw_qp *qhp)
{
	struct ibv_query_qp cmd;
	struct ibv_qp_attr attr;
	struct ibv_qp_init_attr iattr;
	int ret;

	ret = ibv_cmd_query_qp(&qhp->ibv_qp, &attr, IBV_QP_STATE, &iattr,
			       &cmd, sizeof cmd);
	assert(!ret);
	if (!ret)
		qhp->ibv_qp.state = attr.qp_state;
}

/*
 * Assumes qhp lock is held.
 */
void c4iw_flush_qp(struct c4iw_qp *qhp)
{
	struct c4iw_cq *rchp, *schp;
	int count;

	if (qhp->wq.flushed)
		return;

	update_qp_state(qhp);

	rchp = to_c4iw_cq(qhp->ibv_qp.recv_cq);
	schp = to_c4iw_cq(qhp->ibv_qp.send_cq);

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
	qhp->wq.flushed = 1;
	pthread_spin_unlock(&qhp->lock);

	/* locking hierarchy: cq lock first, then qp lock. */
	pthread_spin_lock(&rchp->lock);
	pthread_spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(rchp);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	pthread_spin_unlock(&qhp->lock);
	pthread_spin_unlock(&rchp->lock);

	/* locking hierarchy: cq lock first, then qp lock. */
	pthread_spin_lock(&schp->lock);
	pthread_spin_lock(&qhp->lock);
	if (schp != rchp)
		c4iw_flush_hw_cq(schp);
	c4iw_flush_sq(qhp);
	pthread_spin_unlock(&qhp->lock);
	pthread_spin_unlock(&schp->lock);
	pthread_spin_lock(&qhp->lock);
}

void c4iw_flush_qps(struct c4iw_dev *dev)
{
	int i;

	pthread_spin_lock(&dev->lock);
	for (i = 0; i < dev->max_qp; i++) {
		struct c4iw_qp *qhp = dev->qpid2ptr[i];
		if (qhp) {
			if (!qhp->wq.flushed && t4_wq_in_error(&qhp->wq)) {
				pthread_spin_lock(&qhp->lock);
				c4iw_flush_qp(qhp);
				pthread_spin_unlock(&qhp->lock);
			}
		}
	}
	pthread_spin_unlock(&dev->lock);
}