cq.c revision 275228
/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if HAVE_CONFIG_H
#  include <config.h>
#endif /* HAVE_CONFIG_H */

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <netinet/in.h>
#include <string.h>

#include <infiniband/opcode.h>

#include "mlx4.h"
#include "doorbell.h"

enum {
	MLX4_CQ_DOORBELL	= 0x20
};

enum {
	CQ_OK		=  0,
	CQ_EMPTY	= -1,
	CQ_POLL_ERR	= -2
};

#define MLX4_CQ_DB_REQ_NOT_SOL		(1 << 24)
#define MLX4_CQ_DB_REQ_NOT		(2 << 24)

enum {
	MLX4_CQE_OWNER_MASK	= 0x80,
	MLX4_CQE_IS_SEND_MASK	= 0x40,
	MLX4_CQE_OPCODE_MASK	= 0x1f
};

enum {
	MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR		= 0x01,
	MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR		= 0x02,
	MLX4_CQE_SYNDROME_LOCAL_PROT_ERR		= 0x04,
	MLX4_CQE_SYNDROME_WR_FLUSH_ERR			= 0x05,
	MLX4_CQE_SYNDROME_MW_BIND_ERR			= 0x06,
	MLX4_CQE_SYNDROME_BAD_RESP_ERR			= 0x10,
	MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR		= 0x11,
	MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR		= 0x12,
	MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR		= 0x13,
	MLX4_CQE_SYNDROME_REMOTE_OP_ERR			= 0x14,
	MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR	= 0x15,
	MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR		= 0x16,
	MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR		= 0x22,
};

struct mlx4_cqe {
	uint32_t	my_qpn;
	uint32_t	immed_rss_invalid;
	uint32_t	g_mlpath_rqpn;
	uint8_t		sl;
	uint8_t		reserved1;
	uint16_t	rlid;
	uint32_t	reserved2;
	uint32_t	byte_cnt;
	uint16_t	wqe_index;
	uint16_t	checksum;
	uint8_t		reserved3[3];
	uint8_t		owner_sr_opcode;
};

struct mlx4_err_cqe {
	uint32_t	my_qpn;
	uint32_t	reserved1[5];
	uint16_t	wqe_index;
	uint8_t		vendor_err;
	uint8_t		syndrome;
	uint8_t		reserved2[3];
	uint8_t		owner_sr_opcode;
};
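
/*
 * CQE ownership: the top bit of owner_sr_opcode toggles each time the
 * producer wraps around the ring, so a CQE belongs to software when
 * that bit matches the "lap" bit of the index being polled
 * (n & (cqe + 1)).  With 64-byte CQEs, the fields this file uses live
 * in the second 32 bytes of each entry, which is why get_sw_cqe()
 * below advances the pointer by one 32-byte struct before testing it.
 */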

static struct mlx4_cqe *get_cqe(struct mlx4_cq *cq, int entry)
{
	return cq->buf.buf + entry * cq->cqe_size;
}

static void *get_sw_cqe(struct mlx4_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibv_cq.cqe);
	struct mlx4_cqe *tcqe = cq->cqe_size == 64 ? cqe + 1 : cqe;

	return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibv_cq.cqe + 1))) ? NULL : tcqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_cq *cq)
{
	return get_sw_cqe(cq, cq->cons_index);
}

static void update_cons_index(struct mlx4_cq *cq)
{
	*cq->set_ci_db = htonl(cq->cons_index & 0xffffff);
}

static void mlx4_handle_error_cqe(struct mlx4_err_cqe *cqe, struct ibv_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR)
		printf(PFX "local QP operation err "
		       "(QPN %06x, WQE index %x, vendor syndrome %02x, "
		       "opcode = %02x)\n",
		       ntohl(cqe->my_qpn), ntohs(cqe->wqe_index),
		       cqe->vendor_err,
		       cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IBV_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IBV_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IBV_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IBV_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IBV_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IBV_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IBV_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IBV_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IBV_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IBV_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IBV_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IBV_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IBV_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IBV_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err;
}
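
/*
 * Decode one hardware CQE into an ibv_wc.  Returns CQ_EMPTY when no
 * software-owned CQE is waiting, CQ_POLL_ERR when the QP or XRC SRQ
 * that the CQE refers to cannot be found, and CQ_OK otherwise.
 * *cur_qp is cached across calls so that a burst of completions on
 * the same QP costs only one lookup in the QP table.
 */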
static int mlx4_poll_one(struct mlx4_cq *cq,
			 struct mlx4_qp **cur_qp,
			 struct ibv_wc *wc)
{
	struct mlx4_wq *wq;
	struct mlx4_cqe *cqe;
	struct mlx4_srq *srq = NULL;
	uint32_t qpn;
	uint32_t srqn;
	uint32_t g_mlpath_rqpn;
	uint16_t wqe_index;
	int is_error;
	int is_send;

	cqe = next_cqe_sw(cq);
	if (!cqe)
		return CQ_EMPTY;

	++cq->cons_index;

	VALGRIND_MAKE_MEM_DEFINED(cqe, sizeof *cqe);

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	qpn = ntohl(cqe->my_qpn);

	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	if ((qpn & MLX4_XRC_QPN_BIT) && !is_send) {
		srqn = ntohl(cqe->g_mlpath_rqpn) & 0xffffff;
		/*
		 * We do not have to take the XRC SRQ table lock here,
		 * because CQs will be locked while XRC SRQs are removed
		 * from the table.
		 */
		srq = mlx4_find_xrc_srq(to_mctx(cq->ibv_cq.context), srqn);
		if (!srq)
			return CQ_POLL_ERR;
	} else if (!*cur_qp || (qpn & 0xffffff) != (*cur_qp)->ibv_qp.qp_num) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		*cur_qp = mlx4_find_qp(to_mctx(cq->ibv_cq.context),
				       qpn & 0xffffff);
		if (!*cur_qp)
			return CQ_POLL_ERR;
	}

	wc->qp_num = qpn & 0xffffff;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		wqe_index = ntohs(cqe->wqe_index);
		wq->tail += (uint16_t) (wqe_index - (uint16_t) wq->tail);
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if (srq) {
		wqe_index = ntohs(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_index];
		mlx4_free_srq_wqe(srq, wqe_index);
	} else if ((*cur_qp)->ibv_qp.srq) {
		srq = to_msrq((*cur_qp)->ibv_qp.srq);
		wqe_index = ntohs(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_index];
		mlx4_free_srq_wqe(srq, wqe_index);
	} else {
		wq = &(*cur_qp)->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}

	if (is_error) {
		mlx4_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return CQ_OK;
	}

	wc->status = IBV_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IBV_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode    = IBV_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IBV_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_SEND:
			wc->opcode    = IBV_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode    = IBV_WC_RDMA_READ;
			wc->byte_len  = ntohl(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode    = IBV_WC_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode    = IBV_WC_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_BIND_MW:
			wc->opcode    = IBV_WC_BIND_MW;
			break;
		default:
			/* assume it's a send completion */
			wc->opcode    = IBV_WC_SEND;
			break;
		}
	} else {
		wc->byte_len = ntohl(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode   = IBV_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IBV_WC_WITH_IMM;
			wc->imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode   = IBV_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode   = IBV_WC_RECV;
			wc->wc_flags = IBV_WC_WITH_IMM;
			wc->imm_data = cqe->immed_rss_invalid;
			break;
		}

		wc->slid	   = ntohs(cqe->rlid);
		wc->sl		   = cqe->sl >> 4;
		g_mlpath_rqpn	   = ntohl(cqe->g_mlpath_rqpn);
		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags	  |= g_mlpath_rqpn & 0x80000000 ? IBV_WC_GRH : 0;
		wc->pkey_index	   = ntohl(cqe->immed_rss_invalid) & 0x7f;
	}

	return CQ_OK;
}

int mlx4_poll_cq(struct ibv_cq *ibcq, int ne, struct ibv_wc *wc)
{
	struct mlx4_cq *cq = to_mcq(ibcq);
	struct mlx4_qp *qp = NULL;
	int npolled;
	int err = CQ_OK;

	pthread_spin_lock(&cq->lock);

	for (npolled = 0; npolled < ne; ++npolled) {
		err = mlx4_poll_one(cq, &qp, wc + npolled);
		if (err != CQ_OK)
			break;
	}

	if (npolled)
		update_cons_index(cq);

	pthread_spin_unlock(&cq->lock);

	return err == CQ_POLL_ERR ? err : npolled;
}
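
/*
 * Usage sketch (illustrative only, not part of this file): applications
 * reach mlx4_poll_cq() through ibv_poll_cq(), which dispatches via the
 * context ops table.  A typical drain loop, where handle_error() is a
 * hypothetical application callback:
 *
 *	struct ibv_wc wc[16];
 *	int i, n;
 *
 *	while ((n = ibv_poll_cq(cq, 16, wc)) > 0)
 *		for (i = 0; i < n; ++i)
 *			if (wc[i].status != IBV_WC_SUCCESS)
 *				handle_error(&wc[i]);
 */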

int mlx4_arm_cq(struct ibv_cq *ibvcq, int solicited)
{
	struct mlx4_cq *cq = to_mcq(ibvcq);
	uint32_t doorbell[2];
	uint32_t sn;
	uint32_t ci;
	uint32_t cmd;

	sn  = cq->arm_sn & 3;
	ci  = cq->cons_index & 0xffffff;
	cmd = solicited ? MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT;

	*cq->arm_db = htonl(sn << 28 | cmd | ci);

	/*
	 * Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	wmb();

	doorbell[0] = htonl(sn << 28 | cmd | cq->cqn);
	doorbell[1] = htonl(ci);

	mlx4_write64(doorbell, to_mctx(ibvcq->context), MLX4_CQ_DOORBELL);

	return 0;
}

void mlx4_cq_event(struct ibv_cq *cq)
{
	to_mcq(cq)->arm_sn++;
}
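
/*
 * Usage sketch (illustrative only): mlx4_arm_cq() is reached through
 * ibv_req_notify_cq(), and libibverbs invokes mlx4_cq_event() from
 * ibv_get_cq_event() when a completion event is read, which keeps
 * arm_sn in step with the events the application has consumed:
 *
 *	ibv_req_notify_cq(cq, 0);                    arm; 0 = any completion
 *	ibv_get_cq_event(channel, &ev_cq, &ev_ctx);  block for the event
 *	ibv_ack_cq_events(ev_cq, 1);
 *	ibv_req_notify_cq(ev_cq, 0);                 re-arm before polling again
 */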

void __mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq)
{
	struct mlx4_cqe *cqe, *dest;
	uint32_t prod_index;
	uint8_t owner_bit;
	int nfreed = 0;
	int is_xrc_srq = 0;
	int cqe_inc = cq->cqe_size == 64 ? 1 : 0;

	if (srq && srq->ibv_srq.xrc_cq)
		is_xrc_srq = 1;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->cons_index + cq->ibv_cq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibv_cq.cqe);
		cqe += cqe_inc;
		if (is_xrc_srq &&
		    (ntohl(cqe->g_mlpath_rqpn) & 0xffffff) == srq->srqn &&
		    !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK)) {
			mlx4_free_srq_wqe(srq, ntohs(cqe->wqe_index));
			++nfreed;
		} else if ((ntohl(cqe->my_qpn) & 0xffffff) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_free_srq_wqe(srq, ntohs(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibv_cq.cqe);
			dest += cqe_inc;
			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		update_cons_index(cq);
	}
}

void mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq)
{
	pthread_spin_lock(&cq->lock);
	__mlx4_cq_clean(cq, qpn, srq);
	pthread_spin_unlock(&cq->lock);
}

int mlx4_get_outstanding_cqes(struct mlx4_cq *cq)
{
	uint32_t i;

	/*
	 * get_sw_cqe() masks the index itself; it must see the unmasked
	 * value so that its ownership "lap" test still works once the
	 * ring has wrapped.
	 */
	for (i = cq->cons_index; get_sw_cqe(cq, i); ++i)
		;

	return i - cq->cons_index;
}

/*
 * Carry the CQEs that are still outstanding from the old buffer into
 * buf during a CQ resize.  Hardware terminates the old buffer with a
 * special RESIZE CQE; each copied entry gets its owner bit recomputed
 * for its slot in the new ring.
 */
void mlx4_cq_resize_copy_cqes(struct mlx4_cq *cq, void *buf, int old_cqe)
{
	struct mlx4_cqe *cqe;
	int i;
	int cqe_inc = cq->cqe_size == 64 ? 1 : 0;

	i = cq->cons_index;
	cqe = get_cqe(cq, (i & old_cqe));
	cqe += cqe_inc;

	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->ibv_cq.cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		memcpy(buf + ((i + 1) & cq->ibv_cq.cqe) * cq->cqe_size,
		       cqe - cqe_inc, cq->cqe_size);
		++i;
		cqe = get_cqe(cq, (i & old_cqe));
		cqe += cqe_inc;
	}

	++cq->cons_index;
}

int mlx4_alloc_cq_buf(struct mlx4_device *dev, struct mlx4_buf *buf, int nent,
		      int entry_size)
{
	if (mlx4_alloc_buf(buf, align(nent * entry_size, dev->page_size),
			   dev->page_size))
		return -1;
	memset(buf->buf, 0, nent * entry_size);

	return 0;
}
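
/*
 * Resize flow sketch (illustrative; assumes the caller is
 * mlx4_resize_cq() in verbs.c, which is not shown here): holding
 * cq->lock, the caller uses mlx4_get_outstanding_cqes() to reject a
 * new size smaller than what is still pending, allocates the new ring
 * with mlx4_alloc_cq_buf(), asks the kernel to resize the CQ, and
 * finally calls mlx4_cq_resize_copy_cqes() to carry the pending CQEs
 * over to the new buffer.
 */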