/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_srq.c,v 1.1.1.1 2007/08/03 18:52:32 Exp $
 */

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched.h>

#include <asm/io.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};

struct mthca_tavor_srq_context {
	__be64 wqe_base_ds;	/* low 6 bits is descriptor size */
	__be32 state_pd;
	__be32 lkey;
	__be32 uar;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u32    reserved[2];
};

struct mthca_arbel_srq_context {
	__be32 state_logsize_srqn;
	__be32 lkey;
	__be32 db_index;
	__be32 logstride_usrpage;
	__be64 wqe_base;
	__be32 eq_pd;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u16    reserved1;
	__be16 wqe_counter;
	u32    reserved2[3];
};

static void *get_wqe(struct mthca_srq *srq, int n)
{
	if (srq->is_direct)
		return srq->queue.direct.buf + (n << srq->wqe_shift);
	else
		return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
			((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}

/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list. We use the imm field
 * because in the Tavor case, posting a WQE may overwrite the next
 * segment of the previous WQE, but a receive WQE will never touch the
 * imm field. This avoids corrupting our free list if the previous
 * WQE has already completed and been put on the free list when we
 * post the next WQE.
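 *
 * For example, right after mthca_alloc_srq_buf() below runs, the free
 * list is simply 0 -> 1 -> ... -> max - 1 -> -1, with the link for
 * WQE n stored in the imm field of WQE n's own mthca_next_seg.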
 */
static inline int *wqe_to_link(void *wqe)
{
	return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}

static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_tavor_srq_context *context)
{
	memset(context, 0, sizeof *context);

	context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
	context->state_pd = cpu_to_be32(pd->pd_num);
	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);

	if (pd->ibpd.uobject)
		context->uar =
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->uar = cpu_to_be32(dev->driver_uar.index);
}

static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_arbel_srq_context *context)
{
	int logsize, max;

	memset(context, 0, sizeof *context);

	max = srq->max;
	logsize = ilog2(max);
	context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
	context->db_index = cpu_to_be32(srq->db_index);
	context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
	if (pd->ibpd.uobject)
		context->logstride_usrpage |=
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
	context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}

static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
	mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
		       srq->is_direct, &srq->mr);
	kfree(srq->wrid);
}

static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
			       struct mthca_srq *srq)
{
	struct mthca_data_seg *scatter;
	void *wqe;
	int err;
	int i;

	if (pd->ibpd.uobject)
		return 0;

	srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
			      MTHCA_MAX_DIRECT_SRQ_SIZE,
			      &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
	if (err) {
		kfree(srq->wrid);
		return err;
	}

	/*
	 * Now initialize the SRQ buffer so that all of the WQEs are
	 * linked into the list of free WQEs. In addition, set the
	 * scatter list L_Keys to the sentry value of 0x100.
	 */
	for (i = 0; i < srq->max; ++i) {
		wqe = get_wqe(srq, i);

		*wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;
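
		/*
		 * The posting functions below rely on this sentry as
		 * well: when a WR carries fewer than max_gs SGEs,
		 * they write a single data segment with L_Key
		 * MTHCA_INVAL_LKEY to terminate the scatter list.
		 */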

		for (scatter = wqe + sizeof (struct mthca_next_seg);
		     (void *) scatter < wqe + (1 << srq->wqe_shift);
		     ++scatter)
			scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
	}

	srq->last = get_wqe(srq, srq->max - 1);

	return 0;
}

int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
		    struct ib_srq_attr *attr, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	u8 status;
	int ds;
	int err;

	/* Sanity check SRQ size before proceeding */
	if (attr->max_wr > dev->limits.max_srq_wqes ||
	    attr->max_sge > dev->limits.max_srq_sge)
		return -EINVAL;

	srq->max = attr->max_wr;
	srq->max_gs = attr->max_sge;
	srq->counter = 0;

	if (mthca_is_memfree(dev))
		srq->max = roundup_pow_of_two(srq->max + 1);
	else
		srq->max = srq->max + 1;

	ds = max(64UL,
		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
				    srq->max_gs * sizeof (struct mthca_data_seg)));

	if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
		return -EINVAL;

	srq->wqe_shift = ilog2(ds);

	srq->srqn = mthca_alloc(&dev->srq_table.alloc);
	if (srq->srqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
		if (err)
			goto err_out;

		if (!pd->ibpd.uobject) {
			srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
						       srq->srqn, &srq->db);
			if (srq->db_index < 0) {
				err = -ENOMEM;
				goto err_out_icm;
			}
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_db;
	}

	err = mthca_alloc_srq_buf(dev, pd, srq);
	if (err)
		goto err_out_mailbox;

	spin_lock_init(&srq->lock);
	srq->refcount = 1;
	init_waitqueue_head(&srq->wait);
	mutex_init(&srq->mutex);

	if (mthca_is_memfree(dev))
		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
	else
		mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);

	err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);

	if (err) {
		mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
		goto err_out_free_buf;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_buf;
	}

	spin_lock_irq(&dev->srq_table.lock);
	if (mthca_array_set(&dev->srq_table.srq,
			    srq->srqn & (dev->limits.num_srqs - 1),
			    srq)) {
		spin_unlock_irq(&dev->srq_table.lock);
		goto err_out_free_srq;
	}
	spin_unlock_irq(&dev->srq_table.lock);

	mthca_free_mailbox(dev, mailbox);

	srq->first_free = 0;
	srq->last_free = srq->max - 1;

	attr->max_wr = srq->max - 1;
	attr->max_sge = srq->max_gs;

	return 0;

err_out_free_srq:
	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

err_out_free_buf:
	if (!pd->ibpd.uobject)
		mthca_free_srq_buf(dev, srq);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_db:
	if (!pd->ibpd.uobject && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
	mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
	mthca_free(&dev->srq_table.alloc, srq->srqn);

	return err;
}
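
/*
 * Worked example of the sizing above (assuming the usual 16-byte
 * mthca_next_seg and mthca_data_seg layouts): max_wr = 100 and
 * max_sge = 4 on a mem-free HCA give srq->max = roundup_pow_of_two(101)
 * = 128 WQEs and ds = max(64, roundup_pow_of_two(16 + 4 * 16)) = 128
 * bytes, so wqe_shift = 7 and the queue buffer is 128 * 128 = 16 KB.
 */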

static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
{
	int c;

	spin_lock_irq(&dev->srq_table.lock);
	c = srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	return c;
}

void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
		return;
	}

	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

	spin_lock_irq(&dev->srq_table.lock);
	mthca_array_clear(&dev->srq_table.srq,
			  srq->srqn & (dev->limits.num_srqs - 1));
	--srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	wait_event(srq->wait, !get_srq_refcount(dev, srq));

	if (!srq->ibsrq.uobject) {
		mthca_free_srq_buf(dev, srq);
		if (mthca_is_memfree(dev))
			mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
	}

	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
	mthca_free(&dev->srq_table.alloc, srq->srqn);
	mthca_free_mailbox(dev, mailbox);
}

int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	int ret;
	u8 status;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
		if (attr->srq_limit > max_wr)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
		if (status)
			return -EINVAL;
	}

	return 0;
}

int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	struct mthca_mailbox *mailbox;
	struct mthca_arbel_srq_context *arbel_ctx;
	struct mthca_tavor_srq_context *tavor_ctx;
	u8 status;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
	if (err)
		goto out;

	if (mthca_is_memfree(dev)) {
		arbel_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
	} else {
		tavor_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark);
	}

	srq_attr->max_wr = srq->max - 1;
	srq_attr->max_sge = srq->max_gs;

out:
	mthca_free_mailbox(dev, mailbox);

	return err;
}

void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type)
{
	struct mthca_srq *srq;
	struct ib_event event;

	spin_lock(&dev->srq_table.lock);
	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
	if (srq)
		++srq->refcount;
	spin_unlock(&dev->srq_table.lock);

	if (!srq) {
		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	if (!srq->ibsrq.event_handler)
		goto out;
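
	/*
	 * The reference taken above keeps the SRQ from being freed
	 * while the event handler runs: mthca_free_srq() sleeps in
	 * wait_event() until the refcount we drop below reaches zero.
	 */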
	event.device = &dev->ib_dev;
	event.event = event_type;
	event.element.srq = &srq->ibsrq;
	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
	spin_lock(&dev->srq_table.lock);
	if (!--srq->refcount)
		wake_up(&srq->wait);
	spin_unlock(&dev->srq_table.lock);
}

/*
 * This function must be called with IRQs disabled.
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
	int ind;

	ind = wqe_addr >> srq->wqe_shift;

	spin_lock(&srq->lock);

	if (likely(srq->first_free >= 0))
		*wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
	else
		srq->first_free = ind;

	*wqe_to_link(get_wqe(srq, ind)) = -1;
	srq->last_free = ind;

	spin_unlock(&srq->lock);
}

int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	__be32 doorbell[2];
	unsigned long flags;
	int err = 0;
	int first_ind;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&srq->lock, flags);

	first_ind = srq->first_free;

	for (nreq = 0; wr; wr = wr->next) {
		ind = srq->first_free;

		if (ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe = get_wqe(srq, ind);
		next_ind = *wqe_to_link(wqe);

		if (next_ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		prev_wqe = srq->last;
		srq->last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			srq->last = prev_wqe;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32((ind << srq->wqe_shift) | 1);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);

		srq->wrid[ind] = wr->wr_id;
		srq->first_free = next_ind;

		++nreq;
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
			doorbell[1] = cpu_to_be32(srq->srqn << 8);

			/*
			 * Make sure that descriptors are written
			 * before doorbell is rung.
			 */
			wmb();

			mthca_write64(doorbell,
				      dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			first_ind = srq->first_free;
		}
	}

	if (likely(nreq)) {
		doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
		doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq);

		/*
		 * Make sure that descriptors are written before
		 * doorbell is rung.
		 */
		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}
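
	/*
	 * Doorbell layout, as used above: word 0 carries the byte
	 * offset of the first WQE linked in this batch, and word 1 is
	 * (srqn << 8) | nreq. Since the WQE count sits in the low 8
	 * bits, the loop rings an intermediate doorbell with a count
	 * of 0 (presumably encoding a full batch) whenever
	 * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB requests accumulate.
	 */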

	/*
	 * Make sure doorbells don't leak out of SRQ spinlock and
	 * reach the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind = srq->first_free;

		if (ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe = get_wqe(srq, ind);
		next_ind = *wqe_to_link(wqe);

		if (next_ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		((struct mthca_next_seg *) wqe)->nda_op =
			cpu_to_be32((next_ind << srq->wqe_shift) | 1);
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		srq->wrid[ind] = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (likely(nreq)) {
		srq->counter += nreq;

		/*
		 * Make sure that descriptors are written before
		 * we write doorbell record.
		 */
		wmb();
		*srq->db = cpu_to_be32(srq->counter);
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

int mthca_max_srq_sge(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev))
		return dev->limits.max_sg;

	/*
	 * SRQ allocations are based on powers of 2 for Tavor,
	 * (although they only need to be multiples of 16 bytes).
	 *
	 * Therefore, we need to base the max number of sg entries on
	 * the largest power of 2 descriptor size that is <= to the
	 * actual max WQE descriptor size, rather than return the
	 * max_sg value given by the firmware (which is based on WQE
	 * sizes as multiples of 16, not powers of 2).
	 *
	 * If SRQ implementation is changed for Tavor to be based on
	 * multiples of 16, the calculation below can be deleted and
	 * the FW max_sg value returned.
	 */
	return min_t(int, dev->limits.max_sg,
		     ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
		      sizeof (struct mthca_next_seg)) /
		     sizeof (struct mthca_data_seg));
}
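
/*
 * Worked example of the clamp above: if the firmware reports
 * max_desc_sz = 1008, the largest power of 2 that fits is 512, so at
 * most (512 - 16) / 16 = 31 scatter entries remain after the 16-byte
 * mthca_next_seg (again assuming 16-byte mthca_data_seg entries),
 * possibly capped further by the firmware's max_sg.
 */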

int mthca_init_srq_table(struct mthca_dev *dev)
{
	int err;

	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return 0;

	spin_lock_init(&dev->srq_table.lock);

	err = mthca_alloc_init(&dev->srq_table.alloc,
			       dev->limits.num_srqs,
			       dev->limits.num_srqs - 1,
			       dev->limits.reserved_srqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->srq_table.srq,
			       dev->limits.num_srqs);
	if (err)
		mthca_alloc_cleanup(&dev->srq_table.alloc);

	return err;
}

void mthca_cleanup_srq_table(struct mthca_dev *dev)
{
	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return;

	mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
	mthca_alloc_cleanup(&dev->srq_table.alloc);
}