/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
33 */ 34 35#include <linux/init.h> 36#include <linux/errno.h> 37#include <linux/slab.h> 38#include <linux/kernel.h> 39#include <linux/vmalloc.h> 40 41#include <linux/mlx4/cmd.h> 42 43#include "mlx4.h" 44#include "icm.h" 45 46#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28) 47#define MLX4_MPT_FLAG_FREE (0x3UL << 28) 48#define MLX4_MPT_FLAG_MIO (1 << 17) 49#define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15) 50#define MLX4_MPT_FLAG_PHYSICAL (1 << 9) 51#define MLX4_MPT_FLAG_REGION (1 << 8) 52 53#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27) 54#define MLX4_MPT_PD_FLAG_RAE (1 << 28) 55#define MLX4_MPT_PD_FLAG_EN_INV (3 << 24) 56 57#define MLX4_MPT_STATUS_SW 0xF0 58#define MLX4_MPT_STATUS_HW 0x00 59 60static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order) 61{ 62 int o; 63 int m; 64 u32 seg; 65 66 spin_lock(&buddy->lock); 67 68 for (o = order; o <= buddy->max_order; ++o) 69 if (buddy->num_free[o]) { 70 m = 1 << (buddy->max_order - o); 71 seg = find_first_bit(buddy->bits[o], m); 72 if (seg < m) 73 goto found; 74 } 75 76 spin_unlock(&buddy->lock); 77 return -1; 78 79 found: 80 clear_bit(seg, buddy->bits[o]); 81 --buddy->num_free[o]; 82 83 while (o > order) { 84 --o; 85 seg <<= 1; 86 set_bit(seg ^ 1, buddy->bits[o]); 87 ++buddy->num_free[o]; 88 } 89 90 spin_unlock(&buddy->lock); 91 92 seg <<= order; 93 94 return seg; 95} 96 97static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order) 98{ 99 seg >>= order; 100 101 spin_lock(&buddy->lock); 102 103 while (test_bit(seg ^ 1, buddy->bits[order])) { 104 clear_bit(seg ^ 1, buddy->bits[order]); 105 --buddy->num_free[order]; 106 seg >>= 1; 107 ++order; 108 } 109 110 set_bit(seg, buddy->bits[order]); 111 ++buddy->num_free[order]; 112 113 spin_unlock(&buddy->lock); 114} 115 116static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) 117{ 118 int i, s; 119 120 buddy->max_order = max_order; 121 spin_lock_init(&buddy->lock); 122 123 buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *), 124 GFP_KERNEL); 125 
buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free, 126 GFP_KERNEL); 127 if (!buddy->bits || !buddy->num_free) 128 goto err_out; 129 130 for (i = 0; i <= buddy->max_order; ++i) { 131 s = BITS_TO_LONGS(1 << (buddy->max_order - i)); 132 buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN); 133 if (!buddy->bits[i]) { 134 goto err_out_free; 135 } 136 } 137 138 set_bit(0, buddy->bits[buddy->max_order]); 139 buddy->num_free[buddy->max_order] = 1; 140 141 return 0; 142 143err_out_free: 144 for (i = 0; i <= buddy->max_order; ++i) 145 if ( buddy->bits[i] ) 146 kfree(buddy->bits[i]); 147 148err_out: 149 kfree(buddy->bits); 150 kfree(buddy->num_free); 151 152 return -ENOMEM; 153} 154 155static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy) 156{ 157 int i; 158 159 for (i = 0; i <= buddy->max_order; ++i) 160 kfree(buddy->bits[i]); 161 162 kfree(buddy->bits); 163 kfree(buddy->num_free); 164} 165 166u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) 167{ 168 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; 169 u32 seg; 170 int seg_order; 171 u32 offset; 172 173 seg_order = max_t(int, order - log_mtts_per_seg, 0); 174 175 seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order); 176 if (seg == -1) 177 return -1; 178 179 offset = seg * (1 << log_mtts_per_seg); 180 181 if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset, 182 offset + (1 << order) - 1)) { 183 mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order); 184 return -1; 185 } 186 187 return offset; 188} 189 190static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) 191{ 192 u64 in_param = 0; 193 u64 out_param; 194 int err; 195 196 if (mlx4_is_mfunc(dev)) { 197 set_param_l(&in_param, order); 198 err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT, 199 RES_OP_RESERVE_AND_MAP, 200 MLX4_CMD_ALLOC_RES, 201 MLX4_CMD_TIME_CLASS_A, 202 MLX4_CMD_WRAPPED); 203 if (err) 204 return -1; 205 return get_param_l(&out_param); 206 } 207 return 
__mlx4_alloc_mtt_range(dev, order); 208} 209 210int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, 211 struct mlx4_mtt *mtt) 212{ 213 int i; 214 215 if (!npages) { 216 mtt->order = -1; 217 mtt->page_shift = MLX4_ICM_PAGE_SHIFT; 218 return 0; 219 } else 220 mtt->page_shift = page_shift; 221 222 for (mtt->order = 0, i = 1; i < npages; i <<= 1) 223 ++mtt->order; 224 225 mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order); 226 if (mtt->offset == -1) { 227 mlx4_err(dev, "Failed to allocate mtts for %d pages(order %d)\n", 228 npages, mtt->order); 229 return -ENOMEM; 230 } 231 232 return 0; 233} 234EXPORT_SYMBOL_GPL(mlx4_mtt_init); 235 236void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) 237{ 238 u32 first_seg; 239 int seg_order; 240 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; 241 242 seg_order = max_t(int, order - log_mtts_per_seg, 0); 243 first_seg = offset / (1 << log_mtts_per_seg); 244 245 mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order); 246 mlx4_table_put_range(dev, &mr_table->mtt_table, offset, 247 offset + (1 << order) - 1); 248} 249 250static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) 251{ 252 u64 in_param = 0; 253 int err; 254 255 if (mlx4_is_mfunc(dev)) { 256 set_param_l(&in_param, offset); 257 set_param_h(&in_param, order); 258 err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP, 259 MLX4_CMD_FREE_RES, 260 MLX4_CMD_TIME_CLASS_A, 261 MLX4_CMD_WRAPPED); 262 if (err) 263 mlx4_warn(dev, "Failed to free mtt range at:" 264 "%d order:%d\n", offset, order); 265 return; 266 } 267 __mlx4_free_mtt_range(dev, offset, order); 268} 269 270void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt) 271{ 272 if (mtt->order < 0) 273 return; 274 275 mlx4_free_mtt_range(dev, mtt->offset, mtt->order); 276} 277EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup); 278 279u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt) 280{ 281 return (u64) mtt->offset * dev->caps.mtt_entry_sz; 
282} 283EXPORT_SYMBOL_GPL(mlx4_mtt_addr); 284 285static u32 hw_index_to_key(u32 ind) 286{ 287 return (ind >> 24) | (ind << 8); 288} 289 290static u32 key_to_hw_index(u32 key) 291{ 292 return (key << 24) | (key >> 8); 293} 294 295static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 296 int mpt_index) 297{ 298 return mlx4_cmd(dev, mailbox->dma, mpt_index, 299 0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B, 300 MLX4_CMD_WRAPPED); 301} 302 303static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 304 int mpt_index) 305{ 306 return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index, 307 !mailbox, MLX4_CMD_HW2SW_MPT, 308 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); 309} 310 311static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd, 312 u64 iova, u64 size, u32 access, int npages, 313 int page_shift, struct mlx4_mr *mr) 314{ 315 mr->iova = iova; 316 mr->size = size; 317 mr->pd = pd; 318 mr->access = access; 319 mr->enabled = MLX4_MR_DISABLED; 320 mr->key = hw_index_to_key(mridx); 321 322 return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); 323} 324 325static int mlx4_WRITE_MTT(struct mlx4_dev *dev, 326 struct mlx4_cmd_mailbox *mailbox, 327 int num_entries) 328{ 329 return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT, 330 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 331} 332 333int __mlx4_mr_reserve(struct mlx4_dev *dev) 334{ 335 struct mlx4_priv *priv = mlx4_priv(dev); 336 337 return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap); 338} 339 340static int mlx4_mr_reserve(struct mlx4_dev *dev) 341{ 342 u64 out_param; 343 344 if (mlx4_is_mfunc(dev)) { 345 if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE, 346 MLX4_CMD_ALLOC_RES, 347 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED)) 348 return -1; 349 return get_param_l(&out_param); 350 } 351 return __mlx4_mr_reserve(dev); 352} 353 354void __mlx4_mr_release(struct mlx4_dev *dev, u32 index) 355{ 356 struct mlx4_priv *priv = 
mlx4_priv(dev); 357 358 mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index); 359} 360 361static void mlx4_mr_release(struct mlx4_dev *dev, u32 index) 362{ 363 u64 in_param = 0; 364 365 if (mlx4_is_mfunc(dev)) { 366 set_param_l(&in_param, index); 367 if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE, 368 MLX4_CMD_FREE_RES, 369 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED)) 370 mlx4_warn(dev, "Failed to release mr index:%d\n", 371 index); 372 return; 373 } 374 __mlx4_mr_release(dev, index); 375} 376 377int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index) 378{ 379 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; 380 381 return mlx4_table_get(dev, &mr_table->dmpt_table, index); 382} 383 384static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index) 385{ 386 u64 param = 0; 387 388 if (mlx4_is_mfunc(dev)) { 389 set_param_l(¶m, index); 390 return mlx4_cmd_imm(dev, param, ¶m, RES_MPT, RES_OP_MAP_ICM, 391 MLX4_CMD_ALLOC_RES, 392 MLX4_CMD_TIME_CLASS_A, 393 MLX4_CMD_WRAPPED); 394 } 395 return __mlx4_mr_alloc_icm(dev, index); 396} 397 398void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index) 399{ 400 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; 401 402 mlx4_table_put(dev, &mr_table->dmpt_table, index); 403} 404 405static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index) 406{ 407 u64 in_param = 0; 408 409 if (mlx4_is_mfunc(dev)) { 410 set_param_l(&in_param, index); 411 if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM, 412 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, 413 MLX4_CMD_WRAPPED)) 414 mlx4_warn(dev, "Failed to free icm of mr index:%d\n", 415 index); 416 return; 417 } 418 return __mlx4_mr_free_icm(dev, index); 419} 420 421int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, 422 int npages, int page_shift, struct mlx4_mr *mr) 423{ 424 u32 index; 425 int err; 426 427 index = mlx4_mr_reserve(dev); 428 if (index == -1) 429 return -ENOMEM; 430 431 err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size, 432 
access, npages, page_shift, mr); 433 if (err) 434 mlx4_mr_release(dev, index); 435 436 return err; 437} 438EXPORT_SYMBOL_GPL(mlx4_mr_alloc); 439 440static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr) 441{ 442 int err; 443 444 if (mr->enabled == MLX4_MR_EN_HW) { 445 err = mlx4_HW2SW_MPT(dev, NULL, 446 key_to_hw_index(mr->key) & 447 (dev->caps.num_mpts - 1)); 448 if (err) 449 mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err); 450 451 mr->enabled = MLX4_MR_EN_SW; 452 } 453 mlx4_mtt_cleanup(dev, &mr->mtt); 454} 455 456void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr) 457{ 458 mlx4_mr_free_reserved(dev, mr); 459 if (mr->enabled) 460 mlx4_mr_free_icm(dev, key_to_hw_index(mr->key)); 461 mlx4_mr_release(dev, key_to_hw_index(mr->key)); 462} 463EXPORT_SYMBOL_GPL(mlx4_mr_free); 464 465int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) 466{ 467 struct mlx4_cmd_mailbox *mailbox; 468 struct mlx4_mpt_entry *mpt_entry; 469 int err; 470 471 err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key)); 472 if (err) 473 return err; 474 475 mailbox = mlx4_alloc_cmd_mailbox(dev); 476 if (IS_ERR(mailbox)) { 477 err = PTR_ERR(mailbox); 478 goto err_table; 479 } 480 mpt_entry = mailbox->buf; 481 482 memset(mpt_entry, 0, sizeof *mpt_entry); 483 484 mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO | 485 MLX4_MPT_FLAG_REGION | 486 mr->access); 487 488 mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key)); 489 mpt_entry->pd_flags = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV); 490 mpt_entry->start = cpu_to_be64(mr->iova); 491 mpt_entry->length = cpu_to_be64(mr->size); 492 mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift); 493 494 if (mr->mtt.order < 0) { 495 mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); 496 mpt_entry->mtt_addr = 0; 497 } else { 498 mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev, 499 &mr->mtt)); 500 } 501 502 if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) { 503 /* fast register MR in free state */ 504 
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); 505 mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG | 506 MLX4_MPT_PD_FLAG_RAE); 507 mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order); 508 } else { 509 mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS); 510 } 511 512 err = mlx4_SW2HW_MPT(dev, mailbox, 513 key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1)); 514 if (err) { 515 mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err); 516 goto err_cmd; 517 } 518 mr->enabled = MLX4_MR_EN_HW; 519 520 mlx4_free_cmd_mailbox(dev, mailbox); 521 522 return 0; 523 524err_cmd: 525 mlx4_free_cmd_mailbox(dev, mailbox); 526 527err_table: 528 mlx4_mr_free_icm(dev, key_to_hw_index(mr->key)); 529 return err; 530} 531EXPORT_SYMBOL_GPL(mlx4_mr_enable); 532 533static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, 534 int start_index, int npages, u64 *page_list) 535{ 536 struct mlx4_priv *priv = mlx4_priv(dev); 537 __be64 *mtts; 538 dma_addr_t dma_handle; 539 int i; 540 541 mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset + 542 start_index, &dma_handle); 543 544 if (!mtts) 545 return -ENOMEM; 546 547 dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle, 548 npages * sizeof (u64), DMA_TO_DEVICE); 549 550 for (i = 0; i < npages; ++i) 551 mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); 552 553 dma_sync_single_for_device(&dev->pdev->dev, dma_handle, 554 npages * sizeof (u64), DMA_TO_DEVICE); 555 556 return 0; 557} 558 559int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, 560 int start_index, int npages, u64 *page_list) 561{ 562 int err = 0; 563 int chunk; 564 int mtts_per_page; 565 int max_mtts_first_page; 566 567 /* compute how may mtts fit in the first page */ 568 mtts_per_page = PAGE_SIZE / sizeof(u64); 569 max_mtts_first_page = mtts_per_page - (mtt->offset + start_index) 570 % mtts_per_page; 571 572 chunk = min_t(int, max_mtts_first_page, npages); 573 574 while (npages > 0) { 575 err = mlx4_write_mtt_chunk(dev, 
mtt, start_index, chunk, page_list); 576 if (err) 577 return err; 578 npages -= chunk; 579 start_index += chunk; 580 page_list += chunk; 581 582 chunk = min_t(int, mtts_per_page, npages); 583 } 584 return err; 585} 586 587int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, 588 int start_index, int npages, u64 *page_list) 589{ 590 struct mlx4_cmd_mailbox *mailbox = NULL; 591 __be64 *inbox = NULL; 592 int chunk; 593 int err = 0; 594 int i; 595 596 if (mtt->order < 0) 597 return -EINVAL; 598 599 if (mlx4_is_mfunc(dev)) { 600 mailbox = mlx4_alloc_cmd_mailbox(dev); 601 if (IS_ERR(mailbox)) 602 return PTR_ERR(mailbox); 603 inbox = mailbox->buf; 604 605 while (npages > 0) { 606 chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2, 607 npages); 608 inbox[0] = cpu_to_be64(mtt->offset + start_index); 609 inbox[1] = 0; 610 for (i = 0; i < chunk; ++i) 611 inbox[i + 2] = cpu_to_be64(page_list[i] | 612 MLX4_MTT_FLAG_PRESENT); 613 err = mlx4_WRITE_MTT(dev, mailbox, chunk); 614 if (err) { 615 mlx4_free_cmd_mailbox(dev, mailbox); 616 return err; 617 } 618 619 npages -= chunk; 620 start_index += chunk; 621 page_list += chunk; 622 } 623 mlx4_free_cmd_mailbox(dev, mailbox); 624 return err; 625 } 626 627 return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list); 628} 629EXPORT_SYMBOL_GPL(mlx4_write_mtt); 630 631int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, 632 struct mlx4_buf *buf) 633{ 634 u64 *page_list; 635 int err; 636 int i; 637 638 page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL); 639 if (!page_list) 640 return -ENOMEM; 641 642 for (i = 0; i < buf->npages; ++i) 643 if (buf->nbufs == 1) 644 page_list[i] = buf->direct.map + (i << buf->page_shift); 645 else 646 page_list[i] = buf->page_list[i].map; 647 648 err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list); 649 650 kfree(page_list); 651 return err; 652} 653EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt); 654 655int mlx4_init_mr_table(struct mlx4_dev *dev) 656{ 657 struct 
mlx4_priv *priv = mlx4_priv(dev); 658 struct mlx4_mr_table *mr_table = &priv->mr_table; 659 int err; 660 661 /* Nothing to do for slaves - all MR handling is forwarded 662 * to the master */ 663 if (mlx4_is_slave(dev)) 664 return 0; 665 666 if (!is_power_of_2(dev->caps.num_mpts)) 667 return -EINVAL; 668 669 err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, 670 ~0, dev->caps.reserved_mrws, 0); 671 if (err) 672 return err; 673 674 err = mlx4_buddy_init(&mr_table->mtt_buddy, 675 ilog2((u32)dev->caps.num_mtts / 676 (1 << log_mtts_per_seg))); 677 if (err) 678 goto err_buddy; 679 680 if (dev->caps.reserved_mtts) { 681 priv->reserved_mtts = 682 mlx4_alloc_mtt_range(dev, 683 fls(dev->caps.reserved_mtts - 1)); 684 if (priv->reserved_mtts < 0) { 685 mlx4_warn(dev, "MTT table of order %u is too small.\n", 686 mr_table->mtt_buddy.max_order); 687 err = -ENOMEM; 688 goto err_reserve_mtts; 689 } 690 } 691 692 return 0; 693 694err_reserve_mtts: 695 mlx4_buddy_cleanup(&mr_table->mtt_buddy); 696 697err_buddy: 698 mlx4_bitmap_cleanup(&mr_table->mpt_bitmap); 699 700 return err; 701} 702 703void mlx4_cleanup_mr_table(struct mlx4_dev *dev) 704{ 705 struct mlx4_priv *priv = mlx4_priv(dev); 706 struct mlx4_mr_table *mr_table = &priv->mr_table; 707 708 if (mlx4_is_slave(dev)) 709 return; 710 if (priv->reserved_mtts >= 0) 711 mlx4_free_mtt_range(dev, priv->reserved_mtts, 712 fls(dev->caps.reserved_mtts - 1)); 713 mlx4_buddy_cleanup(&mr_table->mtt_buddy); 714 mlx4_bitmap_cleanup(&mr_table->mpt_bitmap); 715} 716 717static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list, 718 int npages, u64 iova) 719{ 720 int i, page_mask; 721 722 if (npages > fmr->max_pages) 723 return -EINVAL; 724 725 page_mask = (1 << fmr->page_shift) - 1; 726 727 /* We are getting page lists, so va must be page aligned. 
*/ 728 if (iova & page_mask) 729 return -EINVAL; 730 731 /* Trust the user not to pass misaligned data in page_list */ 732 if (0) 733 for (i = 0; i < npages; ++i) { 734 if (page_list[i] & ~page_mask) 735 return -EINVAL; 736 } 737 738 if (fmr->maps >= fmr->max_maps) 739 return -EINVAL; 740 741 return 0; 742} 743 744int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, 745 int npages, u64 iova, u32 *lkey, u32 *rkey) 746{ 747 u32 key; 748 int i, err; 749 750 err = mlx4_check_fmr(fmr, page_list, npages, iova); 751 if (err) 752 return err; 753 754 ++fmr->maps; 755 756 key = key_to_hw_index(fmr->mr.key); 757 key += dev->caps.num_mpts; 758 *lkey = *rkey = fmr->mr.key = hw_index_to_key(key); 759 760 *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW; 761 762 /* Make sure MPT status is visible before writing MTT entries */ 763 wmb(); 764 765 dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle, 766 npages * sizeof(u64), DMA_TO_DEVICE); 767 768 for (i = 0; i < npages; ++i) 769 fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); 770 771 dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle, 772 npages * sizeof(u64), DMA_TO_DEVICE); 773 774 fmr->mpt->key = cpu_to_be32(key); 775 fmr->mpt->lkey = cpu_to_be32(key); 776 fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift)); 777 fmr->mpt->start = cpu_to_be64(iova); 778 779 /* Make MTT entries are visible before setting MPT status */ 780 wmb(); 781 782 *(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW; 783 784 /* Make sure MPT status is visible before consumer can use FMR */ 785 wmb(); 786 787 return 0; 788} 789EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr); 790 791int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, 792 int max_maps, u8 page_shift, struct mlx4_fmr *fmr) 793{ 794 struct mlx4_priv *priv = mlx4_priv(dev); 795 int err = -ENOMEM; 796 797 if (max_maps > dev->caps.max_fmr_maps) 798 return -EINVAL; 799 800 if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift 
>= 32) 801 return -EINVAL; 802 803 /* All MTTs must fit in the same page */ 804 if (max_pages * sizeof *fmr->mtts > PAGE_SIZE) 805 return -EINVAL; 806 807 fmr->page_shift = page_shift; 808 fmr->max_pages = max_pages; 809 fmr->max_maps = max_maps; 810 fmr->maps = 0; 811 812 err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages, 813 page_shift, &fmr->mr); 814 if (err) 815 return err; 816 817 fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table, 818 fmr->mr.mtt.offset, 819 &fmr->dma_handle); 820 821 if (!fmr->mtts) { 822 err = -ENOMEM; 823 goto err_free; 824 } 825 826 return 0; 827 828err_free: 829 mlx4_mr_free(dev, &fmr->mr); 830 return err; 831} 832EXPORT_SYMBOL_GPL(mlx4_fmr_alloc); 833 834int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr) 835{ 836 struct mlx4_priv *priv = mlx4_priv(dev); 837 int err; 838 839 err = mlx4_mr_enable(dev, &fmr->mr); 840 if (err) 841 return err; 842 843 fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table, 844 key_to_hw_index(fmr->mr.key), NULL); 845 if (!fmr->mpt) 846 return -ENOMEM; 847 848 return 0; 849} 850EXPORT_SYMBOL_GPL(mlx4_fmr_enable); 851 852void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, 853 u32 *lkey, u32 *rkey) 854{ 855 struct mlx4_cmd_mailbox *mailbox; 856 int err; 857 858 if (!fmr->maps) 859 return; 860 861 fmr->maps = 0; 862 863 mailbox = mlx4_alloc_cmd_mailbox(dev); 864 if (IS_ERR(mailbox)) { 865 err = PTR_ERR(mailbox); 866 mlx4_warn(dev, "mlx4_alloc_cmd_mailbox failed (%d)\n", err); 867 return; 868 } 869 870 err = mlx4_HW2SW_MPT(dev, NULL, 871 key_to_hw_index(fmr->mr.key) & 872 (dev->caps.num_mpts - 1)); 873 mlx4_free_cmd_mailbox(dev, mailbox); 874 if (err) { 875 mlx4_warn(dev, "mlx4_HW2SW_MPT failed (%d)\n", err); 876 return; 877 } 878 fmr->mr.enabled = MLX4_MR_EN_SW; 879} 880EXPORT_SYMBOL_GPL(mlx4_fmr_unmap); 881 882int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr) 883{ 884 if (fmr->maps) 885 return -EBUSY; 886 887 mlx4_mr_free(dev, &fmr->mr); 888 fmr->mr.enabled = 
MLX4_MR_DISABLED; 889 890 return 0; 891} 892EXPORT_SYMBOL_GPL(mlx4_fmr_free); 893 894int mlx4_SYNC_TPT(struct mlx4_dev *dev) 895{ 896 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000, 897 MLX4_CMD_NATIVE); 898} 899EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT); 900