/* mcg.c revision 272407
 *
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
32 */ 33 34#include <linux/string.h> 35#include <linux/etherdevice.h> 36 37#include <linux/mlx4/cmd.h> 38#include <linux/module.h> 39 40#include "mlx4.h" 41 42 43static const u8 zero_gid[16]; /* automatically initialized to 0 */ 44 45int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) 46{ 47 return 1 << dev->oper_log_mgm_entry_size; 48} 49 50int mlx4_get_qp_per_mgm(struct mlx4_dev *dev) 51{ 52 return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2); 53} 54 55static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev, 56 struct mlx4_cmd_mailbox *mailbox, 57 u32 size, 58 u64 *reg_id) 59{ 60 u64 imm; 61 int err = 0; 62 63 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0, 64 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, 65 MLX4_CMD_NATIVE); 66 if (err) 67 return err; 68 *reg_id = imm; 69 70 return err; 71} 72 73static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid) 74{ 75 int err = 0; 76 77 err = mlx4_cmd(dev, regid, 0, 0, 78 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, 79 MLX4_CMD_NATIVE); 80 81 return err; 82} 83 84static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index, 85 struct mlx4_cmd_mailbox *mailbox) 86{ 87 return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, 88 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 89} 90 91static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index, 92 struct mlx4_cmd_mailbox *mailbox) 93{ 94 return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, 95 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 96} 97 98static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer, 99 struct mlx4_cmd_mailbox *mailbox) 100{ 101 u32 in_mod; 102 103 in_mod = (u32) port << 16 | steer << 1; 104 return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1, 105 MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A, 106 MLX4_CMD_NATIVE); 107} 108 109static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 110 u16 *hash, u8 op_mod) 111{ 112 u64 imm; 113 int err; 114 115 err = mlx4_cmd_imm(dev, 
mailbox->dma, &imm, 0, op_mod, 116 MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A, 117 MLX4_CMD_NATIVE); 118 119 if (!err) 120 *hash = imm; 121 122 return err; 123} 124 125static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port, 126 enum mlx4_steer_type steer, 127 u32 qpn) 128{ 129 struct mlx4_steer *s_steer; 130 struct mlx4_promisc_qp *pqp; 131 132 if (port < 1 || port > dev->caps.num_ports) 133 return NULL; 134 135 s_steer = &mlx4_priv(dev)->steer[port - 1]; 136 137 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { 138 if (pqp->qpn == qpn) 139 return pqp; 140 } 141 /* not found */ 142 return NULL; 143} 144 145/* 146 * Add new entry to steering data structure. 147 * All promisc QPs should be added as well 148 */ 149static int new_steering_entry(struct mlx4_dev *dev, u8 port, 150 enum mlx4_steer_type steer, 151 unsigned int index, u32 qpn) 152{ 153 struct mlx4_steer *s_steer; 154 struct mlx4_cmd_mailbox *mailbox; 155 struct mlx4_mgm *mgm; 156 u32 members_count; 157 struct mlx4_steer_index *new_entry; 158 struct mlx4_promisc_qp *pqp; 159 struct mlx4_promisc_qp *dqp = NULL; 160 u32 prot; 161 int err; 162 163 if (port < 1 || port > dev->caps.num_ports) 164 return -EINVAL; 165 166 s_steer = &mlx4_priv(dev)->steer[port - 1]; 167 new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL); 168 if (!new_entry) 169 return -ENOMEM; 170 171 INIT_LIST_HEAD(&new_entry->duplicates); 172 new_entry->index = index; 173 list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]); 174 175 /* If the given qpn is also a promisc qp, 176 * it should be inserted to duplicates list 177 */ 178 pqp = get_promisc_qp(dev, port, steer, qpn); 179 if (pqp) { 180 dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 181 if (!dqp) { 182 err = -ENOMEM; 183 goto out_alloc; 184 } 185 dqp->qpn = qpn; 186 list_add_tail(&dqp->list, &new_entry->duplicates); 187 } 188 189 /* if no promisc qps for this vep, we are done */ 190 if (list_empty(&s_steer->promisc_qps[steer])) 191 return 0; 192 193 /* 
now need to add all the promisc qps to the new 194 * steering entry, as they should also receive the packets 195 * destined to this address */ 196 mailbox = mlx4_alloc_cmd_mailbox(dev); 197 if (IS_ERR(mailbox)) { 198 err = -ENOMEM; 199 goto out_alloc; 200 } 201 mgm = mailbox->buf; 202 203 err = mlx4_READ_ENTRY(dev, index, mailbox); 204 if (err) 205 goto out_mailbox; 206 207 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 208 prot = be32_to_cpu(mgm->members_count) >> 30; 209 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { 210 /* don't add already existing qpn */ 211 if (pqp->qpn == qpn) 212 continue; 213 if (members_count == dev->caps.num_qp_per_mgm) { 214 /* out of space */ 215 err = -ENOMEM; 216 goto out_mailbox; 217 } 218 219 /* add the qpn */ 220 mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); 221 } 222 /* update the qps count and update the entry with all the promisc qps*/ 223 mgm->members_count = cpu_to_be32(members_count | (prot << 30)); 224 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 225 226out_mailbox: 227 mlx4_free_cmd_mailbox(dev, mailbox); 228 if (!err) 229 return 0; 230out_alloc: 231 if (dqp) { 232 list_del(&dqp->list); 233 kfree(dqp); 234 } 235 list_del(&new_entry->list); 236 kfree(new_entry); 237 return err; 238} 239 240/* update the data structures with existing steering entry */ 241static int existing_steering_entry(struct mlx4_dev *dev, u8 port, 242 enum mlx4_steer_type steer, 243 unsigned int index, u32 qpn) 244{ 245 struct mlx4_steer *s_steer; 246 struct mlx4_steer_index *tmp_entry, *entry = NULL; 247 struct mlx4_promisc_qp *pqp; 248 struct mlx4_promisc_qp *dqp; 249 250 if (port < 1 || port > dev->caps.num_ports) 251 return -EINVAL; 252 253 s_steer = &mlx4_priv(dev)->steer[port - 1]; 254 255 pqp = get_promisc_qp(dev, port, steer, qpn); 256 if (!pqp) 257 return 0; /* nothing to do */ 258 259 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { 260 if (tmp_entry->index == index) { 261 
entry = tmp_entry; 262 break; 263 } 264 } 265 if (unlikely(!entry)) { 266 mlx4_warn(dev, "Steering entry at index %x is not registered\n", index); 267 return -EINVAL; 268 } 269 270 /* the given qpn is listed as a promisc qpn 271 * we need to add it as a duplicate to this entry 272 * for future references */ 273 list_for_each_entry(dqp, &entry->duplicates, list) { 274 if (qpn == dqp->qpn) 275 return 0; /* qp is already duplicated */ 276 } 277 278 /* add the qp as a duplicate on this index */ 279 dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 280 if (!dqp) 281 return -ENOMEM; 282 dqp->qpn = qpn; 283 list_add_tail(&dqp->list, &entry->duplicates); 284 285 return 0; 286} 287 288/* Check whether a qpn is a duplicate on steering entry 289 * If so, it should not be removed from mgm */ 290static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port, 291 enum mlx4_steer_type steer, 292 unsigned int index, u32 qpn) 293{ 294 struct mlx4_steer *s_steer; 295 struct mlx4_steer_index *tmp_entry, *entry = NULL; 296 struct mlx4_promisc_qp *dqp, *tmp_dqp; 297 298 if (port < 1 || port > dev->caps.num_ports) 299 return NULL; 300 301 s_steer = &mlx4_priv(dev)->steer[port - 1]; 302 303 /* if qp is not promisc, it cannot be duplicated */ 304 if (!get_promisc_qp(dev, port, steer, qpn)) 305 return false; 306 307 /* The qp is promisc qp so it is a duplicate on this index 308 * Find the index entry, and remove the duplicate */ 309 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { 310 if (tmp_entry->index == index) { 311 entry = tmp_entry; 312 break; 313 } 314 } 315 if (unlikely(!entry)) { 316 mlx4_warn(dev, "Steering entry for index %x is not registered\n", index); 317 return false; 318 } 319 list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) { 320 if (dqp->qpn == qpn) { 321 list_del(&dqp->list); 322 kfree(dqp); 323 } 324 } 325 return true; 326} 327 328/* 329 * returns true if all the QPs != tqpn contained in this entry 330 * are Promisc QPs. 
return false otherwise. 331 */ 332static bool promisc_steering_entry(struct mlx4_dev *dev, u8 port, 333 enum mlx4_steer_type steer, 334 unsigned int index, u32 tqpn, u32 *members_count) 335{ 336 struct mlx4_steer *s_steer; 337 struct mlx4_cmd_mailbox *mailbox; 338 struct mlx4_mgm *mgm; 339 u32 m_count; 340 bool ret = false; 341 int i; 342 343 if (port < 1 || port > dev->caps.num_ports) 344 return false; 345 346 s_steer = &mlx4_priv(dev)->steer[port - 1]; 347 348 mailbox = mlx4_alloc_cmd_mailbox(dev); 349 if (IS_ERR(mailbox)) 350 return false; 351 mgm = mailbox->buf; 352 353 if (mlx4_READ_ENTRY(dev, index, mailbox)) 354 goto out; 355 m_count = be32_to_cpu(mgm->members_count) & 0xffffff; 356 if (members_count) 357 *members_count = m_count; 358 359 for (i = 0; i < m_count; i++) { 360 u32 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; 361 if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) { 362 /* the qp is not promisc, the entry can't be removed */ 363 goto out; 364 } 365 } 366 ret = true; 367out: 368 mlx4_free_cmd_mailbox(dev, mailbox); 369 return ret; 370} 371 372/* IF a steering entry contains only promisc QPs, it can be removed. 
*/ 373static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port, 374 enum mlx4_steer_type steer, 375 unsigned int index, u32 tqpn) 376{ 377 struct mlx4_steer *s_steer; 378 struct mlx4_steer_index *entry = NULL, *tmp_entry; 379 u32 members_count; 380 bool ret = false; 381 382 if (port < 1 || port > dev->caps.num_ports) 383 return NULL; 384 385 s_steer = &mlx4_priv(dev)->steer[port - 1]; 386 387 if (!promisc_steering_entry(dev, port, steer, index, tqpn, &members_count)) 388 goto out; 389 390 /* All the qps currently registered for this entry are promiscuous, 391 * Checking for duplicates */ 392 ret = true; 393 list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) { 394 if (entry->index == index) { 395 if (list_empty(&entry->duplicates) || members_count == 1) { 396 struct mlx4_promisc_qp *pqp, *tmp_pqp; 397 /* 398 * If there is only 1 entry in duplicates than 399 * this is the QP we want to delete, going over 400 * the list and deleting the entry. 401 */ 402 list_del(&entry->list); 403 list_for_each_entry_safe(pqp, tmp_pqp, 404 &entry->duplicates, 405 list) { 406 list_del(&pqp->list); 407 kfree(pqp); 408 } 409 kfree(entry); 410 } else { 411 /* This entry contains duplicates so it shouldn't be removed */ 412 ret = false; 413 goto out; 414 } 415 } 416 } 417 418out: 419 return ret; 420} 421 422static int add_promisc_qp(struct mlx4_dev *dev, u8 port, 423 enum mlx4_steer_type steer, u32 qpn) 424{ 425 struct mlx4_steer *s_steer; 426 struct mlx4_cmd_mailbox *mailbox; 427 struct mlx4_mgm *mgm; 428 struct mlx4_steer_index *entry; 429 struct mlx4_promisc_qp *pqp; 430 struct mlx4_promisc_qp *dqp; 431 u32 members_count; 432 u32 prot; 433 int i; 434 bool found; 435 int err; 436 struct mlx4_priv *priv = mlx4_priv(dev); 437 438 if (port < 1 || port > dev->caps.num_ports) 439 return -EINVAL; 440 441 s_steer = &mlx4_priv(dev)->steer[port - 1]; 442 443 mutex_lock(&priv->mcg_table.mutex); 444 445 if (get_promisc_qp(dev, port, steer, qpn)) { 446 err = 
0; /* Noting to do, already exists */ 447 goto out_mutex; 448 } 449 450 pqp = kmalloc(sizeof *pqp, GFP_KERNEL); 451 if (!pqp) { 452 err = -ENOMEM; 453 goto out_mutex; 454 } 455 pqp->qpn = qpn; 456 457 mailbox = mlx4_alloc_cmd_mailbox(dev); 458 if (IS_ERR(mailbox)) { 459 err = -ENOMEM; 460 goto out_alloc; 461 } 462 mgm = mailbox->buf; 463 464 if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) { 465 /* the promisc qp needs to be added for each one of the steering 466 * entries, if it already exists, needs to be added as a duplicate 467 * for this entry */ 468 list_for_each_entry(entry, &s_steer->steer_entries[steer], list) { 469 err = mlx4_READ_ENTRY(dev, entry->index, mailbox); 470 if (err) 471 goto out_mailbox; 472 473 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 474 prot = be32_to_cpu(mgm->members_count) >> 30; 475 found = false; 476 for (i = 0; i < members_count; i++) { 477 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) { 478 /* Entry already exists, add to duplicates */ 479 dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 480 if (!dqp) { 481 err = -ENOMEM; 482 goto out_mailbox; 483 } 484 dqp->qpn = qpn; 485 list_add_tail(&dqp->list, &entry->duplicates); 486 found = true; 487 } 488 } 489 if (!found) { 490 /* Need to add the qpn to mgm */ 491 if (members_count == dev->caps.num_qp_per_mgm) { 492 /* entry is full */ 493 err = -ENOMEM; 494 goto out_mailbox; 495 } 496 mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK); 497 mgm->members_count = cpu_to_be32(members_count | (prot << 30)); 498 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); 499 if (err) 500 goto out_mailbox; 501 } 502 } 503 } 504 505 /* add the new qpn to list of promisc qps */ 506 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); 507 /* now need to add all the promisc qps to default entry */ 508 memset(mgm, 0, sizeof *mgm); 509 members_count = 0; 510 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) { 511 if (members_count == dev->caps.num_qp_per_mgm) { 512 /* 
entry is full */ 513 err = -ENOMEM; 514 goto out_list; 515 } 516 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); 517 } 518 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); 519 520 err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); 521 if (err) 522 goto out_list; 523 524 mlx4_free_cmd_mailbox(dev, mailbox); 525 mutex_unlock(&priv->mcg_table.mutex); 526 return 0; 527 528out_list: 529 list_del(&pqp->list); 530out_mailbox: 531 mlx4_free_cmd_mailbox(dev, mailbox); 532out_alloc: 533 kfree(pqp); 534out_mutex: 535 mutex_unlock(&priv->mcg_table.mutex); 536 return err; 537} 538 539static int remove_promisc_qp(struct mlx4_dev *dev, u8 port, 540 enum mlx4_steer_type steer, u32 qpn) 541{ 542 struct mlx4_priv *priv = mlx4_priv(dev); 543 struct mlx4_steer *s_steer; 544 struct mlx4_cmd_mailbox *mailbox; 545 struct mlx4_mgm *mgm; 546 struct mlx4_steer_index *entry, *tmp_entry; 547 struct mlx4_promisc_qp *pqp; 548 struct mlx4_promisc_qp *dqp; 549 u32 members_count; 550 bool found; 551 bool back_to_list = false; 552 int i, loc = -1; 553 int err; 554 555 if (port < 1 || port > dev->caps.num_ports) 556 return -EINVAL; 557 558 s_steer = &mlx4_priv(dev)->steer[port - 1]; 559 mutex_lock(&priv->mcg_table.mutex); 560 561 pqp = get_promisc_qp(dev, port, steer, qpn); 562 if (unlikely(!pqp)) { 563 mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn); 564 /* nothing to do */ 565 err = 0; 566 goto out_mutex; 567 } 568 569 /*remove from list of promisc qps */ 570 list_del(&pqp->list); 571 572 /* set the default entry not to include the removed one */ 573 mailbox = mlx4_alloc_cmd_mailbox(dev); 574 if (IS_ERR(mailbox)) { 575 err = -ENOMEM; 576 back_to_list = true; 577 goto out_list; 578 } 579 mgm = mailbox->buf; 580 memset(mgm, 0, sizeof *mgm); 581 members_count = 0; 582 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) 583 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); 584 mgm->members_count = cpu_to_be32(members_count | 
MLX4_PROT_ETH << 30); 585 586 err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); 587 if (err) 588 goto out_mailbox; 589 590 if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) { 591 /* remove the qp from all the steering entries*/ 592 list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) { 593 found = false; 594 list_for_each_entry(dqp, &entry->duplicates, list) { 595 if (dqp->qpn == qpn) { 596 found = true; 597 break; 598 } 599 } 600 if (found) { 601 /* a duplicate, no need to change the mgm, 602 * only update the duplicates list */ 603 list_del(&dqp->list); 604 kfree(dqp); 605 } else { 606 err = mlx4_READ_ENTRY(dev, entry->index, mailbox); 607 if (err) 608 goto out_mailbox; 609 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 610 if (!members_count) { 611 mlx4_warn(dev, "QP %06x wasn't found in entry %x mcount=0." 612 " deleting entry...\n", qpn, entry->index); 613 list_del(&entry->list); 614 kfree(entry); 615 continue; 616 } 617 618 for (i = 0; i < members_count; ++i) 619 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) { 620 loc = i; 621 break; 622 } 623 624 if (loc < 0) { 625 mlx4_err(dev, "QP %06x wasn't found in entry %d\n", 626 qpn, entry->index); 627 err = -EINVAL; 628 goto out_mailbox; 629 } 630 631 /* copy the last QP in this MGM over removed QP */ 632 mgm->qp[loc] = mgm->qp[members_count - 1]; 633 mgm->qp[members_count - 1] = 0; 634 mgm->members_count = cpu_to_be32(--members_count | 635 (MLX4_PROT_ETH << 30)); 636 637 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); 638 if (err) 639 goto out_mailbox; 640 } 641 } 642 } 643 644out_mailbox: 645 mlx4_free_cmd_mailbox(dev, mailbox); 646out_list: 647 if (back_to_list) 648 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); 649 else 650 kfree(pqp); 651out_mutex: 652 mutex_unlock(&priv->mcg_table.mutex); 653 return err; 654} 655 656/* 657 * Caller must hold MCG table semaphore. gid and mgm parameters must 658 * be properly aligned for command interface. 
659 * 660 * Returns 0 unless a firmware command error occurs. 661 * 662 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1 663 * and *mgm holds MGM entry. 664 * 665 * if GID is found in AMGM, *index = index in AMGM, *prev = index of 666 * previous entry in hash chain and *mgm holds AMGM entry. 667 * 668 * If no AMGM exists for given gid, *index = -1, *prev = index of last 669 * entry in hash chain and *mgm holds end of hash chain. 670 */ 671static int find_entry(struct mlx4_dev *dev, u8 port, 672 u8 *gid, enum mlx4_protocol prot, 673 struct mlx4_cmd_mailbox *mgm_mailbox, 674 int *prev, int *index) 675{ 676 struct mlx4_cmd_mailbox *mailbox; 677 struct mlx4_mgm *mgm = mgm_mailbox->buf; 678 u8 *mgid; 679 int err; 680 u16 hash; 681 u8 op_mod = (prot == MLX4_PROT_ETH) ? 682 !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0; 683 684 mailbox = mlx4_alloc_cmd_mailbox(dev); 685 if (IS_ERR(mailbox)) 686 return -ENOMEM; 687 mgid = mailbox->buf; 688 689 memcpy(mgid, gid, 16); 690 691 err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod); 692 mlx4_free_cmd_mailbox(dev, mailbox); 693 if (err) 694 return err; 695 696 if (0) 697 mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash); 698 699 *index = hash; 700 *prev = -1; 701 702 do { 703 err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox); 704 if (err) 705 return err; 706 707 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { 708 if (*index != hash) { 709 mlx4_err(dev, "Found zero MGID in AMGM.\n"); 710 err = -EINVAL; 711 } 712 return err; 713 } 714 715 if (!memcmp(mgm->gid, gid, 16) && 716 be32_to_cpu(mgm->members_count) >> 30 == prot) 717 return err; 718 719 *prev = *index; 720 *index = be32_to_cpu(mgm->next_gid_index) >> 6; 721 } while (*index); 722 723 *index = -1; 724 return err; 725} 726 727static const u8 __promisc_mode[] = { 728 [MLX4_FS_REGULAR] = 0x0, 729 [MLX4_FS_ALL_DEFAULT] = 0x1, 730 [MLX4_FS_MC_DEFAULT] = 0x3, 731 [MLX4_FS_UC_SNIFFER] = 0x4, 732 [MLX4_FS_MC_SNIFFER] = 0x5, 733}; 734 735int 
map_sw_to_hw_steering_mode(struct mlx4_dev *dev, 736 enum mlx4_net_trans_promisc_mode flow_type) 737{ 738 if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) { 739 mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type); 740 return -EINVAL; 741 } 742 return __promisc_mode[flow_type]; 743} 744EXPORT_SYMBOL_GPL(map_sw_to_hw_steering_mode); 745 746static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl, 747 struct mlx4_net_trans_rule_hw_ctrl *hw) 748{ 749 u8 flags = 0; 750 751 flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0; 752 flags |= ctrl->exclusive ? (1 << 2) : 0; 753 flags |= ctrl->allow_loopback ? (1 << 3) : 0; 754 755 hw->flags = flags; 756 hw->type = __promisc_mode[ctrl->promisc_mode]; 757 hw->prio = cpu_to_be16(ctrl->priority); 758 hw->port = ctrl->port; 759 hw->qpn = cpu_to_be32(ctrl->qpn); 760} 761 762const u16 __sw_id_hw[] = { 763 [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001, 764 [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005, 765 [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003, 766 [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002, 767 [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004, 768 [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006 769}; 770 771int map_sw_to_hw_steering_id(struct mlx4_dev *dev, 772 enum mlx4_net_trans_rule_id id) 773{ 774 if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) { 775 mlx4_err(dev, "Invalid network rule id. 
id = %d\n", id); 776 return -EINVAL; 777 } 778 return __sw_id_hw[id]; 779} 780EXPORT_SYMBOL_GPL(map_sw_to_hw_steering_id); 781 782static const int __rule_hw_sz[] = { 783 [MLX4_NET_TRANS_RULE_ID_ETH] = 784 sizeof(struct mlx4_net_trans_rule_hw_eth), 785 [MLX4_NET_TRANS_RULE_ID_IB] = 786 sizeof(struct mlx4_net_trans_rule_hw_ib), 787 [MLX4_NET_TRANS_RULE_ID_IPV6] = 0, 788 [MLX4_NET_TRANS_RULE_ID_IPV4] = 789 sizeof(struct mlx4_net_trans_rule_hw_ipv4), 790 [MLX4_NET_TRANS_RULE_ID_TCP] = 791 sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), 792 [MLX4_NET_TRANS_RULE_ID_UDP] = 793 sizeof(struct mlx4_net_trans_rule_hw_tcp_udp) 794}; 795 796int hw_rule_sz(struct mlx4_dev *dev, 797 enum mlx4_net_trans_rule_id id) 798{ 799 if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) { 800 mlx4_err(dev, "Invalid network rule id. id = %d\n", id); 801 return -EINVAL; 802 } 803 804 return __rule_hw_sz[id]; 805} 806EXPORT_SYMBOL_GPL(hw_rule_sz); 807 808static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec, 809 struct _rule_hw *rule_hw) 810{ 811 if (hw_rule_sz(dev, spec->id) < 0) 812 return -EINVAL; 813 memset(rule_hw, 0, hw_rule_sz(dev, spec->id)); 814 rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]); 815 rule_hw->size = hw_rule_sz(dev, spec->id) >> 2; 816 817 switch (spec->id) { 818 case MLX4_NET_TRANS_RULE_ID_ETH: 819 memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN); 820 memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk, 821 ETH_ALEN); 822 memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN); 823 memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk, 824 ETH_ALEN); 825 if (spec->eth.ether_type_enable) { 826 rule_hw->eth.ether_type_enable = 1; 827 rule_hw->eth.ether_type = spec->eth.ether_type; 828 } 829 rule_hw->eth.vlan_tag = spec->eth.vlan_id; 830 rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk; 831 break; 832 833 case MLX4_NET_TRANS_RULE_ID_IB: 834 rule_hw->ib.l3_qpn = spec->ib.l3_qpn; 835 rule_hw->ib.qpn_mask = spec->ib.qpn_msk; 836 
memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16); 837 memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16); 838 break; 839 840 case MLX4_NET_TRANS_RULE_ID_IPV6: 841 return -EOPNOTSUPP; 842 843 case MLX4_NET_TRANS_RULE_ID_IPV4: 844 rule_hw->ipv4.src_ip = spec->ipv4.src_ip; 845 rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk; 846 rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip; 847 rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk; 848 break; 849 850 case MLX4_NET_TRANS_RULE_ID_TCP: 851 case MLX4_NET_TRANS_RULE_ID_UDP: 852 rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port; 853 rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk; 854 rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port; 855 rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk; 856 break; 857 858 default: 859 return -EINVAL; 860 } 861 862 return __rule_hw_sz[spec->id]; 863} 864 865static void mlx4_err_rule(struct mlx4_dev *dev, char *str, 866 struct mlx4_net_trans_rule *rule) 867{ 868#define BUF_SIZE 256 869 struct mlx4_spec_list *cur; 870 char buf[BUF_SIZE]; 871 int len = 0; 872 873 mlx4_err(dev, "%s", str); 874 len += snprintf(buf + len, BUF_SIZE - len, 875 "port = %d prio = 0x%x qp = 0x%x ", 876 rule->port, rule->priority, rule->qpn); 877 878 list_for_each_entry(cur, &rule->list, list) { 879 switch (cur->id) { 880 case MLX4_NET_TRANS_RULE_ID_ETH: 881 len += snprintf(buf + len, BUF_SIZE - len, 882 "dmac = %pM ", &cur->eth.dst_mac); 883 if (cur->eth.ether_type) 884 len += snprintf(buf + len, BUF_SIZE - len, 885 "ethertype = 0x%x ", 886 be16_to_cpu(cur->eth.ether_type)); 887 if (cur->eth.vlan_id) 888 len += snprintf(buf + len, BUF_SIZE - len, 889 "vlan-id = %d ", 890 be16_to_cpu(cur->eth.vlan_id)); 891 break; 892 893 case MLX4_NET_TRANS_RULE_ID_IPV4: 894 if (cur->ipv4.src_ip) 895 len += snprintf(buf + len, BUF_SIZE - len, 896 "src-ip = %pI4 ", 897 &cur->ipv4.src_ip); 898 if (cur->ipv4.dst_ip) 899 len += snprintf(buf + len, BUF_SIZE - len, 900 "dst-ip = %pI4 ", 901 &cur->ipv4.dst_ip); 902 
break; 903 904 case MLX4_NET_TRANS_RULE_ID_TCP: 905 case MLX4_NET_TRANS_RULE_ID_UDP: 906 if (cur->tcp_udp.src_port) 907 len += snprintf(buf + len, BUF_SIZE - len, 908 "src-port = %d ", 909 be16_to_cpu(cur->tcp_udp.src_port)); 910 if (cur->tcp_udp.dst_port) 911 len += snprintf(buf + len, BUF_SIZE - len, 912 "dst-port = %d ", 913 be16_to_cpu(cur->tcp_udp.dst_port)); 914 break; 915 916 case MLX4_NET_TRANS_RULE_ID_IB: 917 len += snprintf(buf + len, BUF_SIZE - len, 918 "dst-gid = %pI6\n", cur->ib.dst_gid); 919 len += snprintf(buf + len, BUF_SIZE - len, 920 "dst-gid-mask = %pI6\n", 921 cur->ib.dst_gid_msk); 922 break; 923 924 case MLX4_NET_TRANS_RULE_ID_IPV6: 925 break; 926 927 default: 928 break; 929 } 930 } 931 len += snprintf(buf + len, BUF_SIZE - len, "\n"); 932 mlx4_err(dev, "%s", buf); 933 934 if (len >= BUF_SIZE) 935 mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n"); 936} 937 938int mlx4_flow_attach(struct mlx4_dev *dev, 939 struct mlx4_net_trans_rule *rule, u64 *reg_id) 940{ 941 struct mlx4_cmd_mailbox *mailbox; 942 struct mlx4_spec_list *cur; 943 u32 size = 0; 944 int ret; 945 946 mailbox = mlx4_alloc_cmd_mailbox(dev); 947 if (IS_ERR(mailbox)) 948 return PTR_ERR(mailbox); 949 950 memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl)); 951 trans_rule_ctrl_to_hw(rule, mailbox->buf); 952 953 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); 954 955 list_for_each_entry(cur, &rule->list, list) { 956 ret = parse_trans_rule(dev, cur, mailbox->buf + size); 957 if (ret < 0) { 958 mlx4_free_cmd_mailbox(dev, mailbox); 959 return -EINVAL; 960 } 961 size += ret; 962 } 963 964 ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id); 965 if (ret == -ENOMEM) 966 mlx4_err_rule(dev, 967 "mcg table is full. 
Fail to register network rule.\n", 968 rule); 969 else if (ret) 970 mlx4_err_rule(dev, "Fail to register network rule.\n", rule); 971 972 mlx4_free_cmd_mailbox(dev, mailbox); 973 974 return ret; 975} 976EXPORT_SYMBOL_GPL(mlx4_flow_attach); 977 978int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id) 979{ 980 int err; 981 982 err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id); 983 if (err) 984 mlx4_err(dev, "Fail to detach network rule. registration id = 0x%llx\n", 985 (unsigned long long)reg_id); 986 return err; 987} 988EXPORT_SYMBOL_GPL(mlx4_flow_detach); 989 990int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, u32 max_range_qpn) 991{ 992 int err; 993 u64 in_param; 994 995 in_param = ((u64) min_range_qpn) << 32; 996 in_param |= ((u64) max_range_qpn) & 0xFFFFFFFF; 997 998 err = mlx4_cmd(dev, in_param, 0, 0, 999 MLX4_FLOW_STEERING_IB_UC_QP_RANGE, 1000 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1001 1002 return err; 1003} 1004EXPORT_SYMBOL_GPL(mlx4_FLOW_STEERING_IB_UC_QP_RANGE); 1005 1006int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1007 int block_mcast_loopback, enum mlx4_protocol prot, 1008 enum mlx4_steer_type steer) 1009{ 1010 struct mlx4_priv *priv = mlx4_priv(dev); 1011 struct mlx4_cmd_mailbox *mailbox; 1012 struct mlx4_mgm *mgm; 1013 u32 members_count; 1014 int index, prev; 1015 int link = 0; 1016 int i; 1017 int err; 1018 u8 port = gid[5]; 1019 u8 new_entry = 0; 1020 1021 mailbox = mlx4_alloc_cmd_mailbox(dev); 1022 if (IS_ERR(mailbox)) 1023 return PTR_ERR(mailbox); 1024 mgm = mailbox->buf; 1025 1026 mutex_lock(&priv->mcg_table.mutex); 1027 err = find_entry(dev, port, gid, prot, 1028 mailbox, &prev, &index); 1029 if (err) 1030 goto out; 1031 1032 if (index != -1) { 1033 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { 1034 new_entry = 1; 1035 memcpy(mgm->gid, gid, 16); 1036 } 1037 } else { 1038 link = 1; 1039 1040 index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap); 1041 if (index == -1) { 1042 
mlx4_err(dev, "No AMGM entries left\n"); 1043 err = -ENOMEM; 1044 goto out; 1045 } 1046 index += dev->caps.num_mgms; 1047 1048 new_entry = 1; 1049 memset(mgm, 0, sizeof *mgm); 1050 memcpy(mgm->gid, gid, 16); 1051 } 1052 1053 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 1054 if (members_count == dev->caps.num_qp_per_mgm) { 1055 mlx4_err(dev, "MGM at index %x is full.\n", index); 1056 err = -ENOMEM; 1057 goto out; 1058 } 1059 1060 for (i = 0; i < members_count; ++i) 1061 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { 1062 mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn); 1063 err = 0; 1064 goto out; 1065 } 1066 1067 mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) | 1068 (!!mlx4_blck_lb << MGM_BLCK_LB_BIT)); 1069 1070 mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30); 1071 1072 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 1073 if (err) 1074 goto out; 1075 1076 /* if !link, still add the new entry. */ 1077 if (!link) 1078 goto skip_link; 1079 1080 err = mlx4_READ_ENTRY(dev, prev, mailbox); 1081 if (err) 1082 goto out; 1083 1084 mgm->next_gid_index = cpu_to_be32(index << 6); 1085 1086 err = mlx4_WRITE_ENTRY(dev, prev, mailbox); 1087 if (err) 1088 goto out; 1089 1090skip_link: 1091 if (prot == MLX4_PROT_ETH) { 1092 /* manage the steering entry for promisc mode */ 1093 if (new_entry) 1094 new_steering_entry(dev, port, steer, index, qp->qpn); 1095 else 1096 existing_steering_entry(dev, port, steer, 1097 index, qp->qpn); 1098 } 1099 1100out: 1101 if (err && link && index != -1) { 1102 if (index < dev->caps.num_mgms) 1103 mlx4_warn(dev, "Got AMGM index %d < %d", 1104 index, dev->caps.num_mgms); 1105 else 1106 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1107 index - dev->caps.num_mgms, MLX4_USE_RR); 1108 } 1109 mutex_unlock(&priv->mcg_table.mutex); 1110 1111 mlx4_free_cmd_mailbox(dev, mailbox); 1112 return err; 1113} 1114 1115int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 
1116 enum mlx4_protocol prot, enum mlx4_steer_type steer) 1117{ 1118 struct mlx4_priv *priv = mlx4_priv(dev); 1119 struct mlx4_cmd_mailbox *mailbox; 1120 struct mlx4_mgm *mgm; 1121 u32 members_count; 1122 int prev, index; 1123 int i, loc = -1; 1124 int err; 1125 u8 port = gid[5]; 1126 bool removed_entry = false; 1127 1128 mailbox = mlx4_alloc_cmd_mailbox(dev); 1129 if (IS_ERR(mailbox)) 1130 return PTR_ERR(mailbox); 1131 mgm = mailbox->buf; 1132 1133 mutex_lock(&priv->mcg_table.mutex); 1134 1135 err = find_entry(dev, port, gid, prot, 1136 mailbox, &prev, &index); 1137 if (err) 1138 goto out; 1139 1140 if (index == -1) { 1141 mlx4_err(dev, "MGID %pI6 not found\n", gid); 1142 err = -EINVAL; 1143 goto out; 1144 } 1145 1146 /* 1147 if this QP is also a promisc QP, it shouldn't be removed only if 1148 at least one none promisc QP is also attached to this MCG 1149 */ 1150 if (prot == MLX4_PROT_ETH && 1151 check_duplicate_entry(dev, port, steer, index, qp->qpn) && 1152 !promisc_steering_entry(dev, port, steer, index, qp->qpn, NULL)) 1153 goto out; 1154 1155 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 1156 for (i = 0; i < members_count; ++i) 1157 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { 1158 loc = i; 1159 break; 1160 } 1161 1162 if (loc == -1) { 1163 mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn); 1164 err = -EINVAL; 1165 goto out; 1166 } 1167 1168 /* copy the last QP in this MGM over removed QP */ 1169 mgm->qp[loc] = mgm->qp[members_count - 1]; 1170 mgm->qp[members_count - 1] = 0; 1171 mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30); 1172 1173 if (prot == MLX4_PROT_ETH) 1174 removed_entry = can_remove_steering_entry(dev, port, steer, 1175 index, qp->qpn); 1176 if (members_count && (prot != MLX4_PROT_ETH || !removed_entry)) { 1177 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 1178 goto out; 1179 } 1180 1181 /* We are going to delete the entry, members count should be 0 */ 1182 mgm->members_count = cpu_to_be32((u32) 
prot << 30); 1183 1184 if (prev == -1) { 1185 /* Remove entry from MGM */ 1186 int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; 1187 if (amgm_index) { 1188 err = mlx4_READ_ENTRY(dev, amgm_index, mailbox); 1189 if (err) 1190 goto out; 1191 } else 1192 memset(mgm->gid, 0, 16); 1193 1194 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 1195 if (err) 1196 goto out; 1197 1198 if (amgm_index) { 1199 if (amgm_index < dev->caps.num_mgms) 1200 mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d", 1201 index, amgm_index, dev->caps.num_mgms); 1202 else 1203 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1204 amgm_index - dev->caps.num_mgms, MLX4_USE_RR); 1205 } 1206 } else { 1207 /* Remove entry from AMGM */ 1208 int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; 1209 err = mlx4_READ_ENTRY(dev, prev, mailbox); 1210 if (err) 1211 goto out; 1212 1213 mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); 1214 1215 err = mlx4_WRITE_ENTRY(dev, prev, mailbox); 1216 if (err) 1217 goto out; 1218 1219 if (index < dev->caps.num_mgms) 1220 mlx4_warn(dev, "entry %d had next AMGM index %d < %d", 1221 prev, index, dev->caps.num_mgms); 1222 else 1223 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1224 index - dev->caps.num_mgms, MLX4_USE_RR); 1225 } 1226 1227out: 1228 mutex_unlock(&priv->mcg_table.mutex); 1229 1230 mlx4_free_cmd_mailbox(dev, mailbox); 1231 return err; 1232} 1233 1234static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp, 1235 u8 gid[16], u8 attach, u8 block_loopback, 1236 enum mlx4_protocol prot) 1237{ 1238 struct mlx4_cmd_mailbox *mailbox; 1239 int err = 0; 1240 int qpn; 1241 1242 if (!mlx4_is_mfunc(dev)) 1243 return -EBADF; 1244 1245 mailbox = mlx4_alloc_cmd_mailbox(dev); 1246 if (IS_ERR(mailbox)) 1247 return PTR_ERR(mailbox); 1248 1249 memcpy(mailbox->buf, gid, 16); 1250 qpn = qp->qpn; 1251 qpn |= (prot << 28); 1252 if (attach && block_loopback) 1253 qpn |= (1 << 31); 1254 1255 err = mlx4_cmd(dev, mailbox->dma, qpn, attach, 1256 MLX4_CMD_QP_ATTACH, 
MLX4_CMD_TIME_CLASS_A, 1257 MLX4_CMD_WRAPPED); 1258 1259 mlx4_free_cmd_mailbox(dev, mailbox); 1260 return err; 1261} 1262 1263int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, 1264 u8 gid[16], u8 port, 1265 int block_mcast_loopback, 1266 enum mlx4_protocol prot, u64 *reg_id) 1267{ 1268 struct mlx4_spec_list spec = { {NULL} }; 1269 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 1270 1271 struct mlx4_net_trans_rule rule = { 1272 .queue_mode = MLX4_NET_TRANS_Q_FIFO, 1273 .exclusive = 0, 1274 .promisc_mode = MLX4_FS_REGULAR, 1275 .priority = MLX4_DOMAIN_NIC, 1276 }; 1277 1278 rule.allow_loopback = !block_mcast_loopback; 1279 rule.port = port; 1280 rule.qpn = qp->qpn; 1281 INIT_LIST_HEAD(&rule.list); 1282 1283 switch (prot) { 1284 case MLX4_PROT_ETH: 1285 spec.id = MLX4_NET_TRANS_RULE_ID_ETH; 1286 memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN); 1287 memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 1288 break; 1289 1290 case MLX4_PROT_IB_IPV6: 1291 spec.id = MLX4_NET_TRANS_RULE_ID_IB; 1292 memcpy(spec.ib.dst_gid, gid, 16); 1293 memset(&spec.ib.dst_gid_msk, 0xff, 16); 1294 break; 1295 default: 1296 return -EINVAL; 1297 } 1298 list_add_tail(&spec.list, &rule.list); 1299 1300 return mlx4_flow_attach(dev, &rule, reg_id); 1301} 1302 1303int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1304 u8 port, int block_mcast_loopback, 1305 enum mlx4_protocol prot, u64 *reg_id) 1306{ 1307 enum mlx4_steer_type steer; 1308 steer = (is_valid_ether_addr(&gid[10])) ? 
MLX4_UC_STEER : MLX4_MC_STEER; 1309 1310 switch (dev->caps.steering_mode) { 1311 case MLX4_STEERING_MODE_A0: 1312 if (prot == MLX4_PROT_ETH) 1313 return 0; 1314 1315 case MLX4_STEERING_MODE_B0: 1316 if (prot == MLX4_PROT_ETH) 1317 gid[7] |= (steer << 1); 1318 1319 if (mlx4_is_mfunc(dev)) 1320 return mlx4_QP_ATTACH(dev, qp, gid, 1, 1321 block_mcast_loopback, prot); 1322 return mlx4_qp_attach_common(dev, qp, gid, 1323 block_mcast_loopback, prot, 1324 MLX4_MC_STEER); 1325 1326 case MLX4_STEERING_MODE_DEVICE_MANAGED: 1327 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port, 1328 block_mcast_loopback, 1329 prot, reg_id); 1330 default: 1331 return -EINVAL; 1332 } 1333} 1334EXPORT_SYMBOL_GPL(mlx4_multicast_attach); 1335 1336int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1337 enum mlx4_protocol prot, u64 reg_id) 1338{ 1339 enum mlx4_steer_type steer; 1340 steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER; 1341 1342 switch (dev->caps.steering_mode) { 1343 case MLX4_STEERING_MODE_A0: 1344 if (prot == MLX4_PROT_ETH) 1345 return 0; 1346 1347 case MLX4_STEERING_MODE_B0: 1348 if (prot == MLX4_PROT_ETH) 1349 gid[7] |= (steer << 1); 1350 1351 if (mlx4_is_mfunc(dev)) 1352 return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); 1353 1354 return mlx4_qp_detach_common(dev, qp, gid, prot, 1355 MLX4_MC_STEER); 1356 1357 case MLX4_STEERING_MODE_DEVICE_MANAGED: 1358 return mlx4_flow_detach(dev, reg_id); 1359 1360 default: 1361 return -EINVAL; 1362 } 1363} 1364EXPORT_SYMBOL_GPL(mlx4_multicast_detach); 1365 1366int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, 1367 u32 qpn, enum mlx4_net_trans_promisc_mode mode) 1368{ 1369 struct mlx4_net_trans_rule rule; 1370 u64 *regid_p; 1371 1372 switch (mode) { 1373 case MLX4_FS_ALL_DEFAULT: 1374 regid_p = &dev->regid_promisc_array[port]; 1375 break; 1376 case MLX4_FS_MC_DEFAULT: 1377 regid_p = &dev->regid_allmulti_array[port]; 1378 break; 1379 default: 1380 return -1; 1381 } 1382 1383 if 
(*regid_p != 0) 1384 return -1; 1385 1386 rule.promisc_mode = mode; 1387 rule.port = port; 1388 rule.qpn = qpn; 1389 INIT_LIST_HEAD(&rule.list); 1390 mlx4_err(dev, "going promisc on %x\n", port); 1391 1392 return mlx4_flow_attach(dev, &rule, regid_p); 1393} 1394EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add); 1395 1396int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port, 1397 enum mlx4_net_trans_promisc_mode mode) 1398{ 1399 int ret; 1400 u64 *regid_p; 1401 1402 switch (mode) { 1403 case MLX4_FS_ALL_DEFAULT: 1404 regid_p = &dev->regid_promisc_array[port]; 1405 break; 1406 case MLX4_FS_MC_DEFAULT: 1407 regid_p = &dev->regid_allmulti_array[port]; 1408 break; 1409 default: 1410 return -1; 1411 } 1412 1413 if (*regid_p == 0) 1414 return -1; 1415 1416 ret = mlx4_flow_detach(dev, *regid_p); 1417 if (ret == 0) 1418 *regid_p = 0; 1419 1420 return ret; 1421} 1422EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove); 1423 1424int mlx4_unicast_attach(struct mlx4_dev *dev, 1425 struct mlx4_qp *qp, u8 gid[16], 1426 int block_mcast_loopback, enum mlx4_protocol prot) 1427{ 1428 if (prot == MLX4_PROT_ETH) 1429 gid[7] |= (MLX4_UC_STEER << 1); 1430 1431 if (mlx4_is_mfunc(dev)) 1432 return mlx4_QP_ATTACH(dev, qp, gid, 1, 1433 block_mcast_loopback, prot); 1434 1435 return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback, 1436 prot, MLX4_UC_STEER); 1437} 1438EXPORT_SYMBOL_GPL(mlx4_unicast_attach); 1439 1440int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, 1441 u8 gid[16], enum mlx4_protocol prot) 1442{ 1443 if (prot == MLX4_PROT_ETH) 1444 gid[7] |= (MLX4_UC_STEER << 1); 1445 1446 if (mlx4_is_mfunc(dev)) 1447 return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); 1448 1449 return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER); 1450} 1451EXPORT_SYMBOL_GPL(mlx4_unicast_detach); 1452 1453int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, 1454 struct mlx4_vhcr *vhcr, 1455 struct mlx4_cmd_mailbox *inbox, 1456 struct mlx4_cmd_mailbox *outbox, 1457 
struct mlx4_cmd_info *cmd) 1458{ 1459 u32 qpn = (u32) vhcr->in_param & 0xffffffff; 1460 u8 port = vhcr->in_param >> 62; 1461 enum mlx4_steer_type steer = vhcr->in_modifier; 1462 1463 /* Promiscuous unicast is not allowed in mfunc for VFs */ 1464 if ((slave != dev->caps.function) && (steer == MLX4_UC_STEER)) 1465 return 0; 1466 1467 if (vhcr->op_modifier) 1468 return add_promisc_qp(dev, port, steer, qpn); 1469 else 1470 return remove_promisc_qp(dev, port, steer, qpn); 1471} 1472 1473static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn, 1474 enum mlx4_steer_type steer, u8 add, u8 port) 1475{ 1476 return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add, 1477 MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A, 1478 MLX4_CMD_WRAPPED); 1479} 1480 1481int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) 1482{ 1483 if (mlx4_is_mfunc(dev)) 1484 return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port); 1485 1486 return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn); 1487} 1488EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add); 1489 1490int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) 1491{ 1492 if (mlx4_is_mfunc(dev)) 1493 return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port); 1494 1495 return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn); 1496} 1497EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove); 1498 1499int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) 1500{ 1501 if (mlx4_is_mfunc(dev)) 1502 return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port); 1503 1504 return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn); 1505} 1506EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add); 1507 1508int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) 1509{ 1510 if (mlx4_is_mfunc(dev)) 1511 return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port); 1512 1513 return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn); 1514} 1515EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove); 1516 1517int mlx4_init_mcg_table(struct mlx4_dev *dev) 
1518{ 1519 struct mlx4_priv *priv = mlx4_priv(dev); 1520 int err; 1521 1522 /* No need for mcg_table when fw managed the mcg table*/ 1523 if (dev->caps.steering_mode == 1524 MLX4_STEERING_MODE_DEVICE_MANAGED) 1525 return 0; 1526 err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms, 1527 dev->caps.num_amgms - 1, 0, 0); 1528 if (err) 1529 return err; 1530 1531 mutex_init(&priv->mcg_table.mutex); 1532 1533 return 0; 1534} 1535 1536void mlx4_cleanup_mcg_table(struct mlx4_dev *dev) 1537{ 1538 if (dev->caps.steering_mode != 1539 MLX4_STEERING_MODE_DEVICE_MANAGED) 1540 mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap); 1541} 1542