/* mcg.c — mlx4 multicast group (MCG) and steering management, revision 271127 */
1/* 2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU 7 * General Public License (GPL) Version 2, available from the file 8 * COPYING in the main directory of this source tree, or the 9 * OpenIB.org BSD license below: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * - Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * - Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 
32 */ 33 34#include <linux/string.h> 35 36#include <linux/mlx4/cmd.h> 37 38#include "mlx4.h" 39 40 41static const u8 zero_gid[16]; /* automatically initialized to 0 */ 42 43int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) 44{ 45 return 1 << dev->oper_log_mgm_entry_size; 46} 47 48int mlx4_get_qp_per_mgm(struct mlx4_dev *dev) 49{ 50 return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2); 51} 52 53static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev, 54 struct mlx4_cmd_mailbox *mailbox, 55 u32 size, 56 u64 *reg_id) 57{ 58 u64 imm; 59 int err = 0; 60 61 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0, 62 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, 63 MLX4_CMD_NATIVE); 64 if (err) 65 return err; 66 *reg_id = imm; 67 68 return err; 69} 70 71static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid) 72{ 73 int err = 0; 74 75 err = mlx4_cmd(dev, regid, 0, 0, 76 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, 77 MLX4_CMD_NATIVE); 78 79 return err; 80} 81 82static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index, 83 struct mlx4_cmd_mailbox *mailbox) 84{ 85 return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, 86 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 87} 88 89static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index, 90 struct mlx4_cmd_mailbox *mailbox) 91{ 92 return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, 93 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 94} 95 96static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer, 97 struct mlx4_cmd_mailbox *mailbox) 98{ 99 u32 in_mod; 100 101 in_mod = (u32) port << 16 | steer << 1; 102 return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1, 103 MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A, 104 MLX4_CMD_NATIVE); 105} 106 107static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 108 u16 *hash, u8 op_mod) 109{ 110 u64 imm; 111 int err; 112 113 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod, 114 MLX4_CMD_MGID_HASH, 
MLX4_CMD_TIME_CLASS_A, 115 MLX4_CMD_NATIVE); 116 117 if (!err) 118 *hash = imm; 119 120 return err; 121} 122 123static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port, 124 enum mlx4_steer_type steer, 125 u32 qpn) 126{ 127 struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[port - 1]; 128 struct mlx4_promisc_qp *pqp; 129 130 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { 131 if (pqp->qpn == qpn) 132 return pqp; 133 } 134 /* not found */ 135 return NULL; 136} 137 138/* 139 * Add new entry to steering data structure. 140 * All promisc QPs should be added as well 141 */ 142static int new_steering_entry(struct mlx4_dev *dev, u8 port, 143 enum mlx4_steer_type steer, 144 unsigned int index, u32 qpn) 145{ 146 struct mlx4_steer *s_steer; 147 struct mlx4_cmd_mailbox *mailbox; 148 struct mlx4_mgm *mgm; 149 u32 members_count; 150 struct mlx4_steer_index *new_entry; 151 struct mlx4_promisc_qp *pqp; 152 struct mlx4_promisc_qp *dqp = NULL; 153 u32 prot; 154 int err; 155 156 s_steer = &mlx4_priv(dev)->steer[port - 1]; 157 new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL); 158 if (!new_entry) 159 return -ENOMEM; 160 161 INIT_LIST_HEAD(&new_entry->duplicates); 162 new_entry->index = index; 163 list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]); 164 165 /* If the given qpn is also a promisc qp, 166 * it should be inserted to duplicates list 167 */ 168 pqp = get_promisc_qp(dev, port, steer, qpn); 169 if (pqp) { 170 dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 171 if (!dqp) { 172 err = -ENOMEM; 173 goto out_alloc; 174 } 175 dqp->qpn = qpn; 176 list_add_tail(&dqp->list, &new_entry->duplicates); 177 } 178 179 /* if no promisc qps for this vep, we are done */ 180 if (list_empty(&s_steer->promisc_qps[steer])) 181 return 0; 182 183 /* now need to add all the promisc qps to the new 184 * steering entry, as they should also receive the packets 185 * destined to this address */ 186 mailbox = mlx4_alloc_cmd_mailbox(dev); 187 if (IS_ERR(mailbox)) { 
188 err = -ENOMEM; 189 goto out_alloc; 190 } 191 mgm = mailbox->buf; 192 193 err = mlx4_READ_ENTRY(dev, index, mailbox); 194 if (err) 195 goto out_mailbox; 196 197 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 198 prot = be32_to_cpu(mgm->members_count) >> 30; 199 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { 200 /* don't add already existing qpn */ 201 if (pqp->qpn == qpn) 202 continue; 203 if (members_count == dev->caps.num_qp_per_mgm) { 204 /* out of space */ 205 err = -ENOMEM; 206 goto out_mailbox; 207 } 208 209 /* add the qpn */ 210 mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); 211 } 212 /* update the qps count and update the entry with all the promisc qps*/ 213 mgm->members_count = cpu_to_be32(members_count | (prot << 30)); 214 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 215 216out_mailbox: 217 mlx4_free_cmd_mailbox(dev, mailbox); 218 if (!err) 219 return 0; 220out_alloc: 221 if (dqp) { 222 list_del(&dqp->list); 223 kfree(dqp); 224 } 225 list_del(&new_entry->list); 226 kfree(new_entry); 227 return err; 228} 229 230/* update the data structures with existing steering entry */ 231static int existing_steering_entry(struct mlx4_dev *dev, u8 port, 232 enum mlx4_steer_type steer, 233 unsigned int index, u32 qpn) 234{ 235 struct mlx4_steer *s_steer; 236 struct mlx4_steer_index *tmp_entry, *entry = NULL; 237 struct mlx4_promisc_qp *pqp; 238 struct mlx4_promisc_qp *dqp; 239 240 s_steer = &mlx4_priv(dev)->steer[port - 1]; 241 242 pqp = get_promisc_qp(dev, port, steer, qpn); 243 if (!pqp) 244 return 0; /* nothing to do */ 245 246 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { 247 if (tmp_entry->index == index) { 248 entry = tmp_entry; 249 break; 250 } 251 } 252 if (unlikely(!entry)) { 253 mlx4_warn(dev, "Steering entry at index %x is not registered\n", index); 254 return -EINVAL; 255 } 256 257 /* the given qpn is listed as a promisc qpn 258 * we need to add it as a duplicate to this entry 259 * 
for future references */ 260 list_for_each_entry(dqp, &entry->duplicates, list) { 261 if (qpn == pqp->qpn) 262 return 0; /* qp is already duplicated */ 263 } 264 265 /* add the qp as a duplicate on this index */ 266 dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 267 if (!dqp) 268 return -ENOMEM; 269 dqp->qpn = qpn; 270 list_add_tail(&dqp->list, &entry->duplicates); 271 272 return 0; 273} 274 275/* Check whether a qpn is a duplicate on steering entry 276 * If so, it should not be removed from mgm */ 277static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port, 278 enum mlx4_steer_type steer, 279 unsigned int index, u32 qpn) 280{ 281 struct mlx4_steer *s_steer; 282 struct mlx4_steer_index *tmp_entry, *entry = NULL; 283 struct mlx4_promisc_qp *dqp, *tmp_dqp; 284 285 s_steer = &mlx4_priv(dev)->steer[port - 1]; 286 287 /* if qp is not promisc, it cannot be duplicated */ 288 if (!get_promisc_qp(dev, port, steer, qpn)) 289 return false; 290 291 /* The qp is promisc qp so it is a duplicate on this index 292 * Find the index entry, and remove the duplicate */ 293 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { 294 if (tmp_entry->index == index) { 295 entry = tmp_entry; 296 break; 297 } 298 } 299 if (unlikely(!entry)) { 300 mlx4_warn(dev, "Steering entry for index %x is not registered\n", index); 301 return false; 302 } 303 list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) { 304 if (dqp->qpn == qpn) { 305 list_del(&dqp->list); 306 kfree(dqp); 307 } 308 } 309 return true; 310} 311 312/* I a steering entry contains only promisc QPs, it can be removed. 
*/ 313static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port, 314 enum mlx4_steer_type steer, 315 unsigned int index, u32 tqpn) 316{ 317 struct mlx4_steer *s_steer; 318 struct mlx4_cmd_mailbox *mailbox; 319 struct mlx4_mgm *mgm; 320 struct mlx4_steer_index *entry = NULL, *tmp_entry; 321 u32 qpn; 322 u32 members_count; 323 bool ret = false; 324 int i; 325 326 s_steer = &mlx4_priv(dev)->steer[port - 1]; 327 328 mailbox = mlx4_alloc_cmd_mailbox(dev); 329 if (IS_ERR(mailbox)) 330 return false; 331 mgm = mailbox->buf; 332 333 if (mlx4_READ_ENTRY(dev, index, mailbox)) 334 goto out; 335 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 336 for (i = 0; i < members_count; i++) { 337 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; 338 if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) { 339 /* the qp is not promisc, the entry can't be removed */ 340 goto out; 341 } 342 } 343 /* All the qps currently registered for this entry are promiscuous, 344 * Checking for duplicates */ 345 ret = true; 346 list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) { 347 if (entry->index == index) { 348 if (list_empty(&entry->duplicates) || members_count == 1) { 349 struct mlx4_promisc_qp *pqp, *tmp_pqp; 350 /* 351 * If there is only 1 entry in duplicates than 352 * this is the QP we want to delete, going over 353 * the list and deleting the entry. 
354 */ 355 list_del(&entry->list); 356 list_for_each_entry_safe(pqp, tmp_pqp, 357 &entry->duplicates, 358 list) { 359 list_del(&pqp->list); 360 kfree(pqp); 361 } 362 kfree(entry); 363 } else { 364 /* This entry contains duplicates so it shouldn't be removed */ 365 ret = false; 366 goto out; 367 } 368 } 369 } 370 371out: 372 mlx4_free_cmd_mailbox(dev, mailbox); 373 return ret; 374} 375 376static int add_promisc_qp(struct mlx4_dev *dev, u8 port, 377 enum mlx4_steer_type steer, u32 qpn) 378{ 379 struct mlx4_steer *s_steer; 380 struct mlx4_cmd_mailbox *mailbox; 381 struct mlx4_mgm *mgm; 382 struct mlx4_steer_index *entry; 383 struct mlx4_promisc_qp *pqp; 384 struct mlx4_promisc_qp *dqp; 385 u32 members_count; 386 u32 prot; 387 int i; 388 bool found; 389 int err; 390 struct mlx4_priv *priv = mlx4_priv(dev); 391 392 s_steer = &mlx4_priv(dev)->steer[port - 1]; 393 394 mutex_lock(&priv->mcg_table.mutex); 395 396 if (get_promisc_qp(dev, port, steer, qpn)) { 397 err = 0; /* Noting to do, already exists */ 398 goto out_mutex; 399 } 400 401 pqp = kmalloc(sizeof *pqp, GFP_KERNEL); 402 if (!pqp) { 403 err = -ENOMEM; 404 goto out_mutex; 405 } 406 pqp->qpn = qpn; 407 408 mailbox = mlx4_alloc_cmd_mailbox(dev); 409 if (IS_ERR(mailbox)) { 410 err = -ENOMEM; 411 goto out_alloc; 412 } 413 mgm = mailbox->buf; 414 415 /* the promisc qp needs to be added for each one of the steering 416 * entries, if it already exists, needs to be added as a duplicate 417 * for this entry */ 418 list_for_each_entry(entry, &s_steer->steer_entries[steer], list) { 419 err = mlx4_READ_ENTRY(dev, entry->index, mailbox); 420 if (err) 421 goto out_mailbox; 422 423 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 424 prot = be32_to_cpu(mgm->members_count) >> 30; 425 found = false; 426 for (i = 0; i < members_count; i++) { 427 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) { 428 /* Entry already exists, add to duplicates */ 429 dqp = kmalloc(sizeof *dqp, GFP_KERNEL); 430 if (!dqp) { 431 err = 
-ENOMEM; 432 goto out_mailbox; 433 } 434 dqp->qpn = qpn; 435 list_add_tail(&dqp->list, &entry->duplicates); 436 found = true; 437 } 438 } 439 if (!found) { 440 /* Need to add the qpn to mgm */ 441 if (members_count == dev->caps.num_qp_per_mgm) { 442 /* entry is full */ 443 err = -ENOMEM; 444 goto out_mailbox; 445 } 446 mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK); 447 mgm->members_count = cpu_to_be32(members_count | (prot << 30)); 448 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); 449 if (err) 450 goto out_mailbox; 451 } 452 } 453 454 /* add the new qpn to list of promisc qps */ 455 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); 456 /* now need to add all the promisc qps to default entry */ 457 memset(mgm, 0, sizeof *mgm); 458 members_count = 0; 459 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) { 460 if (members_count == dev->caps.num_qp_per_mgm) { 461 /* entry is full */ 462 err = -ENOMEM; 463 goto out_list; 464 } 465 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); 466 } 467 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); 468 469 err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); 470 if (err) 471 goto out_list; 472 473 mlx4_free_cmd_mailbox(dev, mailbox); 474 mutex_unlock(&priv->mcg_table.mutex); 475 return 0; 476 477out_list: 478 list_del(&pqp->list); 479out_mailbox: 480 mlx4_free_cmd_mailbox(dev, mailbox); 481out_alloc: 482 kfree(pqp); 483out_mutex: 484 mutex_unlock(&priv->mcg_table.mutex); 485 return err; 486} 487 488static int remove_promisc_qp(struct mlx4_dev *dev, u8 port, 489 enum mlx4_steer_type steer, u32 qpn) 490{ 491 struct mlx4_priv *priv = mlx4_priv(dev); 492 struct mlx4_steer *s_steer; 493 struct mlx4_cmd_mailbox *mailbox; 494 struct mlx4_mgm *mgm; 495 struct mlx4_steer_index *entry; 496 struct mlx4_promisc_qp *pqp; 497 struct mlx4_promisc_qp *dqp; 498 u32 members_count; 499 bool found; 500 bool back_to_list = false; 501 int i, loc = -1; 502 int err; 503 504 s_steer 
= &mlx4_priv(dev)->steer[port - 1]; 505 mutex_lock(&priv->mcg_table.mutex); 506 507 pqp = get_promisc_qp(dev, port, steer, qpn); 508 if (unlikely(!pqp)) { 509 mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn); 510 /* nothing to do */ 511 err = 0; 512 goto out_mutex; 513 } 514 515 /*remove from list of promisc qps */ 516 list_del(&pqp->list); 517 518 /* set the default entry not to include the removed one */ 519 mailbox = mlx4_alloc_cmd_mailbox(dev); 520 if (IS_ERR(mailbox)) { 521 err = -ENOMEM; 522 back_to_list = true; 523 goto out_list; 524 } 525 mgm = mailbox->buf; 526 memset(mgm, 0, sizeof *mgm); 527 members_count = 0; 528 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) 529 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); 530 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); 531 532 err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); 533 if (err) 534 goto out_mailbox; 535 536 /* remove the qp from all the steering entries*/ 537 list_for_each_entry(entry, &s_steer->steer_entries[steer], list) { 538 found = false; 539 list_for_each_entry(dqp, &entry->duplicates, list) { 540 if (dqp->qpn == qpn) { 541 found = true; 542 break; 543 } 544 } 545 if (found) { 546 /* a duplicate, no need to change the mgm, 547 * only update the duplicates list */ 548 list_del(&dqp->list); 549 kfree(dqp); 550 } else { 551 err = mlx4_READ_ENTRY(dev, entry->index, mailbox); 552 if (err) 553 goto out_mailbox; 554 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 555 for (i = 0; i < members_count; ++i) 556 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) { 557 loc = i; 558 break; 559 } 560 561 if (loc < 0) { 562 mlx4_err(dev, "QP %06x wasn't found in entry %d\n", 563 qpn, entry->index); 564 err = -EINVAL; 565 goto out_mailbox; 566 } 567 568 /* copy the last QP in this MGM over removed QP */ 569 mgm->qp[loc] = mgm->qp[members_count - 1]; 570 mgm->qp[members_count - 1] = 0; 571 mgm->members_count = cpu_to_be32(--members_count | 
572 (MLX4_PROT_ETH << 30)); 573 574 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); 575 if (err) 576 goto out_mailbox; 577 } 578 579 } 580 581out_mailbox: 582 mlx4_free_cmd_mailbox(dev, mailbox); 583out_list: 584 if (back_to_list) 585 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); 586 else 587 kfree(pqp); 588out_mutex: 589 mutex_unlock(&priv->mcg_table.mutex); 590 return err; 591} 592 593/* 594 * Caller must hold MCG table semaphore. gid and mgm parameters must 595 * be properly aligned for command interface. 596 * 597 * Returns 0 unless a firmware command error occurs. 598 * 599 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1 600 * and *mgm holds MGM entry. 601 * 602 * if GID is found in AMGM, *index = index in AMGM, *prev = index of 603 * previous entry in hash chain and *mgm holds AMGM entry. 604 * 605 * If no AMGM exists for given gid, *index = -1, *prev = index of last 606 * entry in hash chain and *mgm holds end of hash chain. 607 */ 608static int find_entry(struct mlx4_dev *dev, u8 port, 609 u8 *gid, enum mlx4_protocol prot, 610 struct mlx4_cmd_mailbox *mgm_mailbox, 611 int *prev, int *index) 612{ 613 struct mlx4_cmd_mailbox *mailbox; 614 struct mlx4_mgm *mgm = mgm_mailbox->buf; 615 u8 *mgid; 616 int err; 617 u16 hash; 618 u8 op_mod = (prot == MLX4_PROT_ETH) ? 
619 !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0; 620 621 mailbox = mlx4_alloc_cmd_mailbox(dev); 622 if (IS_ERR(mailbox)) 623 return -ENOMEM; 624 mgid = mailbox->buf; 625 626 memcpy(mgid, gid, 16); 627 628 err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod); 629 mlx4_free_cmd_mailbox(dev, mailbox); 630 if (err) 631 return err; 632 633 if (0) 634 mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash); 635 636 *index = hash; 637 *prev = -1; 638 639 do { 640 err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox); 641 if (err) 642 return err; 643 644 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { 645 if (*index != hash) { 646 mlx4_err(dev, "Found zero MGID in AMGM.\n"); 647 err = -EINVAL; 648 } 649 return err; 650 } 651 652 if (!memcmp(mgm->gid, gid, 16) && 653 be32_to_cpu(mgm->members_count) >> 30 == prot) 654 return err; 655 656 *prev = *index; 657 *index = be32_to_cpu(mgm->next_gid_index) >> 6; 658 } while (*index); 659 660 *index = -1; 661 return err; 662} 663 664static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl, 665 struct mlx4_net_trans_rule_hw_ctrl *hw) 666{ 667 static const u8 __promisc_mode[] = { 668 [MLX4_FS_REGULAR] = 0x0, 669 [MLX4_FS_ALL_DEFAULT] = 0x1, 670 [MLX4_FS_MC_DEFAULT] = 0x3, 671 [MLX4_FS_UC_SNIFFER] = 0x4, 672 [MLX4_FS_MC_SNIFFER] = 0x5, 673 }; 674 675 u32 dw = 0; 676 677 dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0; 678 dw |= ctrl->exclusive ? (1 << 2) : 0; 679 dw |= ctrl->allow_loopback ? 
(1 << 3) : 0; 680 dw |= __promisc_mode[ctrl->promisc_mode] << 8; 681 dw |= ctrl->priority << 16; 682 683 hw->ctrl = cpu_to_be32(dw); 684 hw->port = ctrl->port; 685 hw->qpn = cpu_to_be32(ctrl->qpn); 686} 687 688const u16 __sw_id_hw[] = { 689 [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001, 690 [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005, 691 [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003, 692 [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002, 693 [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004, 694 [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006 695}; 696 697static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec, 698 struct _rule_hw *rule_hw) 699{ 700 static const size_t __rule_hw_sz[] = { 701 [MLX4_NET_TRANS_RULE_ID_ETH] = 702 sizeof(struct mlx4_net_trans_rule_hw_eth), 703 [MLX4_NET_TRANS_RULE_ID_IB] = 704 sizeof(struct mlx4_net_trans_rule_hw_ib), 705 [MLX4_NET_TRANS_RULE_ID_IPV6] = 0, 706 [MLX4_NET_TRANS_RULE_ID_IPV4] = 707 sizeof(struct mlx4_net_trans_rule_hw_ipv4), 708 [MLX4_NET_TRANS_RULE_ID_TCP] = 709 sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), 710 [MLX4_NET_TRANS_RULE_ID_UDP] = 711 sizeof(struct mlx4_net_trans_rule_hw_tcp_udp) 712 }; 713 if (spec->id >= MLX4_NET_TRANS_RULE_NUM) { 714 mlx4_err(dev, "Invalid network rule id. 
id = %d\n", spec->id); 715 return -EINVAL; 716 } 717 memset(rule_hw, 0, __rule_hw_sz[spec->id]); 718 rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]); 719 rule_hw->size = __rule_hw_sz[spec->id] >> 2; 720 721 switch (spec->id) { 722 case MLX4_NET_TRANS_RULE_ID_ETH: 723 memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN); 724 memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk, 725 ETH_ALEN); 726 memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN); 727 memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk, 728 ETH_ALEN); 729 if (spec->eth.ether_type_enable) { 730 rule_hw->eth.ether_type_enable = 1; 731 rule_hw->eth.ether_type = spec->eth.ether_type; 732 } 733 rule_hw->eth.vlan_id = spec->eth.vlan_id; 734 rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk; 735 break; 736 737 case MLX4_NET_TRANS_RULE_ID_IB: 738 rule_hw->ib.r_u_qpn = spec->ib.r_u_qpn; 739 rule_hw->ib.qpn_mask = spec->ib.qpn_msk; 740 memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16); 741 memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16); 742 break; 743 744 case MLX4_NET_TRANS_RULE_ID_IPV6: 745 return -EOPNOTSUPP; 746 747 case MLX4_NET_TRANS_RULE_ID_IPV4: 748 rule_hw->ipv4.src_ip = spec->ipv4.src_ip; 749 rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk; 750 rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip; 751 rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk; 752 break; 753 754 case MLX4_NET_TRANS_RULE_ID_TCP: 755 case MLX4_NET_TRANS_RULE_ID_UDP: 756 rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port; 757 rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk; 758 rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port; 759 rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk; 760 break; 761 762 default: 763 return -EINVAL; 764 } 765 766 return __rule_hw_sz[spec->id]; 767} 768 769static void mlx4_err_rule(struct mlx4_dev *dev, char *str, 770 struct mlx4_net_trans_rule *rule) 771{ 772#define BUF_SIZE 256 773 struct mlx4_spec_list *cur; 774 char buf[BUF_SIZE]; 775 int len = 0; 776 777 
mlx4_err(dev, "%s", str); 778 len += snprintf(buf + len, BUF_SIZE - len, 779 "port = %d prio = 0x%x qp = 0x%x ", 780 rule->port, rule->priority, rule->qpn); 781 782 list_for_each_entry(cur, &rule->list, list) { 783 switch (cur->id) { 784 case MLX4_NET_TRANS_RULE_ID_ETH: 785 len += snprintf(buf + len, BUF_SIZE - len, 786 "dmac = %pM ", &cur->eth.dst_mac); 787 if (cur->eth.ether_type) 788 len += snprintf(buf + len, BUF_SIZE - len, 789 "ethertype = 0x%x ", 790 be16_to_cpu(cur->eth.ether_type)); 791 if (cur->eth.vlan_id) 792 len += snprintf(buf + len, BUF_SIZE - len, 793 "vlan-id = %d ", 794 be16_to_cpu(cur->eth.vlan_id)); 795 break; 796 797 case MLX4_NET_TRANS_RULE_ID_IPV4: 798 if (cur->ipv4.src_ip) 799 len += snprintf(buf + len, BUF_SIZE - len, 800 "src-ip = %pI4 ", 801 &cur->ipv4.src_ip); 802 if (cur->ipv4.dst_ip) 803 len += snprintf(buf + len, BUF_SIZE - len, 804 "dst-ip = %pI4 ", 805 &cur->ipv4.dst_ip); 806 break; 807 808 case MLX4_NET_TRANS_RULE_ID_TCP: 809 case MLX4_NET_TRANS_RULE_ID_UDP: 810 if (cur->tcp_udp.src_port) 811 len += snprintf(buf + len, BUF_SIZE - len, 812 "src-port = %d ", 813 be16_to_cpu(cur->tcp_udp.src_port)); 814 if (cur->tcp_udp.dst_port) 815 len += snprintf(buf + len, BUF_SIZE - len, 816 "dst-port = %d ", 817 be16_to_cpu(cur->tcp_udp.dst_port)); 818 break; 819 820 case MLX4_NET_TRANS_RULE_ID_IB: 821 len += snprintf(buf + len, BUF_SIZE - len, 822 "dst-gid = %pI6\n", cur->ib.dst_gid); 823 len += snprintf(buf + len, BUF_SIZE - len, 824 "dst-gid-mask = %pI6\n", 825 cur->ib.dst_gid_msk); 826 break; 827 828 case MLX4_NET_TRANS_RULE_ID_IPV6: 829 break; 830 831 default: 832 break; 833 } 834 } 835 len += snprintf(buf + len, BUF_SIZE - len, "\n"); 836 mlx4_err(dev, "%s", buf); 837 838 if (len >= BUF_SIZE) 839 mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n"); 840} 841 842int mlx4_flow_attach(struct mlx4_dev *dev, 843 struct mlx4_net_trans_rule *rule, u64 *reg_id) 844{ 845 struct mlx4_cmd_mailbox *mailbox; 846 
struct mlx4_spec_list *cur; 847 u32 size = 0; 848 int ret; 849 850 mailbox = mlx4_alloc_cmd_mailbox(dev); 851 if (IS_ERR(mailbox)) 852 return PTR_ERR(mailbox); 853 854 memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl)); 855 trans_rule_ctrl_to_hw(rule, mailbox->buf); 856 857 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); 858 859 list_for_each_entry(cur, &rule->list, list) { 860 ret = parse_trans_rule(dev, cur, mailbox->buf + size); 861 if (ret < 0) { 862 mlx4_free_cmd_mailbox(dev, mailbox); 863 return -EINVAL; 864 } 865 size += ret; 866 } 867 868 ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id); 869 if (ret == -ENOMEM) 870 mlx4_err_rule(dev, 871 "mcg table is full. Fail to register network rule.\n", 872 rule); 873 else if (ret) 874 mlx4_err_rule(dev, "Fail to register network rule.\n", rule); 875 876 mlx4_free_cmd_mailbox(dev, mailbox); 877 878 return ret; 879} 880EXPORT_SYMBOL_GPL(mlx4_flow_attach); 881 882int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id) 883{ 884 int err; 885 886 err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id); 887 if (err) 888 mlx4_err(dev, "Fail to detach network rule. 
registration id = 0x%llx\n", 889 (long long)reg_id); 890 return err; 891} 892EXPORT_SYMBOL_GPL(mlx4_flow_detach); 893 894int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, u32 max_range_qpn) 895{ 896 int err; 897 u64 in_param; 898 899 in_param = ((u64) min_range_qpn) << 32; 900 in_param |= ((u64) max_range_qpn) & 0xFFFFFFFF; 901 902 err = mlx4_cmd(dev, in_param, 0, 0, 903 MLX4_FLOW_STEERING_IB_UC_QP_RANGE, 904 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 905 906 return err; 907} 908EXPORT_SYMBOL_GPL(mlx4_FLOW_STEERING_IB_UC_QP_RANGE); 909 910int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 911 int block_mcast_loopback, enum mlx4_protocol prot, 912 enum mlx4_steer_type steer) 913{ 914 struct mlx4_priv *priv = mlx4_priv(dev); 915 struct mlx4_cmd_mailbox *mailbox; 916 struct mlx4_mgm *mgm; 917 u32 members_count; 918 int index, prev; 919 int link = 0; 920 int i; 921 int err; 922 u8 port = gid[5]; 923 u8 new_entry = 0; 924 925 mailbox = mlx4_alloc_cmd_mailbox(dev); 926 if (IS_ERR(mailbox)) 927 return PTR_ERR(mailbox); 928 mgm = mailbox->buf; 929 930 mutex_lock(&priv->mcg_table.mutex); 931 err = find_entry(dev, port, gid, prot, 932 mailbox, &prev, &index); 933 if (err) 934 goto out; 935 936 if (index != -1) { 937 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { 938 new_entry = 1; 939 memcpy(mgm->gid, gid, 16); 940 } 941 } else { 942 link = 1; 943 944 index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap); 945 if (index == -1) { 946 mlx4_err(dev, "No AMGM entries left\n"); 947 err = -ENOMEM; 948 goto out; 949 } 950 index += dev->caps.num_mgms; 951 952 new_entry = 1; 953 memset(mgm, 0, sizeof *mgm); 954 memcpy(mgm->gid, gid, 16); 955 } 956 957 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 958 if (members_count == dev->caps.num_qp_per_mgm) { 959 mlx4_err(dev, "MGM at index %x is full.\n", index); 960 err = -ENOMEM; 961 goto out; 962 } 963 964 for (i = 0; i < members_count; ++i) 965 if 
((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { 966 mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn); 967 err = 0; 968 goto out; 969 } 970 971 mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) | 972 (!!mlx4_blck_lb << MGM_BLCK_LB_BIT)); 973 974 mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30); 975 976 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 977 if (err) 978 goto out; 979 980 if (!link) 981 goto out; 982 983 err = mlx4_READ_ENTRY(dev, prev, mailbox); 984 if (err) 985 goto out; 986 987 mgm->next_gid_index = cpu_to_be32(index << 6); 988 989 err = mlx4_WRITE_ENTRY(dev, prev, mailbox); 990 if (err) 991 goto out; 992 993 if (prot == MLX4_PROT_ETH) { 994 /* manage the steering entry for promisc mode */ 995 if (new_entry) 996 new_steering_entry(dev, port, steer, index, qp->qpn); 997 else 998 existing_steering_entry(dev, port, steer, 999 index, qp->qpn); 1000 } 1001 1002out: 1003 if (err && link && index != -1) { 1004 if (index < dev->caps.num_mgms) 1005 mlx4_warn(dev, "Got AMGM index %d < %d", 1006 index, dev->caps.num_mgms); 1007 else 1008 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1009 index - dev->caps.num_mgms); 1010 } 1011 mutex_unlock(&priv->mcg_table.mutex); 1012 1013 mlx4_free_cmd_mailbox(dev, mailbox); 1014 return err; 1015} 1016 1017int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1018 enum mlx4_protocol prot, enum mlx4_steer_type steer) 1019{ 1020 struct mlx4_priv *priv = mlx4_priv(dev); 1021 struct mlx4_cmd_mailbox *mailbox; 1022 struct mlx4_mgm *mgm; 1023 u32 members_count; 1024 int prev, index; 1025 int i, loc = -1; 1026 int err; 1027 u8 port = gid[5]; 1028 bool removed_entry = false; 1029 1030 mailbox = mlx4_alloc_cmd_mailbox(dev); 1031 if (IS_ERR(mailbox)) 1032 return PTR_ERR(mailbox); 1033 mgm = mailbox->buf; 1034 1035 mutex_lock(&priv->mcg_table.mutex); 1036 1037 err = find_entry(dev, port, gid, prot, 1038 mailbox, &prev, &index); 1039 if (err) 1040 goto out; 1041 1042 
if (index == -1) { 1043 mlx4_err(dev, "MGID %pI6 not found\n", gid); 1044 err = -EINVAL; 1045 goto out; 1046 } 1047 1048 /* if this pq is also a promisc qp, it shouldn't be removed */ 1049 if (prot == MLX4_PROT_ETH && 1050 check_duplicate_entry(dev, port, steer, index, qp->qpn)) 1051 goto out; 1052 1053 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 1054 for (i = 0; i < members_count; ++i) 1055 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { 1056 loc = i; 1057 break; 1058 } 1059 1060 if (loc == -1) { 1061 mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn); 1062 err = -EINVAL; 1063 goto out; 1064 } 1065 1066 /* copy the last QP in this MGM over removed QP */ 1067 mgm->qp[loc] = mgm->qp[members_count - 1]; 1068 mgm->qp[members_count - 1] = 0; 1069 mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30); 1070 1071 if (prot == MLX4_PROT_ETH) 1072 removed_entry = can_remove_steering_entry(dev, port, steer, 1073 index, qp->qpn); 1074 if (members_count && (prot != MLX4_PROT_ETH || !removed_entry)) { 1075 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 1076 goto out; 1077 } 1078 1079 /* We are going to delete the entry, members count should be 0 */ 1080 mgm->members_count = cpu_to_be32((u32) prot << 30); 1081 1082 if (prev == -1) { 1083 /* Remove entry from MGM */ 1084 int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; 1085 if (amgm_index) { 1086 err = mlx4_READ_ENTRY(dev, amgm_index, mailbox); 1087 if (err) 1088 goto out; 1089 } else 1090 memset(mgm->gid, 0, 16); 1091 1092 err = mlx4_WRITE_ENTRY(dev, index, mailbox); 1093 if (err) 1094 goto out; 1095 1096 if (amgm_index) { 1097 if (amgm_index < dev->caps.num_mgms) 1098 mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d", 1099 index, amgm_index, dev->caps.num_mgms); 1100 else 1101 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1102 amgm_index - dev->caps.num_mgms); 1103 } 1104 } else { 1105 /* Remove entry from AMGM */ 1106 int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; 
1107 err = mlx4_READ_ENTRY(dev, prev, mailbox); 1108 if (err) 1109 goto out; 1110 1111 mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); 1112 1113 err = mlx4_WRITE_ENTRY(dev, prev, mailbox); 1114 if (err) 1115 goto out; 1116 1117 if (index < dev->caps.num_mgms) 1118 mlx4_warn(dev, "entry %d had next AMGM index %d < %d", 1119 prev, index, dev->caps.num_mgms); 1120 else 1121 mlx4_bitmap_free(&priv->mcg_table.bitmap, 1122 index - dev->caps.num_mgms); 1123 } 1124 1125out: 1126 mutex_unlock(&priv->mcg_table.mutex); 1127 1128 mlx4_free_cmd_mailbox(dev, mailbox); 1129 return err; 1130} 1131 1132static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp, 1133 u8 gid[16], u8 attach, u8 block_loopback, 1134 enum mlx4_protocol prot) 1135{ 1136 struct mlx4_cmd_mailbox *mailbox; 1137 int err = 0; 1138 int qpn; 1139 1140 if (!mlx4_is_mfunc(dev)) 1141 return -EBADF; 1142 1143 mailbox = mlx4_alloc_cmd_mailbox(dev); 1144 if (IS_ERR(mailbox)) 1145 return PTR_ERR(mailbox); 1146 1147 memcpy(mailbox->buf, gid, 16); 1148 qpn = qp->qpn; 1149 qpn |= (prot << 28); 1150 if (attach && block_loopback) 1151 qpn |= (1U << 31); 1152 1153 err = mlx4_cmd(dev, mailbox->dma, qpn, attach, 1154 MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A, 1155 MLX4_CMD_WRAPPED); 1156 1157 mlx4_free_cmd_mailbox(dev, mailbox); 1158 return err; 1159} 1160 1161int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1162 u8 port, int block_mcast_loopback, 1163 enum mlx4_protocol prot, u64 *reg_id) 1164{ 1165 1166 switch (dev->caps.steering_mode) { 1167 case MLX4_STEERING_MODE_A0: 1168 if (prot == MLX4_PROT_ETH) 1169 return 0; 1170 1171 case MLX4_STEERING_MODE_B0: 1172 if (prot == MLX4_PROT_ETH) 1173 gid[7] |= (MLX4_MC_STEER << 1); 1174 1175 if (mlx4_is_mfunc(dev)) 1176 return mlx4_QP_ATTACH(dev, qp, gid, 1, 1177 block_mcast_loopback, prot); 1178 return mlx4_qp_attach_common(dev, qp, gid, 1179 block_mcast_loopback, prot, 1180 MLX4_MC_STEER); 1181 1182 case 
MLX4_STEERING_MODE_DEVICE_MANAGED: { 1183 struct mlx4_spec_list spec = { {NULL} }; 1184 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 1185 1186 struct mlx4_net_trans_rule rule = { 1187 .queue_mode = MLX4_NET_TRANS_Q_FIFO, 1188 .exclusive = 0, 1189 .promisc_mode = MLX4_FS_REGULAR, 1190 .priority = MLX4_DOMAIN_NIC, 1191 }; 1192 1193 rule.allow_loopback = !block_mcast_loopback; 1194 rule.port = port; 1195 rule.qpn = qp->qpn; 1196 INIT_LIST_HEAD(&rule.list); 1197 1198 switch (prot) { 1199 case MLX4_PROT_ETH: 1200 spec.id = MLX4_NET_TRANS_RULE_ID_ETH; 1201 memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN); 1202 memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 1203 break; 1204 1205 case MLX4_PROT_IB_IPV6: 1206 spec.id = MLX4_NET_TRANS_RULE_ID_IB; 1207 memcpy(spec.ib.dst_gid, gid, 16); 1208 memset(&spec.ib.dst_gid_msk, 0xff, 16); 1209 break; 1210 default: 1211 return -EINVAL; 1212 } 1213 list_add_tail(&spec.list, &rule.list); 1214 1215 return mlx4_flow_attach(dev, &rule, reg_id); 1216 } 1217 1218 default: 1219 return -EINVAL; 1220 } 1221} 1222EXPORT_SYMBOL_GPL(mlx4_multicast_attach); 1223 1224int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1225 enum mlx4_protocol prot, u64 reg_id) 1226{ 1227 switch (dev->caps.steering_mode) { 1228 case MLX4_STEERING_MODE_A0: 1229 if (prot == MLX4_PROT_ETH) 1230 return 0; 1231 1232 case MLX4_STEERING_MODE_B0: 1233 if (prot == MLX4_PROT_ETH) 1234 gid[7] |= (MLX4_MC_STEER << 1); 1235 1236 if (mlx4_is_mfunc(dev)) 1237 return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); 1238 1239 return mlx4_qp_detach_common(dev, qp, gid, prot, 1240 MLX4_MC_STEER); 1241 1242 case MLX4_STEERING_MODE_DEVICE_MANAGED: 1243 return mlx4_flow_detach(dev, reg_id); 1244 1245 default: 1246 return -EINVAL; 1247 } 1248} 1249EXPORT_SYMBOL_GPL(mlx4_multicast_detach); 1250 1251int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, 1252 u32 qpn, enum mlx4_net_trans_promisc_mode mode) 1253{ 1254 struct mlx4_net_trans_rule rule; 1255 u64 
*regid_p; 1256 1257 switch (mode) { 1258 case MLX4_FS_ALL_DEFAULT: 1259 regid_p = &dev->regid_promisc_array[port]; 1260 break; 1261 case MLX4_FS_MC_DEFAULT: 1262 regid_p = &dev->regid_allmulti_array[port]; 1263 break; 1264 default: 1265 return -1; 1266 } 1267 1268 if (*regid_p != 0) 1269 return -1; 1270 1271 rule.promisc_mode = mode; 1272 rule.port = port; 1273 rule.qpn = qpn; 1274 INIT_LIST_HEAD(&rule.list); 1275 mlx4_err(dev, "going promisc on %x\n", port); 1276 1277 return mlx4_flow_attach(dev, &rule, regid_p); 1278} 1279EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add); 1280 1281int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port, 1282 enum mlx4_net_trans_promisc_mode mode) 1283{ 1284 int ret; 1285 u64 *regid_p; 1286 1287 switch (mode) { 1288 case MLX4_FS_ALL_DEFAULT: 1289 regid_p = &dev->regid_promisc_array[port]; 1290 break; 1291 case MLX4_FS_MC_DEFAULT: 1292 regid_p = &dev->regid_allmulti_array[port]; 1293 break; 1294 default: 1295 return -1; 1296 } 1297 1298 if (*regid_p == 0) 1299 return -1; 1300 1301 ret = mlx4_flow_detach(dev, *regid_p); 1302 if (ret == 0) 1303 *regid_p = 0; 1304 1305 return ret; 1306} 1307EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove); 1308 1309int mlx4_unicast_attach(struct mlx4_dev *dev, 1310 struct mlx4_qp *qp, u8 gid[16], 1311 int block_mcast_loopback, enum mlx4_protocol prot) 1312{ 1313 if (prot == MLX4_PROT_ETH) 1314 gid[7] |= (MLX4_UC_STEER << 1); 1315 1316 if (mlx4_is_mfunc(dev)) 1317 return mlx4_QP_ATTACH(dev, qp, gid, 1, 1318 block_mcast_loopback, prot); 1319 1320 return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback, 1321 prot, MLX4_UC_STEER); 1322} 1323EXPORT_SYMBOL_GPL(mlx4_unicast_attach); 1324 1325int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, 1326 u8 gid[16], enum mlx4_protocol prot) 1327{ 1328 if (prot == MLX4_PROT_ETH) 1329 gid[7] |= (MLX4_UC_STEER << 1); 1330 1331 if (mlx4_is_mfunc(dev)) 1332 return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); 1333 1334 return 
mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER); 1335} 1336EXPORT_SYMBOL_GPL(mlx4_unicast_detach); 1337 1338int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, 1339 struct mlx4_vhcr *vhcr, 1340 struct mlx4_cmd_mailbox *inbox, 1341 struct mlx4_cmd_mailbox *outbox, 1342 struct mlx4_cmd_info *cmd) 1343{ 1344 u32 qpn = (u32) vhcr->in_param & 0xffffffff; 1345 u8 port = vhcr->in_param >> 62; 1346 enum mlx4_steer_type steer = vhcr->in_modifier; 1347 1348 /* Promiscuous unicast is not allowed in mfunc */ 1349 if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER) 1350 return 0; 1351 1352 if (vhcr->op_modifier) 1353 return add_promisc_qp(dev, port, steer, qpn); 1354 else 1355 return remove_promisc_qp(dev, port, steer, qpn); 1356} 1357 1358static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn, 1359 enum mlx4_steer_type steer, u8 add, u8 port) 1360{ 1361 return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add, 1362 MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A, 1363 MLX4_CMD_WRAPPED); 1364} 1365 1366int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) 1367{ 1368 if (mlx4_is_mfunc(dev)) 1369 return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port); 1370 1371 return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn); 1372} 1373EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add); 1374 1375int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) 1376{ 1377 if (mlx4_is_mfunc(dev)) 1378 return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port); 1379 1380 return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn); 1381} 1382EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove); 1383 1384int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) 1385{ 1386 if (mlx4_is_mfunc(dev)) 1387 return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port); 1388 1389 return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn); 1390} 1391EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add); 1392 1393int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) 1394{ 1395 if 
(mlx4_is_mfunc(dev)) 1396 return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port); 1397 1398 return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn); 1399} 1400EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove); 1401 1402int mlx4_init_mcg_table(struct mlx4_dev *dev) 1403{ 1404 struct mlx4_priv *priv = mlx4_priv(dev); 1405 int err; 1406 1407 /* No need for mcg_table when fw managed the mcg table*/ 1408 if (dev->caps.steering_mode == 1409 MLX4_STEERING_MODE_DEVICE_MANAGED) 1410 return 0; 1411 err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms, 1412 dev->caps.num_amgms - 1, 0, 0); 1413 if (err) 1414 return err; 1415 1416 mutex_init(&priv->mcg_table.mutex); 1417 1418 return 0; 1419} 1420 1421void mlx4_cleanup_mcg_table(struct mlx4_dev *dev) 1422{ 1423 if (dev->caps.steering_mode != 1424 MLX4_STEERING_MODE_DEVICE_MANAGED) 1425 mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap); 1426} 1427