/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>

#define LINUXKPI_PARAM_PREFIX ibcore_
#define KBUILD_MODNAME "ibcore"

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"
#include "core_priv.h"

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);
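
/*
 * Note: because of LINUXKPI_PARAM_PREFIX above, the two parameters are
 * exposed with an "ibcore_" prefix.  On FreeBSD's linuxkpi this
 * typically surfaces as a loader tunable, e.g. (illustrative only; the
 * exact name depends on the linuxkpi version in use):
 *
 *	compat.linuxkpi.ibcore_send_queue_size=256
 *
 * Both queue sizes are read-only (0444) once the module is loaded.
 */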
/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);

/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type)
	{
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);
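
/*
 * Sketch of a typical kernel client registration (illustrative only;
 * the handler and context names below are placeholders, not symbols
 * defined in this file):
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class         = IB_MGMT_CLASS_SUBN_ADM,
 *		.mgmt_class_version = 2,
 *	};
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	agent = ib_register_mad_agent(device, port, IB_QPT_GSI, &req, 0,
 *				      my_send_handler, my_recv_handler,
 *				      my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */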

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	if ((qp_type == IB_QPT_SMI && !rdma_cap_ib_smi(device, port_num)) ||
	    (qp_type == IB_QPT_GSI && !rdma_cap_ib_cm(device, port_num)))
		return ERR_PTR(-EPROTONOSUPPORT);

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid QP Type %d\n",
			   qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid RMPP Version %u\n",
			   rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: invalid Class Version %u\n",
				   mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: no recv_handler\n");
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_notice(&device->dev,
				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/*
	 * Verify the QP requested is supported.  For example, Ethernet
	 * devices will not have QP0.
	 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: QP %d not supported\n", qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non-overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							  mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);

static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}
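
/*
 * Teardown pattern used below: an agent holds one base reference from
 * registration.  Unregistration drops that reference and then sleeps
 * on the completion, so it proceeds only once every in-flight send or
 * receive path that took a temporary reference has dropped it via
 * deref_mad_agent()/deref_snoop_agent().
 */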

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work_sync(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}
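
/*
 * The two snoop dispatchers below share a locking pattern: the snoop
 * table is walked under snoop_lock, but the lock is dropped (after
 * taking a reference on the entry) around each handler callback so a
 * snoop handler may sleep; the reference keeps the entry alive until
 * deref_snoop_agent() runs and the lock is re-taken.
 */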

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
		u16 pkey_index, u8 port_num, struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_cqe = cqe;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

static size_t mad_priv_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	size_t size = sizeof(struct ib_mad_private) + mad_size;
	struct ib_mad_private *ret = kzalloc(size, flags);

	if (ret)
		ret->mad_size = mad_size;

	return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_grh) + mp->mad_size;
}
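
/*
 * handle_outgoing_dr_smp() below implements the local loopback case of
 * directed-route SMPs: when the SMP is addressed to the local port (or
 * must be processed here per the SMI rules), it is handed to the
 * device's process_mad() instead of being posted to QP0, and matching
 * send/receive completions are synthesized via the local work queue.
 */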

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct opa_smp *opa_smp = (struct opa_smp *)smp;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
	u16 out_mad_pkey_index = 0;
	u16 drslid;
	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
				    mad_agent_priv->qp_info->port_priv->port_num);

	if (rdma_cap_ib_switch(device) &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
		u32 opa_drslid;

		if ((opa_get_smp_direction(opa_smp)
		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
		     OPA_LID_PERMISSIVE &&
		     opa_smi_handle_dr_smp_send(opa_smp,
						rdma_cap_ib_switch(device),
						port_num) == IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid directed route\n");
			goto out;
		}
		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
		    opa_drslid & 0xffff0000) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
				opa_drslid);
			goto out;
		}
		drslid = (u16)(opa_drslid & 0x0000ffff);

		/* Check to post send on QP or process locally */
		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
			goto out;
	} else {
		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
		     IB_LID_PERMISSIVE &&
		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
		     IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "Invalid directed route\n");
			goto out;
		}
		drslid = be16_to_cpu(smp->dr_slid);

		/* Check to post send on QP or process locally */
		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
			goto out;
	}

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr.wr_cqe, drslid,
		     send_wr->pkey_index,
		     send_wr->port_num, &mad_wc);

	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
					+ mad_send_wr->send_buf.data_len
					+ sizeof(struct ib_grh);
	}

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (const struct ib_mad_hdr *)smp, mad_size,
				  (struct ib_mad_hdr *)mad_priv->mad,
				  &mad_size, &out_mad_pkey_index);
	switch (ret)
	{
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
			recv_mad_agent = find_mad_agent(port_priv,
				(const struct ib_mad_hdr *)mad_priv->mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kfree(mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kfree(mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	if (opa) {
		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
		local->return_wc_byte_len = mad_size;
	}
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;

	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}
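
/*
 * Worked example (illustrative): for an IB MAD (mad_size 256) with an
 * SA header (hdr_len 56), seg_size = 256 - 56 = 200.  A data_len of
 * 150 yields pad = 200 - (150 % 200) = 50, so the final RMPP segment
 * is padded out to a full 200 bytes; a data_len of 400 divides evenly
 * and needs no padding.
 */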

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				size_t mad_size, gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = mad_size - send_buf->hdr_len;
	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);

struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int rmpp_active,
					    int hdr_len, int data_len,
					    gfp_t gfp_mask,
					    u8 base_version)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;
	size_t mad_size;
	bool opa;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);

	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

	if (opa && base_version == OPA_MGMT_BASE_VERSION)
		mad_size = sizeof(struct opa_mad);
	else
		mad_size = sizeof(struct ib_mad);

	pad = get_pad_size(hdr_len, data_len, mad_size);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > mad_size)
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > mad_size)
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : mad_size;
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = (struct ib_mad_send_wr_private *)((char *)buf + size);
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

	/* OPA MADs don't have to be the full 2048 bytes */
	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
	    data_len < mad_size - hdr_len)
		mad_send_wr->sg_list[1].length = data_len;
	else
		mad_send_wr->sg_list[1].length = mad_size - hdr_len;

	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;

	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.wr.num_sge = 2;
	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
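
/*
 * Sketch of the allocate/send/free cycle from a client's perspective
 * (illustrative only; "agent" and "ah" are assumed to already exist):
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_MAD_HDR,
 *				 sizeof(struct ib_mad) - IB_MGMT_MAD_HDR,
 *				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (!IS_ERR(msg)) {
 *		msg->ah = ah;
 *		msg->timeout_ms = 100;
 *		msg->retries = 3;
 *		if (ib_post_send_mad(msg, NULL))
 *			ib_free_send_mad(msg);
 *	}
 *
 * On success the buffer is released later, typically from the agent's
 * send_handler once the send completes or times out.
 */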

int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return (char *)mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	const struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
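
/*
 * Note on the send path above: if the hardware send queue is already
 * at max_active, ib_send_mad() parks the work request on the
 * overflow_list instead of posting it, and still reports success;
 * queued entries are expected to be drained by the send-completion
 * path as earlier sends finish, so callers see a uniform return value.
 */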

/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
				 &free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kfree(priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);

struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	dev_err(&mad_agent->device->dev,
		"ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);

static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			pr_err("Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	return (*method) ? 0 : (-ENOMEM);
}

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   const char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there a matching OUI for this vendor class? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;

	return 0;
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}

static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}

static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor)
			goto error1;

		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class)
			goto error2;

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there a matching OUI for this vendor class? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			if (!*method)
				goto error3;
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			/* Allocate method table for this OUI */
			if (!*method) {
				ret = allocate_method_table(method);
				if (ret)
					goto error3;
			}
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}

static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was a MAD registration request supplied
	 * with the original registration?
	 */
	if (!agent_priv->reg_req) {
		goto out;
	}

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}
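
/*
 * find_mad_agent() below routes an incoming MAD to its consumer.  For
 * responses, only the high 32 bits of the TID matter: they carry the
 * agent's hi_tid assigned at registration, while the low 32 bits are
 * chosen by the client.  Unsolicited MADs instead fall through to the
 * class/version/method (and, for vendor classes, OUI) tables built by
 * add_nonoui_reg_req()/add_oui_reg_req().
 */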
1715 */ 1716 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32; 1717 list_for_each_entry(entry, &port_priv->agent_list, agent_list) { 1718 if (entry->agent.hi_tid == hi_tid) { 1719 mad_agent = entry; 1720 break; 1721 } 1722 } 1723 } else { 1724 struct ib_mad_mgmt_class_table *class; 1725 struct ib_mad_mgmt_method_table *method; 1726 struct ib_mad_mgmt_vendor_class_table *vendor; 1727 struct ib_mad_mgmt_vendor_class *vendor_class; 1728 const struct ib_vendor_mad *vendor_mad; 1729 int index; 1730 1731 /* 1732 * Routing is based on version, class, and method 1733 * For "newer" vendor MADs, also based on OUI 1734 */ 1735 if (mad_hdr->class_version >= MAX_MGMT_VERSION) 1736 goto out; 1737 if (!is_vendor_class(mad_hdr->mgmt_class)) { 1738 class = port_priv->version[ 1739 mad_hdr->class_version].class; 1740 if (!class) 1741 goto out; 1742 if (convert_mgmt_class(mad_hdr->mgmt_class) >= 1743 ARRAY_SIZE(class->method_table)) 1744 goto out; 1745 method = class->method_table[convert_mgmt_class( 1746 mad_hdr->mgmt_class)]; 1747 if (method) 1748 mad_agent = method->agent[mad_hdr->method & 1749 ~IB_MGMT_METHOD_RESP]; 1750 } else { 1751 vendor = port_priv->version[ 1752 mad_hdr->class_version].vendor; 1753 if (!vendor) 1754 goto out; 1755 vendor_class = vendor->vendor_class[vendor_class_index( 1756 mad_hdr->mgmt_class)]; 1757 if (!vendor_class) 1758 goto out; 1759 /* Find matching OUI */ 1760 vendor_mad = (const struct ib_vendor_mad *)mad_hdr; 1761 index = find_vendor_oui(vendor_class, vendor_mad->oui); 1762 if (index == -1) 1763 goto out; 1764 method = vendor_class->method_table[index]; 1765 if (method) { 1766 mad_agent = method->agent[mad_hdr->method & 1767 ~IB_MGMT_METHOD_RESP]; 1768 } 1769 } 1770 } 1771 1772 if (mad_agent) { 1773 if (mad_agent->agent.recv_handler) 1774 atomic_inc(&mad_agent->refcount); 1775 else { 1776 dev_notice(&port_priv->device->dev, 1777 "No receive handler for client %p on port %d\n", 1778 &mad_agent->agent, port_priv->port_num); 1779 mad_agent = NULL; 1780 } 1781 } 1782out: 1783 spin_unlock_irqrestore(&port_priv->reg_lock, flags); 1784 1785 return mad_agent; 1786} 1787 1788static int validate_mad(const struct ib_mad_hdr *mad_hdr, 1789 const struct ib_mad_qp_info *qp_info, 1790 bool opa) 1791{ 1792 int valid = 0; 1793 u32 qp_num = qp_info->qp->qp_num; 1794 1795 /* Make sure MAD base version is understood */ 1796 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION && 1797 (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) { 1798 pr_err("MAD received with unsupported base version %d %s\n", 1799 mad_hdr->base_version, opa ? 
"(opa)" : ""); 1800 goto out; 1801 } 1802 1803 /* Filter SMI packets sent to other than QP0 */ 1804 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) || 1805 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { 1806 if (qp_num == 0) 1807 valid = 1; 1808 } else { 1809 /* CM attributes other than ClassPortInfo only use Send method */ 1810 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) && 1811 (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) && 1812 (mad_hdr->method != IB_MGMT_METHOD_SEND)) 1813 goto out; 1814 /* Filter GSI packets sent to QP0 */ 1815 if (qp_num != 0) 1816 valid = 1; 1817 } 1818 1819out: 1820 return valid; 1821} 1822 1823static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv, 1824 const struct ib_mad_hdr *mad_hdr) 1825{ 1826 const struct ib_rmpp_mad *rmpp_mad; 1827 1828 rmpp_mad = (const struct ib_rmpp_mad *)mad_hdr; 1829 return !mad_agent_priv->agent.rmpp_version || 1830 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) || 1831 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & 1832 IB_MGMT_RMPP_FLAG_ACTIVE) || 1833 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); 1834} 1835 1836static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr, 1837 const struct ib_mad_recv_wc *rwc) 1838{ 1839 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class == 1840 rwc->recv_buf.mad->mad_hdr.mgmt_class; 1841} 1842 1843static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv, 1844 const struct ib_mad_send_wr_private *wr, 1845 const struct ib_mad_recv_wc *rwc ) 1846{ 1847 struct ib_ah_attr attr; 1848 u8 send_resp, rcv_resp; 1849 union ib_gid sgid; 1850 struct ib_device *device = mad_agent_priv->agent.device; 1851 u8 port_num = mad_agent_priv->agent.port_num; 1852 u8 lmc; 1853 1854 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad); 1855 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr); 1856 1857 if (send_resp == rcv_resp) 1858 /* both requests, or both responses. GIDs different */ 1859 return 0; 1860 1861 if (ib_query_ah(wr->send_buf.ah, &attr)) 1862 /* Assume not equal, to avoid false positives. */ 1863 return 0; 1864 1865 if (!!(attr.ah_flags & IB_AH_GRH) != 1866 !!(rwc->wc->wc_flags & IB_WC_GRH)) 1867 /* one has GID, other does not. Assume different */ 1868 return 0; 1869 1870 if (!send_resp && rcv_resp) { 1871 /* is request/response. 
static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
			    const struct ib_mad_hdr *mad_hdr)
{
	const struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (const struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
		  IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}

static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
				     const struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}

static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
				   const struct ib_mad_send_wr_private *wr,
				   const struct ib_mad_recv_wc *rwc)
{
	struct ib_ah_attr attr;
	u8 send_resp, rcv_resp;
	union ib_gid sgid;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	u8 lmc;

	send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
	rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);

	if (send_resp == rcv_resp)
		/* both requests, or both responses: treat as not matching */
		return 0;

	if (ib_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */
		return 0;

	if (!!(attr.ah_flags & IB_AH_GRH) !=
	    !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has a GRH, the other does not; assume different */
		return 0;

	if (!send_resp && rcv_resp) {
		/* a request matched against its response */
		if (!(attr.ah_flags & IB_AH_GRH)) {
			if (ib_get_cached_lmc(device, port_num, &lmc))
				return 0;
			return (!lmc || !((attr.src_path_bits ^
					   rwc->wc->dlid_path_bits) &
					  ((1 << lmc) - 1)));
		} else {
			if (ib_get_cached_gid(device, port_num,
					      attr.grh.sgid_index, &sgid, NULL))
				return 0;
			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
				       16);
		}
	}

	if (!(attr.ah_flags & IB_AH_GRH))
		return attr.dlid == rwc->wc->slid;
	else
		return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
			       16);
}

static inline int is_direct(u8 class)
{
	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
}
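
/*
 * Matching a received response to an outstanding request needs more than
 * the TID.  The sketch below (informal, mirroring rcv_has_same_gid()
 * above) shows the two address checks used, depending on whether a GRH
 * was present; the LMC masking allows the response to arrive on any of
 * the 2^lmc LIDs assigned to the port:
 *
 *	if (no GRH)
 *		match = !lmc || ((src_path_bits ^ dlid_path_bits)
 *				 & ((1 << lmc) - 1)) == 0;
 *	else
 *		match = !memcmp(sgid, grh->dgid, 16);
 *
 * Directed-route SMPs skip this check entirely (see is_direct()), since
 * they may carry permissive LIDs.
 */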
struct ib_mad_send_wr_private*
ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
		 const struct ib_mad_recv_wc *wc)
{
	struct ib_mad_send_wr_private *wr;
	const struct ib_mad_hdr *mad_hdr;

	mad_hdr = &wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
		if ((wr->tid == mad_hdr->tid) &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
		    wr->tid == mad_hdr->tid &&
		    wr->timeout &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			/* Verify request has not been canceled */
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}
	return NULL;
}

void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1)
		list_move_tail(&mad_send_wr->agent_list,
			       &mad_send_wr->mad_agent_priv->done_list);
}

static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
						      mad_recv_wc);
		if (!mad_recv_wc) {
			deref_mad_agent(mad_agent_priv);
			return;
		}
	}

	/* Complete corresponding request */
	if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
			   && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
			   && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
					& IB_MGMT_RMPP_FLAG_ACTIVE)) {
				/* user rmpp is in effect
				 * and this is an active RMPP MAD
				 */
				mad_agent_priv->agent.recv_handler(
						&mad_agent_priv->agent, NULL,
						mad_recv_wc);
				atomic_dec(&mad_agent_priv->refcount);
			} else {
				/* not user rmpp, revert to normal behavior and
				 * drop the mad */
				ib_free_recv_mad(mad_recv_wc);
				deref_mad_agent(mad_agent_priv);
				return;
			}
		} else {
			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

			/* Defined behavior is to complete response before request */
			mad_agent_priv->agent.recv_handler(
					&mad_agent_priv->agent,
					&mad_send_wr->send_buf,
					mad_recv_wc);
			atomic_dec(&mad_agent_priv->refcount);

			mad_send_wc.status = IB_WC_SUCCESS;
			mad_send_wc.vendor_err = 0;
			mad_send_wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
		}
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
						   mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
	}
}
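
/*
 * Ordering note (a summary of the path above): when a response matches an
 * outstanding request, the client sees the callbacks in a fixed order --
 * recv_handler() for the response first, then send_handler() for the
 * request it completes:
 *
 *	recv_handler(agent, &req->send_buf, response_wc);
 *	send_handler(agent, &send_wc);	// via ib_mad_complete_send_wr()
 *
 * (req/response_wc/send_wc are illustrative names.)
 */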
static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
				     const struct ib_mad_qp_info *qp_info,
				     const struct ib_wc *wc,
				     int port_num,
				     struct ib_mad_private *recv,
				     struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct ib_smp *smp = (struct ib_smp *)recv->mad;

	if (smi_handle_dr_smp_recv(smp,
				   rdma_cap_ib_switch(port_priv->device),
				   port_num,
				   port_priv->device->phys_port_cnt) ==
	    IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (smi_handle_dr_smp_send(smp,
					   rdma_cap_ib_switch(port_priv->device),
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;
	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    response->mad_size,
				    false);

		return IB_SMI_DISCARD;
	}
	return IB_SMI_HANDLE;
}

static bool generate_unmatched_resp(const struct ib_mad_private *recv,
				    struct ib_mad_private *response,
				    size_t *resp_len, bool opa)
{
	const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
	struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;

	if (recv_hdr->method == IB_MGMT_METHOD_GET ||
	    recv_hdr->method == IB_MGMT_METHOD_SET) {
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;
		resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
		resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
		if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			resp_hdr->status |= IB_SMP_DIRECTION;

		if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
			if (recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_LID_ROUTED ||
			    recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				*resp_len = opa_get_smp_header_size(
						(const struct opa_smp *)recv->mad);
			else
				*resp_len = sizeof(struct ib_mad_hdr);
		}

		return true;
	} else {
		return false;
	}
}
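
/*
 * Note (informal): an unmatched Get/Set is answered in place as a GetResp
 * with status "unsupported method/attribute combination"; for a
 * directed-route SMP the direction bit (IB_SMP_DIRECTION) is additionally
 * set so the SMP travels back along the return path.  Any other unmatched
 * method is simply dropped (the function returns false).
 */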
static enum smi_action
handle_opa_smi(struct ib_mad_port_private *port_priv,
	       struct ib_mad_qp_info *qp_info,
	       struct ib_wc *wc,
	       int port_num,
	       struct ib_mad_private *recv,
	       struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct opa_smp *smp = (struct opa_smp *)recv->mad;

	if (opa_smi_handle_dr_smp_recv(smp,
				       rdma_cap_ib_switch(port_priv->device),
				       port_num,
				       port_priv->device->phys_port_cnt) ==
	    IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = opa_smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (opa_smi_handle_dr_smp_send(smp,
					       rdma_cap_ib_switch(port_priv->device),
					       port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (opa_smi_check_local_smp(smp, port_priv->device) ==
		    IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.opa_mad =
				(struct opa_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    opa_smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    recv->header.wc.byte_len,
				    true);

		return IB_SMI_DISCARD;
	}

	return IB_SMI_HANDLE;
}

static enum smi_action
handle_smi(struct ib_mad_port_private *port_priv,
	   struct ib_mad_qp_info *qp_info,
	   struct ib_wc *wc,
	   int port_num,
	   struct ib_mad_private *recv,
	   struct ib_mad_private *response,
	   bool opa)
{
	struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;

	if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
	    mad_hdr->class_version == OPA_SMI_CLASS_VERSION)
		return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
				      response);

	return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
}
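
/*
 * Dispatch rule (restating handle_smi() above, nothing more): a
 * directed-route SMP takes the OPA path only when the port is OPA-capable
 * *and* the MAD advertises both OPA_MGMT_BASE_VERSION and
 * OPA_SMI_CLASS_VERSION; everything else falls back to the IB SMI path,
 * e.g.:
 *
 *	handle_smi(pp, qpi, wc, 1, recv, rsp, false);	// always the IB path
 *
 * (pp/qpi/rsp are illustrative names for the arguments.)
 */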
static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response = NULL;
	struct ib_mad_agent_private *mad_agent;
	int port_num;
	int ret = IB_MAD_RESULT_SUCCESS;
	size_t mad_size;
	u16 resp_mad_pkey_index = 0;
	bool opa;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;
	}

	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	opa = rdma_cap_opa_mad(qp_info->port_priv->device,
			       qp_info->port_priv->port_num);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	ib_dma_unmap_single(port_priv->device,
			    recv->header.mapping,
			    mad_priv_dma_size(recv),
			    DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;

	if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
		recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
		recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
	} else {
		recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
		recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
	}

	recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
		goto out;

	mad_size = recv->mad_size;
	response = alloc_mad_private(mad_size, GFP_KERNEL);
	if (!response)
		goto out;

	if (rdma_cap_ib_switch(port_priv->device))
		port_num = wc->port_num;
	else
		port_num = port_priv->port_num;

	if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (handle_smi(port_priv, qp_info, wc, port_num, recv,
			       response, opa)
		    == IB_SMI_DISCARD)
			goto out;
	}

	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     (const struct ib_mad_hdr *)recv->mad,
						     recv->mad_size,
						     (struct ib_mad_hdr *)response->mad,
						     &mad_size, &resp_mad_pkey_index);

		if (opa)
			wc->pkey_index = resp_mad_pkey_index;

		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response((const struct ib_mad_hdr *)response->mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_num,
						    qp_info->qp->qp_num,
						    mad_size, opa);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
		   generate_unmatched_resp(recv, response, &mad_size, opa)) {
		agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
				    port_priv->device, port_num,
				    qp_info->qp->qp_num, mad_size, opa);
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		kfree(recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}

static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
					 &mad_agent_priv->timed_work, delay);
		}
	}
}

static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						      struct ib_mad_send_wr_private,
						      agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	} else
		list_item = &mad_agent_priv->wait_list;
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				 &mad_agent_priv->timed_work, delay);
}

void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}
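
/*
 * Invariant kept by wait_for_response()/adjust_timeout() above (described
 * informally): wait_list stays sorted by absolute timeout (in jiffies),
 * earliest first, so the delayed work item only ever needs to track the
 * head of the list.  A zero timeout goes to the head and expires on the
 * next timer pass:
 *
 *	ib_reset_mad_timeout(wr, 0);	// retired by the next timeout_sends()
 */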
/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
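
/*
 * Refcount convention used above (as far as this file shows it): a send
 * that expects a response holds two references -- one dropped by the send
 * completion, one dropped when the response arrives or the request times
 * out -- while a send with no timeout holds only one.  Hence, in
 * simplified form (the real code also checks timeout and status):
 *
 *	if (--mad_send_wr->refcount > 0)
 *		wait_for_response(mad_send_wr);	// still waiting on the reply
 */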
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	const struct ib_send_wr *bad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		if (!ib_mad_send_error(port_priv, wc))
			return;
	}

	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->header_mapping,
			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->payload_mapping,
			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					      struct ib_mad_send_wr_private,
					      mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
				   &bad_send_wr);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}

static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}

static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			const struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
					   &bad_send_wr);
			if (!ret)
				return false;
		}
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				dev_err(&port_priv->device->dev,
					"%s - ib_modify_qp to RTS: %d\n",
					__func__, ret);
			else
				mark_sends_for_retry(qp_info);
		}
	}

	return true;
}
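
/*
 * Recovery sketch (an informal restatement of ib_mad_send_error() above):
 * a non-flush send error puts the UD QP into SQE; the driver nudges it
 * back with
 *
 *	attr.qp_state = IB_QPS_RTS;
 *	attr.cur_qp_state = IB_QPS_SQE;
 *	ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_CUR_STATE);
 *
 * and marks every send still on the queue for one repost attempt, so the
 * subsequent flush completions are reposted rather than failed outright.
 */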
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}

static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv,
				     mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}

int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);

void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);
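
/*
 * Usage sketch for the two exported calls above (client side, illustrative;
 * "agent" and "send_buf" stand for objects obtained elsewhere in this API,
 * via ib_register_mad_agent() and ib_create_send_mad()):
 *
 *	ib_modify_mad(agent, send_buf, 2000);	// extend timeout to 2s
 *	ib_cancel_mad(agent, send_buf);		// same as timeout_ms = 0
 *
 * A canceled send is still reported through the agent's send_handler,
 * with status IB_WC_WR_FLUSH_ERR, rather than completing silently.
 */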
static void local_completions(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int free_mad;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;
	bool opa;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);

	opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
			       mad_agent_priv->qp_info->port_priv->port_num);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		free_mad = 0;
		if (local->mad_priv) {
			u8 base_version;
			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				dev_err(&mad_agent_priv->agent.device->dev,
					"No receive MAD agent for local completion\n");
				free_mad = 1;
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(recv_mad_agent->agent.qp,
				     local->mad_send_wr->send_wr.wr.wr_cqe,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     local->mad_send_wr->send_wr.pkey_index,
				     recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;

			base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
			if (opa && base_version == OPA_MGMT_BASE_VERSION) {
				local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
			} else {
				local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
			}

			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
				(struct ib_mad *)local->mad_priv->mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_send_wr->send_buf,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		atomic_dec(&mad_agent_priv->refcount);
		if (free_mad)
			kfree(local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries_left)
		return -ETIMEDOUT;

	mad_send_wr->retries_left--;
	mad_send_wr->send_buf.retries++;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}
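
/*
 * Retry accounting in retry_send() above, in client-visible terms
 * (informal): retries_left counts down toward -ETIMEDOUT while
 * send_buf.retries counts up, so after a send finally completes the
 * client can inspect how many resends it cost:
 *
 *	if (send_buf->retries)
 *		pr_debug("request needed %d retries\n", send_buf->retries);
 *
 * (A hypothetical client-side check; nothing in this file prints this.)
 */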
static void timeout_sends(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timed_work.work);
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr;
	const struct ib_recv_wr *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
						     GFP_ATOMIC);
			if (!mad_priv) {
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.length = mad_priv_dma_size(mad_priv);
		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
						 &mad_priv->grh,
						 mad_priv_dma_size(mad_priv),
						 DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
						  sg_list.addr))) {
			kfree(mad_priv);
			ret = -ENOMEM;
			break;
		}
		mad_priv->header.mapping = sg_list.addr;
		mad_priv->header.mad_list.mad_queue = recv_queue;
		mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
		recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			ib_dma_unmap_single(qp_info->port_priv->device,
					    mad_priv->header.mapping,
					    mad_priv_dma_size(mad_priv),
					    DMA_FROM_DEVICE);
			kfree(mad_priv);
			dev_err(&qp_info->port_priv->device->dev,
				"ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}
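
/*
 * Replenish pattern above (informal): the receive path hands its just
 * consumed buffer back via the "mad" argument, so the common case reposts
 * without allocating; passing NULL makes the loop fill the queue up to
 * recv_queue->max_active with GFP_ATOMIC allocations, e.g. at port start:
 *
 *	ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
 *
 * which is exactly how ib_mad_port_start() below calls it.
 */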
/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	if (!qp_info->qp)
		return;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    mad_priv_dma_size(recv),
				    DMA_FROM_DEVICE);
		kfree(recv);
	}

	qp_info->recv_queue.count = 0;
}

/*
 * Start the port
 */
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	u16 pkey_index;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
			   IB_DEFAULT_PKEY_FULL, &pkey_index);
	if (ret)
		pkey_index = 0;

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		if (!qp)
			continue;

		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = pkey_index;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
				   IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to INIT: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTR: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTS: %d\n",
				i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		dev_err(&port_priv->device->dev,
			"Failed to request completion notification: %d\n",
			ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		if (!port_priv->qp_info[i].qp)
			continue;

		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}
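
/*
 * Bring-up sequence used above, condensed (a reading aid, not extra
 * functionality; the attribute masks are the interesting part): each
 * special QP walks Reset -> INIT -> RTR -> RTS via
 *
 *	ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY);
 *	ib_modify_qp(qp, attr, IB_QP_STATE);
 *	ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
 *
 * QP0 uses qkey 0 while QP1 uses IB_QP1_QKEY, and the PKey index only
 * matters for the Reset-to-Init transition, as the comment above notes.
 */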
	 */
	dev_err(&qp_info->port_priv->device->dev,
		"Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}

static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}

static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = mad_sendq_size;
	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		dev_err(&qp_info->port_priv->device->dev,
			"Couldn't create ib_mad QP%d\n",
			get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = mad_sendq_size;
	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;

error:
	return ret;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	if (!qp_info->qp)
		return;

	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}
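
/*
 * Design note on create_mad_qp() above (informal): sq_sig_type is
 * IB_SIGNAL_ALL_WR, i.e. every send WR generates a completion.
 * ib_mad_send_done() relies on this -- each completion both finishes one
 * send and pulls at most one queued send off qp_info->overflow_list, so
 * unsignaled sends would leave the overflow machinery with nothing to
 * drive it.
 */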
"Couldn't create ib_mad PD\n"); 3144 ret = PTR_ERR(port_priv->pd); 3145 goto error3; 3146 } 3147 3148 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, 3149 IB_POLL_WORKQUEUE); 3150 if (IS_ERR(port_priv->cq)) { 3151 dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); 3152 ret = PTR_ERR(port_priv->cq); 3153 goto error4; 3154 } 3155 3156 if (has_smi) { 3157 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); 3158 if (ret) 3159 goto error6; 3160 } 3161 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); 3162 if (ret) 3163 goto error7; 3164 3165 snprintf(name, sizeof name, "ib_mad%d", port_num); 3166 port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); 3167 if (!port_priv->wq) { 3168 ret = -ENOMEM; 3169 goto error8; 3170 } 3171 3172 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3173 list_add_tail(&port_priv->port_list, &ib_mad_port_list); 3174 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3175 3176 ret = ib_mad_port_start(port_priv); 3177 if (ret) { 3178 dev_err(&device->dev, "Couldn't start port\n"); 3179 goto error9; 3180 } 3181 3182 return 0; 3183 3184error9: 3185 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3186 list_del_init(&port_priv->port_list); 3187 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3188 3189 destroy_workqueue(port_priv->wq); 3190error8: 3191 destroy_mad_qp(&port_priv->qp_info[1]); 3192error7: 3193 destroy_mad_qp(&port_priv->qp_info[0]); 3194error6: 3195 ib_free_cq(port_priv->cq); 3196 cleanup_recv_queue(&port_priv->qp_info[1]); 3197 cleanup_recv_queue(&port_priv->qp_info[0]); 3198error4: 3199 ib_dealloc_pd(port_priv->pd); 3200error3: 3201 kfree(port_priv); 3202 3203 return ret; 3204} 3205 3206/* 3207 * Close the port 3208 * If there are no classes using the port, free the port 3209 * resources (CQ, MR, PD, QP) and remove the port's info structure 3210 */ 3211static int ib_mad_port_close(struct ib_device *device, int port_num) 3212{ 3213 struct ib_mad_port_private *port_priv; 3214 unsigned long flags; 3215 3216 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3217 port_priv = __ib_get_mad_port(device, port_num); 3218 if (port_priv == NULL) { 3219 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3220 dev_err(&device->dev, "Port %d not found\n", port_num); 3221 return -ENODEV; 3222 } 3223 list_del_init(&port_priv->port_list); 3224 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3225 3226 destroy_workqueue(port_priv->wq); 3227 destroy_mad_qp(&port_priv->qp_info[1]); 3228 destroy_mad_qp(&port_priv->qp_info[0]); 3229 ib_free_cq(port_priv->cq); 3230 ib_dealloc_pd(port_priv->pd); 3231 cleanup_recv_queue(&port_priv->qp_info[1]); 3232 cleanup_recv_queue(&port_priv->qp_info[0]); 3233 /* XXX: Handle deallocation of MAD registration tables */ 3234 3235 kfree(port_priv); 3236 3237 return 0; 3238} 3239 3240static void ib_mad_init_device(struct ib_device *device) 3241{ 3242 int start, i; 3243 3244 start = rdma_start_port(device); 3245 3246 for (i = start; i <= rdma_end_port(device); i++) { 3247 if (!rdma_cap_ib_mad(device, i)) 3248 continue; 3249 3250 if (ib_mad_port_open(device, i)) { 3251 dev_err(&device->dev, "Couldn't open port %d\n", i); 3252 goto error; 3253 } 3254 if (ib_agent_port_open(device, i)) { 3255 dev_err(&device->dev, 3256 "Couldn't open port %d for agents\n", i); 3257 goto error_agent; 3258 } 3259 } 3260 return; 3261 3262error_agent: 3263 if (ib_mad_port_close(device, i)) 3264 dev_err(&device->dev, "Couldn't close port %d\n", i); 3265 3266error: 3267 while (--i >= start) { 3268 if 
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}

static void ib_mad_remove_device(struct ib_device *device, void *client_data)
{
	int i;

	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}

static struct ib_client mad_client = {
	.name = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};

int ib_mad_init(void)
{
	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		pr_err("Couldn't register ib_mad client\n");
		return -EINVAL;
	}

	return 0;
}

void ib_mad_cleanup(void)
{
	ib_unregister_client(&mad_client);
}