1/* 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. 4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 
33 */ 34 35#include <linux/mlx4/cmd.h> 36#include <linux/module.h> 37 38#include "fw.h" 39#include "icm.h" 40 41enum { 42 MLX4_COMMAND_INTERFACE_MIN_REV = 2, 43 MLX4_COMMAND_INTERFACE_MAX_REV = 3, 44 MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS = 3, 45}; 46 47extern void __buggy_use_of_MLX4_GET(void); 48extern void __buggy_use_of_MLX4_PUT(void); 49 50static bool enable_qos; 51module_param(enable_qos, bool, 0444); 52MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)"); 53 54#define MLX4_GET(dest, source, offset) \ 55 do { \ 56 void *__p = (char *) (source) + (offset); \ 57 switch (sizeof (dest)) { \ 58 case 1: (dest) = *(u8 *) __p; break; \ 59 case 2: (dest) = be16_to_cpup(__p); break; \ 60 case 4: (dest) = be32_to_cpup(__p); break; \ 61 case 8: (dest) = be64_to_cpup(__p); break; \ 62 default: __buggy_use_of_MLX4_GET(); \ 63 } \ 64 } while (0) 65 66#define MLX4_PUT(dest, source, offset) \ 67 do { \ 68 void *__d = ((char *) (dest) + (offset)); \ 69 switch (sizeof(source)) { \ 70 case 1: *(u8 *) __d = (source); break; \ 71 case 2: *(__be16 *) __d = cpu_to_be16(source); break; \ 72 case 4: *(__be32 *) __d = cpu_to_be32(source); break; \ 73 case 8: *(__be64 *) __d = cpu_to_be64(source); break; \ 74 default: __buggy_use_of_MLX4_PUT(); \ 75 } \ 76 } while (0) 77 78static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags) 79{ 80 static const char *fname[] = { 81 [ 0] = "RC transport", 82 [ 1] = "UC transport", 83 [ 2] = "UD transport", 84 [ 3] = "XRC transport", 85 [ 4] = "reliable multicast", 86 [ 5] = "FCoIB support", 87 [ 6] = "SRQ support", 88 [ 7] = "IPoIB checksum offload", 89 [ 8] = "P_Key violation counter", 90 [ 9] = "Q_Key violation counter", 91 [10] = "VMM", 92 [12] = "DPDP", 93 [15] = "Big LSO headers", 94 [16] = "MW support", 95 [17] = "APM support", 96 [18] = "Atomic ops support", 97 [19] = "Raw multicast support", 98 [20] = "Address vector port checking support", 99 [21] = "UD multicast support", 100 [24] = "Demand paging 
support", 101 [25] = "Router support", 102 [30] = "IBoE support", 103 [32] = "Unicast loopback support", 104 [34] = "FCS header control", 105 [38] = "Wake On LAN support", 106 [40] = "UDP RSS support", 107 [41] = "Unicast VEP steering support", 108 [42] = "Multicast VEP steering support", 109 [48] = "Counters support", 110 [59] = "Port management change event support", 111 [60] = "eSwitch support", 112 [61] = "64 byte EQE support", 113 [62] = "64 byte CQE support", 114 }; 115 int i; 116 117 mlx4_dbg(dev, "DEV_CAP flags:\n"); 118 for (i = 0; i < ARRAY_SIZE(fname); ++i) 119 if (fname[i] && (flags & (1LL << i))) 120 mlx4_dbg(dev, " %s\n", fname[i]); 121} 122 123static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) 124{ 125 static const char * const fname[] = { 126 [0] = "RSS support", 127 [1] = "RSS Toeplitz Hash Function support", 128 [2] = "RSS XOR Hash Function support", 129 [3] = "Device manage flow steering support" 130 }; 131 int i; 132 133 for (i = 0; i < ARRAY_SIZE(fname); ++i) 134 if (fname[i] && (flags & (1LL << i))) 135 mlx4_dbg(dev, " %s\n", fname[i]); 136} 137 138int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg) 139{ 140 struct mlx4_cmd_mailbox *mailbox; 141 u32 *inbox; 142 int err = 0; 143 144#define MOD_STAT_CFG_IN_SIZE 0x100 145 146#define MOD_STAT_CFG_PG_SZ_M_OFFSET 0x002 147#define MOD_STAT_CFG_PG_SZ_OFFSET 0x003 148 149 mailbox = mlx4_alloc_cmd_mailbox(dev); 150 if (IS_ERR(mailbox)) 151 return PTR_ERR(mailbox); 152 inbox = mailbox->buf; 153 154 memset(inbox, 0, MOD_STAT_CFG_IN_SIZE); 155 156 MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET); 157 MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET); 158 159 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG, 160 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 161 162 mlx4_free_cmd_mailbox(dev, mailbox); 163 return err; 164} 165 166int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, 167 struct mlx4_vhcr *vhcr, 168 struct 
mlx4_cmd_mailbox *inbox, 169 struct mlx4_cmd_mailbox *outbox, 170 struct mlx4_cmd_info *cmd) 171{ 172 struct mlx4_priv *priv = mlx4_priv(dev); 173 u8 field; 174 u32 size; 175 int err = 0; 176 177#define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0 178#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 179#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4 180#define QUERY_FUNC_CAP_FMR_OFFSET 0x8 181#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10 182#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14 183#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18 184#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x20 185#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x24 186#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x28 187#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c 188#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30 189 190#define QUERY_FUNC_CAP_FMR_FLAG 0x80 191#define QUERY_FUNC_CAP_FLAG_RDMA 0x40 192#define QUERY_FUNC_CAP_FLAG_ETH 0x80 193 194/* when opcode modifier = 1 */ 195#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3 196#define QUERY_FUNC_CAP_RDMA_PROPS_OFFSET 0x8 197#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc 198 199#define QUERY_FUNC_CAP_QP0_TUNNEL 0x10 200#define QUERY_FUNC_CAP_QP0_PROXY 0x14 201#define QUERY_FUNC_CAP_QP1_TUNNEL 0x18 202#define QUERY_FUNC_CAP_QP1_PROXY 0x1c 203 204#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC 0x40 205#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN 0x80 206 207#define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80 208 209 if (vhcr->op_modifier == 1) { 210 field = 0; 211 /* ensure force vlan and force mac bits are not set */ 212 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET); 213 /* ensure that phy_wqe_gid bit is not set */ 214 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET); 215 216 field = vhcr->in_modifier; /* phys-port = logical-port */ 217 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); 218 219 /* size is now the QP number */ 220 size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + field - 1; 221 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL); 222 223 
size += 2; 224 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL); 225 226 size = dev->phys_caps.base_proxy_sqpn + 8 * slave + field - 1; 227 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_PROXY); 228 229 size += 2; 230 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY); 231 232 } else if (vhcr->op_modifier == 0) { 233 /* enable rdma and ethernet interfaces */ 234 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA); 235 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); 236 237 field = dev->caps.num_ports; 238 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); 239 240 size = dev->caps.function_caps; /* set PF behaviours */ 241 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET); 242 243 field = 0; /* protected FMR support not available as yet */ 244 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET); 245 246 size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave]; 247 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); 248 249 size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave]; 250 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); 251 252 size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave]; 253 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); 254 255 size = dev->caps.num_eqs; 256 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); 257 258 size = dev->caps.reserved_eqs; 259 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); 260 261 size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave]; 262 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); 263 264 size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave]; 265 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); 266 267 size = dev->caps.num_mgms + dev->caps.num_amgms; 268 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); 269 270 } else 271 err = -EINVAL; 272 273 return err; 274} 275 276int 
mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port, 277 struct mlx4_func_cap *func_cap) 278{ 279 struct mlx4_cmd_mailbox *mailbox; 280 u32 *outbox; 281 u8 field, op_modifier; 282 u32 size; 283 int err = 0; 284 285 op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */ 286 287 mailbox = mlx4_alloc_cmd_mailbox(dev); 288 if (IS_ERR(mailbox)) 289 return PTR_ERR(mailbox); 290 291 err = mlx4_cmd_box(dev, 0, mailbox->dma, gen_or_port, op_modifier, 292 MLX4_CMD_QUERY_FUNC_CAP, 293 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 294 if (err) 295 goto out; 296 297 outbox = mailbox->buf; 298 299 if (!op_modifier) { 300 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET); 301 if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) { 302 mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n"); 303 err = -EPROTONOSUPPORT; 304 goto out; 305 } 306 func_cap->flags = field; 307 308 MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); 309 func_cap->num_ports = field; 310 311 MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET); 312 func_cap->pf_context_behaviour = size; 313 314 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); 315 func_cap->qp_quota = size & 0xFFFFFF; 316 317 MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); 318 func_cap->srq_quota = size & 0xFFFFFF; 319 320 MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); 321 func_cap->cq_quota = size & 0xFFFFFF; 322 323 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET); 324 func_cap->max_eq = size & 0xFFFFFF; 325 326 MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); 327 func_cap->reserved_eq = size & 0xFFFFFF; 328 329 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); 330 func_cap->mpt_quota = size & 0xFFFFFF; 331 332 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); 333 func_cap->mtt_quota = size & 0xFFFFFF; 334 335 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); 336 func_cap->mcg_quota = size & 0xFFFFFF; 337 goto out; 338 } 339 340 
/* logical port query */ 341 if (gen_or_port > dev->caps.num_ports) { 342 err = -EINVAL; 343 goto out; 344 } 345 346 if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) { 347 MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET); 348 if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) { 349 mlx4_err(dev, "VLAN is enforced on this port\n"); 350 err = -EPROTONOSUPPORT; 351 goto out; 352 } 353 354 if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) { 355 mlx4_err(dev, "Force mac is enabled on this port\n"); 356 err = -EPROTONOSUPPORT; 357 goto out; 358 } 359 } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) { 360 MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET); 361 if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) { 362 mlx4_err(dev, "phy_wqe_gid is " 363 "enforced on this ib port\n"); 364 err = -EPROTONOSUPPORT; 365 goto out; 366 } 367 } 368 369 MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); 370 func_cap->physical_port = field; 371 if (func_cap->physical_port != gen_or_port) { 372 err = -ENOSYS; 373 goto out; 374 } 375 376 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL); 377 func_cap->qp0_tunnel_qpn = size & 0xFFFFFF; 378 379 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY); 380 func_cap->qp0_proxy_qpn = size & 0xFFFFFF; 381 382 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL); 383 func_cap->qp1_tunnel_qpn = size & 0xFFFFFF; 384 385 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY); 386 func_cap->qp1_proxy_qpn = size & 0xFFFFFF; 387 388 /* All other resources are allocated by the master, but we still report 389 * 'num' and 'reserved' capabilities as follows: 390 * - num remains the maximum resource index 391 * - 'num - reserved' is the total available objects of a resource, but 392 * resource indices may be less than 'reserved' 393 * TODO: set per-resource quotas */ 394 395out: 396 mlx4_free_cmd_mailbox(dev, mailbox); 397 398 return err; 399} 400 401int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap 
*dev_cap) 402{ 403 struct mlx4_cmd_mailbox *mailbox; 404 u32 *outbox; 405 u8 field; 406 u32 field32, flags, ext_flags; 407 u16 size; 408 u16 stat_rate; 409 int err; 410 int i; 411 412#define QUERY_DEV_CAP_OUT_SIZE 0x100 413#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10 414#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11 415#define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12 416#define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13 417#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14 418#define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15 419#define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16 420#define QUERY_DEV_CAP_MAX_EEC_OFFSET 0x17 421#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19 422#define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a 423#define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b 424#define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d 425#define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e 426#define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f 427#define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20 428#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21 429#define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22 430#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23 431#define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27 432#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29 433#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b 434#define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d 435#define QUERY_DEV_CAP_RSS_OFFSET 0x2e 436#define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f 437#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33 438#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35 439#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36 440#define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37 441#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38 442#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b 443#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c 444#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e 445#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f 446#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40 447#define QUERY_DEV_CAP_SYNC_QP_OFFSET 0x42 448#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 449#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 450#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 451#define 
QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b 452#define QUERY_DEV_CAP_BF_OFFSET 0x4c 453#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d 454#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e 455#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f 456#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51 457#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52 458#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55 459#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56 460#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61 461#define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62 462#define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63 463#define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64 464#define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65 465#define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66 466#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67 467#define QUERY_DEV_CAP_MAX_BASIC_COUNTERS_OFFSET 0x68 468#define QUERY_DEV_CAP_MAX_EXTENDED_COUNTERS_OFFSET 0x6c 469#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76 470#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77 471#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 472#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 473#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 474#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86 475#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88 476#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a 477#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c 478#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e 479#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90 480#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92 481#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94 482#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 483#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 484 485 dev_cap->flags2 = 0; 486 mailbox = mlx4_alloc_cmd_mailbox(dev); 487 if (IS_ERR(mailbox)) 488 return PTR_ERR(mailbox); 489 outbox = mailbox->buf; 490 491 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, 492 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 493 if (err) 494 goto out; 495 496 MLX4_GET(field, outbox, 
QUERY_DEV_CAP_RSVD_QP_OFFSET); 497 dev_cap->reserved_qps = 1 << (field & 0xf); 498 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET); 499 dev_cap->max_qps = 1 << (field & 0x1f); 500 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET); 501 dev_cap->reserved_srqs = 1 << (field >> 4); 502 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET); 503 dev_cap->max_srqs = 1 << (field & 0x1f); 504 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET); 505 dev_cap->max_cq_sz = 1 << field; 506 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET); 507 dev_cap->reserved_cqs = 1 << (field & 0xf); 508 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET); 509 dev_cap->max_cqs = 1 << (field & 0x1f); 510 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET); 511 dev_cap->max_mpts = 1 << (field & 0x3f); 512 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET); 513 dev_cap->reserved_eqs = field & 0xf; 514 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET); 515 dev_cap->max_eqs = 1 << (field & 0xf); 516 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET); 517 dev_cap->reserved_mtts = 1 << (field >> 4); 518 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET); 519 dev_cap->max_mrw_sz = 1 << field; 520 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET); 521 dev_cap->reserved_mrws = 1 << (field & 0xf); 522 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET); 523 dev_cap->max_mtt_seg = 1 << (field & 0x3f); 524 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET); 525 dev_cap->max_requester_per_qp = 1 << (field & 0x3f); 526 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET); 527 dev_cap->max_responder_per_qp = 1 << (field & 0x3f); 528 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET); 529 field &= 0x1f; 530 if (!field) 531 dev_cap->max_gso_sz = 0; 532 else 533 dev_cap->max_gso_sz = 1 << field; 534 535 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET); 536 if (field & 0x20) 537 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR; 538 if (field & 0x10) 539 
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP; 540 field &= 0xf; 541 if (field) { 542 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS; 543 dev_cap->max_rss_tbl_sz = 1 << field; 544 } else 545 dev_cap->max_rss_tbl_sz = 0; 546 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET); 547 dev_cap->max_rdma_global = 1 << (field & 0x3f); 548 MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET); 549 dev_cap->local_ca_ack_delay = field & 0x1f; 550 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); 551 dev_cap->num_ports = field & 0xf; 552 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET); 553 dev_cap->max_msg_sz = 1 << (field & 0x1f); 554 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); 555 if (field & 0x80) 556 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN; 557 dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f; 558 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET); 559 dev_cap->fs_max_num_qp_per_entry = field; 560 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); 561 dev_cap->stat_rate_support = stat_rate; 562 MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); 563 dev_cap->timestamp_support = field & 0x80; 564 MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 565 MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); 566 dev_cap->flags = flags | (u64)ext_flags << 32; 567 MLX4_GET(field, outbox, QUERY_DEV_CAP_SYNC_QP_OFFSET); 568 dev_cap->sync_qp = field & 0x10; 569 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); 570 dev_cap->reserved_uars = field >> 4; 571 MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET); 572 dev_cap->uar_size = 1 << ((field & 0x3f) + 20); 573 MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET); 574 dev_cap->min_page_sz = 1 << field; 575 576 MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET); 577 if (field & 0x80) { 578 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET); 579 dev_cap->bf_reg_size = 1 << (field & 0x1f); 580 MLX4_GET(field, outbox, 
QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET); 581 if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) 582 field = 3; 583 dev_cap->bf_regs_per_page = 1 << (field & 0x3f); 584 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n", 585 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page); 586 } else { 587 dev_cap->bf_reg_size = 0; 588 mlx4_dbg(dev, "BlueFlame not available\n"); 589 } 590 591 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET); 592 dev_cap->max_sq_sg = field; 593 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET); 594 dev_cap->max_sq_desc_sz = size; 595 596 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET); 597 dev_cap->max_qp_per_mcg = 1 << field; 598 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET); 599 dev_cap->reserved_mgms = field & 0xf; 600 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET); 601 dev_cap->max_mcgs = 1 << field; 602 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET); 603 dev_cap->reserved_pds = field >> 4; 604 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET); 605 dev_cap->max_pds = 1 << (field & 0x3f); 606 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET); 607 dev_cap->reserved_xrcds = field >> 4; 608 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET); 609 dev_cap->max_xrcds = 1 << (field & 0x1f); 610 611 MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET); 612 dev_cap->rdmarc_entry_sz = size; 613 MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET); 614 dev_cap->qpc_entry_sz = size; 615 MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET); 616 dev_cap->aux_entry_sz = size; 617 MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET); 618 dev_cap->altc_entry_sz = size; 619 MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET); 620 dev_cap->eqc_entry_sz = size; 621 MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET); 622 dev_cap->cqc_entry_sz = size; 623 MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET); 624 dev_cap->srq_entry_sz 
= size; 625 MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET); 626 dev_cap->cmpt_entry_sz = size; 627 MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET); 628 dev_cap->mtt_entry_sz = size; 629 MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET); 630 dev_cap->dmpt_entry_sz = size; 631 632 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET); 633 dev_cap->max_srq_sz = 1 << field; 634 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET); 635 dev_cap->max_qp_sz = 1 << field; 636 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET); 637 dev_cap->resize_srq = field & 1; 638 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET); 639 dev_cap->max_rq_sg = field; 640 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET); 641 dev_cap->max_rq_desc_sz = size; 642 643 MLX4_GET(dev_cap->bmme_flags, outbox, 644 QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 645 MLX4_GET(dev_cap->reserved_lkey, outbox, 646 QUERY_DEV_CAP_RSVD_LKEY_OFFSET); 647 MLX4_GET(dev_cap->max_icm_sz, outbox, 648 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET); 649 if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS) 650 MLX4_GET(dev_cap->max_basic_counters, outbox, 651 QUERY_DEV_CAP_MAX_BASIC_COUNTERS_OFFSET); 652 /* FW reports 256 however real value is 255 */ 653 dev_cap->max_basic_counters = min_t(u32, dev_cap->max_basic_counters, 255); 654 if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS_EXT) 655 MLX4_GET(dev_cap->max_extended_counters, outbox, 656 QUERY_DEV_CAP_MAX_EXTENDED_COUNTERS_OFFSET); 657 658 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { 659 for (i = 1; i <= dev_cap->num_ports; ++i) { 660 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); 661 dev_cap->max_vl[i] = field >> 4; 662 MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); 663 dev_cap->ib_mtu[i] = field >> 4; 664 dev_cap->max_port_width[i] = field & 0xf; 665 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); 666 dev_cap->max_gids[i] = 1 << (field & 0xf); 667 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET); 668 
dev_cap->max_pkeys[i] = 1 << (field & 0xf); 669 } 670 } else { 671#define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00 672#define QUERY_PORT_MTU_OFFSET 0x01 673#define QUERY_PORT_ETH_MTU_OFFSET 0x02 674#define QUERY_PORT_WIDTH_OFFSET 0x06 675#define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07 676#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a 677#define QUERY_PORT_MAX_VL_OFFSET 0x0b 678#define QUERY_PORT_MAC_OFFSET 0x10 679#define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18 680#define QUERY_PORT_WAVELENGTH_OFFSET 0x1c 681#define QUERY_PORT_TRANS_CODE_OFFSET 0x20 682 683 for (i = 1; i <= dev_cap->num_ports; ++i) { 684 err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT, 685 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 686 if (err) 687 goto out; 688 689 MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET); 690 dev_cap->supported_port_types[i] = field & 3; 691 dev_cap->suggested_type[i] = (field >> 3) & 1; 692 dev_cap->default_sense[i] = (field >> 4) & 1; 693 MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); 694 dev_cap->ib_mtu[i] = field & 0xf; 695 MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); 696 dev_cap->max_port_width[i] = field & 0xf; 697 MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET); 698 dev_cap->max_gids[i] = 1 << (field >> 4); 699 dev_cap->max_pkeys[i] = 1 << (field & 0xf); 700 MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); 701 dev_cap->max_vl[i] = field & 0xf; 702 MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET); 703 dev_cap->log_max_macs[i] = field & 0xf; 704 dev_cap->log_max_vlans[i] = field >> 4; 705 MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET); 706 MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET); 707 MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET); 708 dev_cap->trans_type[i] = field32 >> 24; 709 dev_cap->vendor_oui[i] = field32 & 0xffffff; 710 MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET); 711 MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET); 712 } 713 } 
714 715 mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n", 716 dev_cap->bmme_flags, dev_cap->reserved_lkey); 717 718 /* 719 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then 720 * we can't use any EQs whose doorbell falls on that page, 721 * even if the EQ itself isn't reserved. 722 */ 723 dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4, 724 dev_cap->reserved_eqs); 725 726 mlx4_dbg(dev, "Max ICM size %lld MB\n", 727 (unsigned long long) dev_cap->max_icm_sz >> 20); 728 mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", 729 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz); 730 mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n", 731 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz); 732 mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", 733 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz); 734 mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", 735 dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz); 736 mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n", 737 dev_cap->reserved_mrws, dev_cap->reserved_mtts); 738 mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n", 739 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars); 740 mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n", 741 dev_cap->max_pds, dev_cap->reserved_mgms); 742 mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", 743 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz); 744 mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n", 745 dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1], 746 dev_cap->max_port_width[1]); 747 mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n", 748 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg); 749 mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n", 750 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg); 751 mlx4_dbg(dev, "Max GSO size: %d\n", 
dev_cap->max_gso_sz); 752 mlx4_dbg(dev, "Max basic counters: %d\n", dev_cap->max_basic_counters); 753 mlx4_dbg(dev, "Max extended counters: %d\n", dev_cap->max_extended_counters); 754 mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz); 755 756 dump_dev_cap_flags(dev, dev_cap->flags); 757 dump_dev_cap_flags2(dev, dev_cap->flags2); 758 759out: 760 mlx4_free_cmd_mailbox(dev, mailbox); 761 return err; 762} 763 764int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, 765 struct mlx4_vhcr *vhcr, 766 struct mlx4_cmd_mailbox *inbox, 767 struct mlx4_cmd_mailbox *outbox, 768 struct mlx4_cmd_info *cmd) 769{ 770 u64 flags; 771 int err = 0; 772 u8 field; 773 774 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, 775 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 776 if (err) 777 return err; 778 779 /* add port mng change event capability unconditionally to slaves */ 780 MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 781 flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV; 782 MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 783 784 /* For guests, report Blueflame disabled */ 785 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET); 786 field &= 0x7f; 787 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); 788 789 return 0; 790} 791 792int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, 793 struct mlx4_vhcr *vhcr, 794 struct mlx4_cmd_mailbox *inbox, 795 struct mlx4_cmd_mailbox *outbox, 796 struct mlx4_cmd_info *cmd) 797{ 798 struct mlx4_priv *priv = mlx4_priv(dev); 799 u64 def_mac; 800 u8 port_type; 801 u16 short_field; 802 int err; 803 804#define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0 805#define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c 806#define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e 807 808 err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, 809 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, 810 MLX4_CMD_NATIVE); 811 812 if (!err && dev->caps.function != slave) { 813 /* set slave default_mac address */ 814 
MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET); 815 def_mac += slave << 8; 816 /* if config MAC in DB use it */ 817 if (priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac) 818 def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac; 819 MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET); 820 821 /* get port type - currently only eth is enabled */ 822 MLX4_GET(port_type, outbox->buf, 823 QUERY_PORT_SUPPORTED_TYPE_OFFSET); 824 825 /* No link sensing allowed */ 826 port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK; 827 /* set port type to currently operating port type */ 828 port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3); 829 830 MLX4_PUT(outbox->buf, port_type, 831 QUERY_PORT_SUPPORTED_TYPE_OFFSET); 832 833 if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH) 834 short_field = mlx4_get_slave_num_gids(dev, slave); 835 else 836 short_field = 1; /* slave max gids */ 837 MLX4_PUT(outbox->buf, short_field, 838 QUERY_PORT_CUR_MAX_GID_OFFSET); 839 840 short_field = dev->caps.pkey_table_len[vhcr->in_modifier]; 841 MLX4_PUT(outbox->buf, short_field, 842 QUERY_PORT_CUR_MAX_PKEY_OFFSET); 843 } 844 845 return err; 846} 847 848int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port, 849 int *gid_tbl_len, int *pkey_tbl_len) 850{ 851 struct mlx4_cmd_mailbox *mailbox; 852 u32 *outbox; 853 u16 field; 854 int err; 855 856 mailbox = mlx4_alloc_cmd_mailbox(dev); 857 if (IS_ERR(mailbox)) 858 return PTR_ERR(mailbox); 859 860 err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, 861 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, 862 MLX4_CMD_WRAPPED); 863 if (err) 864 goto out; 865 866 outbox = mailbox->buf; 867 868 MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET); 869 *gid_tbl_len = field; 870 871 MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET); 872 *pkey_tbl_len = field; 873 874out: 875 mlx4_free_cmd_mailbox(dev, mailbox); 876 return err; 877} 878EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len); 879 
/*
 * Hand a set of ICM pages to firmware via MAP_FA / MAP_ICM /
 * MAP_ICM_AUX.  Entries are packed as (virtual address, physical
 * address | log2 page size) be64 pairs, flushed to FW whenever the
 * mailbox fills.  @virt == -1 means "no virtual mapping" (MAP_FA).
 */
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_icm_iter iter;
	__be64 *pages;
	int lg;
	int nent = 0;
	int i;
	int err = 0;
	int ts = 0, tc = 0;	/* total size (KB) / total chunk count, for debug */

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
	pages = mailbox->buf;

	for (mlx4_icm_first(icm, &iter);
	     !mlx4_icm_last(&iter);
	     mlx4_icm_next(&iter)) {
		/*
		 * We have to pass pages that are aligned to their
		 * size, so find the least significant 1 in the
		 * address or size and use that as our log2 size.
		 */
		lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
		if (lg < MLX4_ICM_PAGE_SHIFT) {
			mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
				  MLX4_ICM_PAGE_SIZE,
				  (unsigned long long) mlx4_icm_addr(&iter),
				  mlx4_icm_size(&iter));
			err = -EINVAL;
			goto out;
		}

		for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
			if (virt != -1) {
				pages[nent * 2] = cpu_to_be64(virt);
				virt += 1 << lg;
			}

			/* low bits of the address carry the page-size log */
			pages[nent * 2 + 1] =
				cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
					    (lg - MLX4_ICM_PAGE_SHIFT));
			ts += 1 << (lg - 10);
			++tc;

			/* mailbox full (16 bytes per entry) — flush to FW */
			if (++nent == MLX4_MAILBOX_SIZE / 16) {
				err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
						MLX4_CMD_TIME_CLASS_B,
						MLX4_CMD_NATIVE);
				if (err)
					goto out;
				nent = 0;
			}
		}
	}

	if (nent)
		err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	switch (op) {
	case MLX4_CMD_MAP_FA:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM_AUX:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
			 tc, ts, (unsigned long long) virt - (ts << 10));
		break;
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
}

int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}


int mlx4_RUN_FW(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

/*
 * QUERY_FW: read firmware version, command-interface revision, and
 * the BAR/offset locations of the catastrophic-error buffer, clear
 * interrupt register, comm channel and internal clock.
 */
int mlx4_QUERY_FW(struct mlx4_dev *dev)
{
	struct mlx4_fw *fw = &mlx4_priv(dev)->fw;
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err = 0;
	u64 fw_ver;
	u16 cmd_if_rev;
	u8 lg;

#define QUERY_FW_OUT_SIZE             0x100
#define QUERY_FW_VER_OFFSET            0x00
#define QUERY_FW_PPF_ID		       0x09
#define QUERY_FW_CMD_IF_REV_OFFSET     0x0a
#define QUERY_FW_MAX_CMD_OFFSET        0x0f
#define QUERY_FW_ERR_START_OFFSET      0x30
#define QUERY_FW_ERR_SIZE_OFFSET       0x38
#define QUERY_FW_ERR_BAR_OFFSET        0x3c

#define QUERY_FW_SIZE_OFFSET           0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET    0x28

#define QUERY_FW_COMM_BASE_OFFSET      0x40
#define QUERY_FW_COMM_BAR_OFFSET       0x48

#define QUERY_FW_CLOCK_OFFSET	       0x50
#define QUERY_FW_CLOCK_BAR	       0x58

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
	/*
	 * FW subminor version is at more significant bits than minor
	 * version, so swap here.
1024 */ 1025 dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) | 1026 ((fw_ver & 0xffff0000ull) >> 16) | 1027 ((fw_ver & 0x0000ffffull) << 16); 1028 1029 MLX4_GET(lg, outbox, QUERY_FW_PPF_ID); 1030 dev->caps.function = lg; 1031 1032 if (mlx4_is_slave(dev)) 1033 goto out; 1034 1035 1036 MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); 1037 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || 1038 cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) { 1039 mlx4_err(dev, "Installed FW has unsupported " 1040 "command interface revision %d.\n", 1041 cmd_if_rev); 1042 mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n", 1043 (int) (dev->caps.fw_ver >> 32), 1044 (int) (dev->caps.fw_ver >> 16) & 0xffff, 1045 (int) dev->caps.fw_ver & 0xffff); 1046 mlx4_err(dev, "This driver version supports only revisions %d to %d.\n", 1047 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV); 1048 err = -ENODEV; 1049 goto out; 1050 } 1051 1052 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS) 1053 dev->flags |= MLX4_FLAG_OLD_PORT_CMDS; 1054 1055 MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET); 1056 cmd->max_cmds = 1 << lg; 1057 1058 mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n", 1059 (int) (dev->caps.fw_ver >> 32), 1060 (int) (dev->caps.fw_ver >> 16) & 0xffff, 1061 (int) dev->caps.fw_ver & 0xffff, 1062 cmd_if_rev, cmd->max_cmds); 1063 1064 MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET); 1065 MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET); 1066 MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET); 1067 fw->catas_bar = (fw->catas_bar >> 6) * 2; 1068 1069 mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n", 1070 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar); 1071 1072 MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET); 1073 MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET); 1074 MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET); 1075 
fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2; 1076 1077 MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET); 1078 MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET); 1079 fw->comm_bar = (fw->comm_bar >> 6) * 2; 1080 mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n", 1081 fw->comm_bar, fw->comm_base); 1082 mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2); 1083 1084 MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET); 1085 MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR); 1086 fw->clock_bar = (fw->clock_bar >> 6) * 2; 1087 mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n", 1088 fw->comm_bar, fw->comm_base); 1089 1090 /* 1091 * Round up number of system pages needed in case 1092 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. 1093 */ 1094 fw->fw_pages = 1095 ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> 1096 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); 1097 1098 mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n", 1099 (unsigned long long) fw->clr_int_base, fw->clr_int_bar); 1100 1101out: 1102 mlx4_free_cmd_mailbox(dev, mailbox); 1103 return err; 1104} 1105 1106int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave, 1107 struct mlx4_vhcr *vhcr, 1108 struct mlx4_cmd_mailbox *inbox, 1109 struct mlx4_cmd_mailbox *outbox, 1110 struct mlx4_cmd_info *cmd) 1111{ 1112 u8 *outbuf; 1113 int err; 1114 1115 outbuf = outbox->buf; 1116 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW, 1117 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1118 if (err) 1119 return err; 1120 1121 /* for slaves, set pci PPF ID to invalid and zero out everything 1122 * else except FW version */ 1123 outbuf[0] = outbuf[1] = 0; 1124 memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8); 1125 outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID; 1126 1127 return 0; 1128} 1129 1130static void get_board_id(void *vsd, char *board_id) 1131{ 1132 int i; 1133 1134#define VSD_OFFSET_SIG1 0x00 1135#define VSD_OFFSET_SIG2 0xde 1136#define VSD_OFFSET_MLX_BOARD_ID 0xd0 1137#define VSD_OFFSET_TS_BOARD_ID 
				0x20

#define VSD_SIGNATURE_TOPSPIN	0x5ad

	memset(board_id, 0, MLX4_BOARD_ID_LEN);

	if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
	    be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
		strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
	} else {
		/*
		 * The board ID is a string but the firmware byte
		 * swaps each 4-byte word before passing it back to
		 * us.  Therefore we need to swab it before printing.
		 */
		for (i = 0; i < 4; ++i)
			((u32 *) board_id)[i] =
				swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
	}
}

/*
 * QUERY_ADAPTER: read the INTA pin and board-ID string.
 */
int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err;

#define QUERY_ADAPTER_OUT_SIZE             0x100
#define QUERY_ADAPTER_INTA_PIN_OFFSET      0x10
#define QUERY_ADAPTER_VSD_OFFSET           0x20

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);

	/* outbox is u32*, so the byte offset is divided by 4 */
	get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
		     adapter->board_id);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

/*
 * INIT_HCA: build the 0x200-byte initialization mailbox (ICM layout
 * of the QP/SRQ/CQ/EQ/RDMARC contexts, multicast or flow steering
 * tables, TPT and UAR parameters) and start the HCA.
 */
int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *inbox;
	int err;

#define INIT_HCA_IN_SIZE		 0x200
#define INIT_HCA_VERSION_OFFSET		 0x000
#define	 INIT_HCA_VERSION		 2
#define INIT_HCA_CACHELINE_SZ_OFFSET	 0x0e
#define INIT_HCA_FLAGS_OFFSET		 0x014
#define INIT_HCA_QPC_OFFSET		 0x020
#define	 INIT_HCA_QPC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x10)
#define	 INIT_HCA_LOG_QP_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x17)
#define	 INIT_HCA_SRQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x28)
#define	 INIT_HCA_LOG_SRQ_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x2f)
#define	 INIT_HCA_CQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x30)
#define	 INIT_HCA_LOG_CQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x37)
#define	 INIT_HCA_EQE_CQE_OFFSETS	 (INIT_HCA_QPC_OFFSET + 0x38)
#define	 INIT_HCA_ALTC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x40)
#define	 INIT_HCA_AUXC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x50)
#define	 INIT_HCA_EQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x60)
#define	 INIT_HCA_LOG_EQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x67)
#define	 INIT_HCA_RDMARC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x70)
#define	 INIT_HCA_LOG_RD_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x77)
#define INIT_HCA_MCAST_OFFSET		 0x0c0
#define	 INIT_HCA_MC_BASE_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x00)
#define	 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
#define	 INIT_HCA_LOG_MC_HASH_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x16)
#define  INIT_HCA_UC_STEERING_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x18)
#define	 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
#define  INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN	0x6
#define  INIT_HCA_FS_PARAM_OFFSET         0x1d0
#define  INIT_HCA_FS_BASE_OFFSET          (INIT_HCA_FS_PARAM_OFFSET + 0x00)
#define  INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET  (INIT_HCA_FS_PARAM_OFFSET + 0x12)
#define  INIT_HCA_FS_LOG_TABLE_SZ_OFFSET  (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
#define  INIT_HCA_FS_ETH_BITS_OFFSET      (INIT_HCA_FS_PARAM_OFFSET + 0x21)
#define  INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
#define  INIT_HCA_FS_IB_BITS_OFFSET       (INIT_HCA_FS_PARAM_OFFSET + 0x25)
#define  INIT_HCA_FS_IB_NUM_ADDRS_OFFSET  (INIT_HCA_FS_PARAM_OFFSET + 0x26)
#define INIT_HCA_TPT_OFFSET		 0x0f0
#define	 INIT_HCA_DMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x00)
#define	 INIT_HCA_LOG_MPT_SZ_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x0b)
#define	 INIT_HCA_MTT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x10)
#define	 INIT_HCA_CMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x18)
#define INIT_HCA_UAR_OFFSET		 0x120
#define	 INIT_HCA_LOG_UAR_SZ_OFFSET	 (INIT_HCA_UAR_OFFSET + 0x0a)
#define  INIT_HCA_UAR_PAGE_SZ_OFFSET     (INIT_HCA_UAR_OFFSET + 0x0b)

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	memset(inbox, 0, INIT_HCA_IN_SIZE);

	*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;

	*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
		((ilog2(CACHE_LINE_SIZE) - 4) << 5) | (1 << 4);

	/* bit 1 in the flags word selects host endianness */
#if defined(__LITTLE_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
#elif defined(__BIG_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
#else
#error Host endianness not defined
#endif
	/* Check port for UD address vector: */
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);

	/* Enable IPoIB checksumming if we can: */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);

	/* Enable QoS support if module parameter set */
	if (enable_qos)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);

	/* Enable fast drop performance optimization */
	if (dev->caps.fast_drop)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 7);

	/* enable counters */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);

	/* CX3 is capable of extending CQEs\EQEs from 32 to 64 bytes */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
		dev->caps.eqe_size   = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size   = 32;
		dev->caps.eqe_factor = 0;
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
		dev->caps.cqe_size   = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
	} else {
		dev->caps.cqe_size   = 32;
	}

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_PUT(inbox, param->qpc_base,      INIT_HCA_QPC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_qps,   INIT_HCA_LOG_QP_OFFSET);
	MLX4_PUT(inbox, param->srqc_base,     INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_srqs,  INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_PUT(inbox, param->cqc_base,      INIT_HCA_CQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_cqs,   INIT_HCA_LOG_CQ_OFFSET);
	MLX4_PUT(inbox, param->altc_base,     INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_PUT(inbox, param->auxc_base,     INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_PUT(inbox, param->eqc_base,      INIT_HCA_EQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_eqs,   INIT_HCA_LOG_EQ_OFFSET);
	MLX4_PUT(inbox, param->rdmarc_base,   INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);

	/* steering attributes */
	if (dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
			cpu_to_be32(1 <<
				    INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);

		MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
		MLX4_PUT(inbox, param->log_mc_entry_sz,
			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_table_sz,
			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
		/* Enable Ethernet flow steering
		 * with udp unicast and tcp unicast
		 */
		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
			 INIT_HCA_FS_ETH_BITS_OFFSET);
		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
			 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
		/* Enable IPoIB flow steering
		 * with udp unicast and tcp unicast
		 */
		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
			 INIT_HCA_FS_IB_BITS_OFFSET);
		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
			 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
	} else {
		MLX4_PUT(inbox, param->mc_base,	INIT_HCA_MC_BASE_OFFSET);
		MLX4_PUT(inbox, param->log_mc_entry_sz,
			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_hash_sz,
			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_table_sz,
			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
		if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0) {
			MLX4_PUT(inbox, (u8) (1 << 3),
				 INIT_HCA_UC_STEERING_OFFSET);
		}
	}

	/* TPT attributes */

	MLX4_PUT(inbox, param->dmpt_base,  INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);
	MLX4_PUT(inbox, param->cmpt_base,  INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_PUT(inbox, param->uar_page_sz,	INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_PUT(inbox, param->log_uar_sz,	INIT_HCA_LOG_UAR_SZ_OFFSET);

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
		       MLX4_CMD_NATIVE);

	if (err)
		mlx4_err(dev, "INIT_HCA returns %d\n", err);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

/*
 * QUERY_HCA: read back the currently-active INIT_HCA parameters
 * (reuses the INIT_HCA_* offsets above).  Used by slaves to learn
 * the master's configuration.
 */
int mlx4_QUERY_HCA(struct mlx4_dev *dev,
		   struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *outbox;
	u32 dword_field;
	int err;
	u8 byte_field;

#define QUERY_HCA_GLOBAL_CAPS_OFFSET	0x04
#define QUERY_HCA_CORE_CLOCK_OFFSET	0x0c

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
			   MLX4_CMD_QUERY_HCA,
			   MLX4_CMD_TIME_CLASS_B,
			   !mlx4_is_slave(dev));
	if (err)
		goto out;

	MLX4_GET(param->global_caps, outbox,
		 QUERY_HCA_GLOBAL_CAPS_OFFSET);
	MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_GET(param->qpc_base,      outbox, INIT_HCA_QPC_BASE_OFFSET);
	MLX4_GET(param->log_num_qps,   outbox, INIT_HCA_LOG_QP_OFFSET);
	MLX4_GET(param->srqc_base,     outbox, INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_GET(param->log_num_srqs,  outbox, INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_GET(param->cqc_base,      outbox, INIT_HCA_CQC_BASE_OFFSET);
	MLX4_GET(param->log_num_cqs,   outbox, INIT_HCA_LOG_CQ_OFFSET);
	MLX4_GET(param->altc_base,     outbox, INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_GET(param->auxc_base,     outbox, INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_GET(param->eqc_base,      outbox, INIT_HCA_EQC_BASE_OFFSET);
	MLX4_GET(param->log_num_eqs,   outbox, INIT_HCA_LOG_EQ_OFFSET);
	MLX4_GET(param->rdmarc_base,   outbox, INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);

	/* steering attributes: decode which steering mode was enabled */
	MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
	if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
		param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
	} else {
		MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
		if (byte_field & 0x8) {
			param->steering_mode = MLX4_STEERING_MODE_B0;
		}
		else {
			param->steering_mode = MLX4_STEERING_MODE_A0;
		}
	}
	if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
		MLX4_GET(param->log_mc_entry_sz, outbox,
			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		MLX4_GET(param->log_mc_table_sz, outbox,
			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
	} else {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
		MLX4_GET(param->log_mc_entry_sz, outbox,
			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		MLX4_GET(param->log_mc_hash_sz,  outbox,
			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		MLX4_GET(param->log_mc_table_sz, outbox,
			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
	}

	/* CX3 is capable of extending CQEs\EQEs from 32 to 64 bytes */
	MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
	if (byte_field & 0x20) /* 64-bytes eqe enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
	if (byte_field & 0x40) /* 64-bytes cqe enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;

	/* TPT attributes */

	MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
	MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}

/* for IB-type ports only in SRIOV mode.
   Checks that both proxy QP0
 * and real QP0 are active, so that the paravirtualized QP0 is ready
 * to operate */
static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	/* irrelevant if not infiniband */
	if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
	    priv->mfunc.master.qp0_state[port].qp0_active)
		return 1;
	return 0;
}

/*
 * INIT_PORT wrapper: reference-counts port initialization across
 * slaves.  The real INIT_PORT is issued only on the first user of an
 * Ethernet port, or (for IB) by the master once QP0 is operational.
 */
int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = vhcr->in_modifier;
	int err;

	/* already counted for this slave — nothing to do */
	if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		/* Enable port only if it was previously disabled */
		if (!priv->mfunc.master.init_port_ref[port]) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	} else {
		if (slave == mlx4_master_func_num(dev)) {
			if (check_qp0_state(dev, slave, port) &&
			    !priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
					       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.qp0_state[port].port_active = 1;
				priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	}
	++priv->mfunc.master.init_port_ref[port];
	return 0;
}

/*
 * INIT_PORT: bring a port up.  With pre-rev-3 firmware
 * (MLX4_FLAG_OLD_PORT_CMDS) the port attributes (VL cap, width, MTU,
 * GID/P_Key table sizes) are passed in a mailbox; newer firmware
 * takes them via SET_PORT, so the command carries no payload.
 */
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err;
	u32 flags;
	u16 field;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
#define INIT_PORT_IN_SIZE          256
#define INIT_PORT_FLAGS_OFFSET     0x00
#define INIT_PORT_FLAG_SIG         (1 << 18)
#define INIT_PORT_FLAG_NG          (1 << 17)
#define INIT_PORT_FLAG_G0          (1 << 16)
#define INIT_PORT_VL_SHIFT         4
#define INIT_PORT_PORT_WIDTH_SHIFT 8
#define INIT_PORT_MTU_OFFSET       0x04
#define INIT_PORT_MAX_GID_OFFSET   0x06
#define INIT_PORT_MAX_PKEY_OFFSET  0x0a
#define INIT_PORT_GUID0_OFFSET     0x10
#define INIT_PORT_NODE_GUID_OFFSET 0x18
#define INIT_PORT_SI_GUID_OFFSET   0x20

		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		memset(inbox, 0, INIT_PORT_IN_SIZE);

		flags = 0;
		flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
		flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
		MLX4_PUT(inbox, flags,		  INIT_PORT_FLAGS_OFFSET);

		/* MTU encoded as bytes: 128 << ib_mtu enum */
		field = 128 << dev->caps.ib_mtu_cap[port];
		MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
		field = dev->caps.gid_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
		field = dev->caps.pkey_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);

		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

		mlx4_free_cmd_mailbox(dev, mailbox);
	} else
		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);

/*
 * CLOSE_PORT wrapper: mirror of mlx4_INIT_PORT_wrapper — drops this
 * slave's reference and issues the real CLOSE_PORT only when the last
 * user (or, for IB, the master with QP0 quiesced) goes away.
 */
int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = vhcr->in_modifier;
	int err;

	if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
	    (1 << port)))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		if (priv->mfunc.master.init_port_ref[port] == 1) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
				       1000, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	} else {
		/* infiniband port */
		if (slave == mlx4_master_func_num(dev)) {
			if (!priv->mfunc.master.qp0_state[port].qp0_active &&
			    priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
					       1000, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
				priv->mfunc.master.qp0_state[port].port_active = 0;
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	}
	--priv->mfunc.master.init_port_ref[port];
	return 0;
}

int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
			MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);

int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
			MLX4_CMD_NATIVE);
}

/*
 * SET_ICM_SIZE: tell FW the main ICM size; FW returns (immediately)
 * the number of auxiliary ICM pages required.
 */
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
	int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
			       MLX4_CMD_SET_ICM_SIZE,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (ret)
		return ret;

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
1637 */ 1638 *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> 1639 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); 1640 1641 return 0; 1642} 1643 1644int mlx4_NOP(struct mlx4_dev *dev) 1645{ 1646 /* Input modifier of 0x1f means "finish as soon as possible." */ 1647 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1648} 1649 1650int mlx4_query_diag_counters(struct mlx4_dev *dev, int array_length, 1651 u8 op_modifier, u32 in_offset[], 1652 u32 counter_out[]) 1653{ 1654 struct mlx4_cmd_mailbox *mailbox; 1655 u32 *outbox; 1656 int ret; 1657 int i; 1658 1659 mailbox = mlx4_alloc_cmd_mailbox(dev); 1660 if (IS_ERR(mailbox)) 1661 return PTR_ERR(mailbox); 1662 outbox = mailbox->buf; 1663 1664 ret = mlx4_cmd_box(dev, 0, mailbox->dma, 0, op_modifier, 1665 MLX4_CMD_DIAG_RPRT, MLX4_CMD_TIME_CLASS_A, 1666 MLX4_CMD_NATIVE); 1667 if (ret) 1668 goto out; 1669 1670 for (i = 0; i < array_length; i++) { 1671 if (in_offset[i] > MLX4_MAILBOX_SIZE) { 1672 ret = -EINVAL; 1673 goto out; 1674 } 1675 1676 MLX4_GET(counter_out[i], outbox, in_offset[i]); 1677 } 1678 1679out: 1680 mlx4_free_cmd_mailbox(dev, mailbox); 1681 return ret; 1682} 1683EXPORT_SYMBOL_GPL(mlx4_query_diag_counters); 1684 1685#define MLX4_WOL_SETUP_MODE (5 << 28) 1686int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port) 1687{ 1688 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; 1689 1690 return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3, 1691 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A, 1692 MLX4_CMD_NATIVE); 1693} 1694EXPORT_SYMBOL_GPL(mlx4_wol_read); 1695 1696int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port) 1697{ 1698 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; 1699 1700 return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG, 1701 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1702} 1703EXPORT_SYMBOL_GPL(mlx4_wol_write); 1704 1705enum { 1706 ADD_TO_MCG = 0x26, 1707}; 1708 1709 1710void mlx4_opreq_action(struct work_struct *work) 1711{ 1712 struct 
mlx4_priv *priv = container_of(work, struct mlx4_priv, opreq_task); 1713 struct mlx4_dev *dev = &priv->dev; 1714 int num_tasks = atomic_read(&priv->opreq_count); 1715 struct mlx4_cmd_mailbox *mailbox; 1716 struct mlx4_mgm *mgm; 1717 u32 *outbox; 1718 u32 modifier; 1719 u16 token; 1720 u16 type_m; 1721 u16 type; 1722 int err; 1723 u32 num_qps; 1724 struct mlx4_qp qp; 1725 int i; 1726 u8 rem_mcg; 1727 u8 prot; 1728 1729#define GET_OP_REQ_MODIFIER_OFFSET 0x08 1730#define GET_OP_REQ_TOKEN_OFFSET 0x14 1731#define GET_OP_REQ_TYPE_OFFSET 0x1a 1732#define GET_OP_REQ_DATA_OFFSET 0x20 1733 1734 mailbox = mlx4_alloc_cmd_mailbox(dev); 1735 if (IS_ERR(mailbox)) { 1736 mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n"); 1737 return; 1738 } 1739 outbox = mailbox->buf; 1740 1741 while (num_tasks) { 1742 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, 1743 MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, 1744 MLX4_CMD_NATIVE); 1745 if (err) { 1746 mlx4_err(dev, "Failed to retreive required operation: %d\n", err); 1747 return; 1748 } 1749 MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); 1750 MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET); 1751 MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET); 1752 type_m = type >> 12; 1753 type &= 0xfff; 1754 1755 switch (type) { 1756 case ADD_TO_MCG: 1757 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 1758 mlx4_warn(dev, "ADD MCG operation is not supported in " 1759 "DEVICE_MANAGED steerign mode\n"); 1760 err = EPERM; 1761 break; 1762 } 1763 mgm = (struct mlx4_mgm *) ((u8 *) (outbox) + GET_OP_REQ_DATA_OFFSET); 1764 num_qps = be32_to_cpu(mgm->members_count) & MGM_QPN_MASK; 1765 rem_mcg = ((u8 *) (&mgm->members_count))[0] & 1; 1766 prot = ((u8 *) (&mgm->members_count))[0] >> 6; 1767 1768 for (i = 0; i < num_qps; i++) { 1769 qp.qpn = be32_to_cpu(mgm->qp[i]); 1770 if (rem_mcg) 1771 err = mlx4_multicast_detach(dev, &qp, mgm->gid, prot, 0); 1772 else 1773 err = mlx4_multicast_attach(dev, &qp, mgm->gid, mgm->gid[5] ,0, 
prot, NULL); 1774 if (err) 1775 break; 1776 } 1777 break; 1778 default: 1779 mlx4_warn(dev, "Bad type for required operation\n"); 1780 err = EINVAL; 1781 break; 1782 } 1783 err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16), 1, 1784 MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, 1785 MLX4_CMD_NATIVE); 1786 if (err) { 1787 mlx4_err(dev, "Failed to acknowledge required request: %d\n", err); 1788 goto out; 1789 } 1790 memset(outbox, 0, 0xffc); 1791 num_tasks = atomic_dec_return(&priv->opreq_count); 1792 } 1793 1794out: 1795 mlx4_free_cmd_mailbox(dev, mailbox); 1796} 1797