srq.c revision 271127
/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>

#include "mlx4.h"
#include "icm.h"

/*
 * Async event handler: called from the EQ code with an SRQ number.
 * Look the SRQ up, take a reference so it cannot be freed under us,
 * and dispatch the event to its owner.
 */
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	struct mlx4_srq *srq;

	spin_lock(&srq_table->lock);

	srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
	if (srq)
		atomic_inc(&srq->refcount);

	spin_unlock(&srq_table->lock);

	if (!srq) {
		mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	srq->event(srq, event_type);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
}

static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int srq_num)
{
	return mlx4_cmd(dev, mailbox->dma, srq_num, 0,
			MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int srq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
			    mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
{
	return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}

static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int srq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

/* Reserve an SRQ number and map its ICM context and cMPT entries. */
int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	int err;

	*srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
	if (*srqn == -1)
		return -ENOMEM;

	err = mlx4_table_get(dev, &srq_table->table, *srqn);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
	if (err)
		goto err_put;
	return 0;

err_put:
	mlx4_table_put(dev, &srq_table->table, *srqn);

err_out:
	mlx4_bitmap_free(&srq_table->bitmap, *srqn);
	return err;
}

/*
 * On multi-function (SR-IOV) devices the SRQ number is reserved through
 * the master with ALLOC_RES; otherwise allocate it locally.
 */
static int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_SRQ,
				   RES_OP_RESERVE_AND_MAP,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*srqn = get_param_l(&out_param);

		return err;
	}
	return __mlx4_srq_alloc_icm(dev, srqn);
}

void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;

	mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
	mlx4_table_put(dev, &srq_table->table, srqn);
	mlx4_bitmap_free(&srq_table->bitmap, srqn);
}

static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, srqn);
		if (mlx4_cmd(dev, in_param, RES_SRQ, RES_OP_RESERVE_AND_MAP,
			     MLX4_CMD_FREE_RES,
			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed freeing srq:%d\n", srqn);
		return;
	}
	__mlx4_srq_free_icm(dev, srqn);
}

/*
 * Allocate an SRQ: reserve the SRQ number and ICM, insert it into the
 * radix tree used by the event handler, then build the SRQ context in a
 * mailbox and pass ownership to the firmware with SW2HW_SRQ.
 */
int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
		   struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_srq_context *srq_context;
	u64 mtt_addr;
	int err;

	err = mlx4_srq_alloc_icm(dev, &srq->srqn);
	if (err)
		return err;

	spin_lock_irq(&srq_table->lock);
	err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
	spin_unlock_irq(&srq_table->lock);
	if (err)
		goto err_icm;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	srq_context = mailbox->buf;
	memset(srq_context, 0, sizeof *srq_context);

	srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
						      srq->srqn);
	srq_context->logstride          = srq->wqe_shift - 4;
	srq_context->xrcd               = cpu_to_be16(xrcd);
	srq_context->pg_offset_cqn      = cpu_to_be32(cqn & 0xffffff);
	srq_context->log_page_size      = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	srq_context->mtt_base_addr_h    = mtt_addr >> 32;
	srq_context->mtt_base_addr_l    = cpu_to_be32(mtt_addr & 0xffffffff);
	srq_context->pd                 = cpu_to_be32(pdn);
	srq_context->db_rec_addr        = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_SRQ(dev, mailbox, srq->srqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);

	return 0;

err_radix:
	spin_lock_irq(&srq_table->lock);
	radix_tree_delete(&srq_table->tree, srq->srqn);
	spin_unlock_irq(&srq_table->lock);

err_icm:
	mlx4_srq_free_icm(dev, srq->srqn);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_srq_alloc);

/*
 * Free an SRQ: take it back from the firmware (HW2SW_SRQ), remove it from
 * the radix tree, then wait for outstanding event handlers to drop their
 * references before releasing the ICM and the SRQ number.
 */
void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	int err;

	err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn);
	if (err)
		mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n", err, srq->srqn);

	spin_lock_irq(&srq_table->lock);
	radix_tree_delete(&srq_table->tree, srq->srqn);
	spin_unlock_irq(&srq_table->lock);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	mlx4_srq_free_icm(dev, srq->srqn);
}
EXPORT_SYMBOL_GPL(mlx4_srq_free);

int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark)
{
	return mlx4_ARM_SRQ(dev, srq->srqn, limit_watermark);
}
EXPORT_SYMBOL_GPL(mlx4_srq_arm);

int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_srq_context *srq_context;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	srq_context = mailbox->buf;

	err = mlx4_QUERY_SRQ(dev, mailbox, srq->srqn);
	if (err)
		goto err_out;
	*limit_watermark = be16_to_cpu(srq_context->limit_watermark);

err_out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_srq_query);

int mlx4_init_srq_table(struct mlx4_dev *dev)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	int err;

	spin_lock_init(&srq_table->lock);
	INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
			       dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
	if (err)
		return err;

	return 0;
}

void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
}
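
/*
 * Illustrative sketch: roughly how a consumer such as mlx4_ib drives the
 * exported SRQ API above. The function name and parameters are only an
 * example; the caller is assumed to have already built the SRQ buffer,
 * its MTT and the doorbell record, and to have filled in srq->max and
 * srq->wqe_shift, since mlx4_srq_alloc() encodes them into the SRQ context.
 */
static int example_create_srq(struct mlx4_dev *dev, u32 pdn, u32 cqn,
			      u16 xrcdn, struct mlx4_mtt *mtt, u64 db_dma,
			      struct mlx4_srq *srq)
{
	int err;

	/* Hand the SRQ to the firmware. */
	err = mlx4_srq_alloc(dev, pdn, cqn, xrcdn, mtt, db_dma, srq);
	if (err)
		return err;

	/* Ask for an SRQ limit event once fewer than 16 WQEs remain posted. */
	err = mlx4_srq_arm(dev, srq, 16);
	if (err)
		mlx4_srq_free(dev, srq);

	return err;
}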