/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/iw_cxgbe/mem.c 318799 2017-05-24 18:16:20Z np $");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <linux/types.h>
#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <asm/atomic.h>

#include <common/t4_msg.h>
#include "iw_cxgbe.h"

#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96

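/* Reject memory regions of 8GB or larger on T4/T5 parts. */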
static int
mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
{

	return ((is_t4(dev->rdev.adap) ||
		is_t5(dev->rdev.adap)) &&
		length >= 8*1024*1024*1024ULL);
}

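/*
 * Write 'len' bytes of 'data' into adapter memory at offset 'addr' (in
 * 32-byte units) using ULP_TX MEM_WRITE work requests with immediate data.
 * The payload is sent in C4IW_MAX_INLINE_SIZE chunks; only the last work
 * request asks for a completion, which is waited on before returning.
 * A NULL 'data' pointer zeroes the region instead.
 */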
static int
write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
	struct adapter *sc = rdev->adap;
	struct ulp_mem_io *ulpmc;
	struct ulptx_idata *ulpsc;
	u8 wr_len, *to_dp, *from_dp;
	int copy_len, num_wqe, i, ret = 0;
	struct c4iw_wr_wait wr_wait;
	struct wrqe *wr;
	u32 cmd;

	cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
	if (is_t4(sc))
		cmd |= cpu_to_be32(F_ULP_MEMIO_ORDER);
	else
		cmd |= cpu_to_be32(F_T5_ULP_MEMIO_IMM);

	addr &= 0x7FFFFFF;
	CTR3(KTR_IW_CXGBE, "%s addr 0x%x len %u", __func__, addr, len);
	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	c4iw_init_wr_wait(&wr_wait);
	for (i = 0; i < num_wqe; i++) {

		copy_len = min(len, C4IW_MAX_INLINE_SIZE);
		wr_len = roundup(sizeof *ulpmc + sizeof *ulpsc +
				 roundup(copy_len, T4_ULPTX_MIN_IO), 16);

		wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
		if (wr == NULL)
			return (-ENOMEM);
		ulpmc = wrtod(wr);

		memset(ulpmc, 0, wr_len);
		INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);

		if (i == (num_wqe-1)) {
			ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
						    F_FW_WR_COMPL);
			ulpmc->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
		} else
			ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR));
		ulpmc->wr.wr_mid = cpu_to_be32(
				       V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));

		ulpmc->cmd = cmd;
		ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(
		    DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
		ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(ulpmc->wr),
						      16));
		ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr + i * 3));

		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
		ulpsc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
		ulpsc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

		to_dp = (u8 *)(ulpsc + 1);
		from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
		if (data)
			memcpy(to_dp, from_dp, copy_len);
		else
			memset(to_dp, 0, copy_len);
		if (copy_len % T4_ULPTX_MIN_IO)
			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
			       (copy_len % T4_ULPTX_MIN_IO));
		t4_wrq_tx(sc, wr);
		len -= C4IW_MAX_INLINE_SIZE;
	}

	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	return ret;
}

/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 *     pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
			   u32 *stag, u8 stag_state, u32 pdid,
			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
			   int bind_enabled, u32 zbva, u64 to,
			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
{
	int err;
	struct fw_ri_tpte tpt;
	u32 stag_idx;
	static atomic_t key;

	if (c4iw_fatal_error(rdev))
		return -EIO;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
		if (!stag_idx) {
			mutex_lock(&rdev->stats.lock);
			rdev->stats.stag.fail++;
			mutex_unlock(&rdev->stats.lock);
			return -ENOMEM;
		}
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur += 32;
		if (rdev->stats.stag.cur > rdev->stats.stag.max)
			rdev->stats.stag.max = rdev->stats.stag.cur;
		mutex_unlock(&rdev->stats.lock);
		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
	}
	CTR5(KTR_IW_CXGBE,
	    "%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x",
	    __func__, stag_state, type, pdid, stag_idx);

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(&tpt, 0, sizeof(tpt));
	else {
		tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
			V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
			V_FW_RI_TPTE_STAGSTATE(stag_state) |
			V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
		tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
			(bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
			V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
						      FW_RI_VA_BASED_TO)) |
			V_FW_RI_TPTE_PS(page_size));
		tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
			V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
		tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
		tpt.va_hi = cpu_to_be32((u32)(to >> 32));
		tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
		tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
		tpt.len_hi = cpu_to_be32((u32)(len >> 32));
	}
	err = write_adapter_mem(rdev, stag_idx +
				(rdev->adap->vres.stag.start >> 5),
				sizeof(tpt), &tpt);

	if (reset_tpt_entry) {
		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur -= 32;
		mutex_unlock(&rdev->stats.lock);
	}
	return err;
}

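/*
 * Copy a physical buffer list (big-endian 64-bit addresses) into the
 * adapter's PBL memory.  'pbl_addr' is a byte offset into PBL memory and
 * 'pbl_size' is the number of 8-byte entries to write.
 */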
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
		     u32 pbl_addr, u32 pbl_size)
{
	int err;

	CTR4(KTR_IW_CXGBE, "%s pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d",
	     __func__, pbl_addr, rdev->adap->vres.pbl.start, pbl_size);

	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
	return err;
}

static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
		     u32 pbl_addr)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
			       pbl_size, pbl_addr);
}

static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
			       0UL, 0, 0, 0, 0);
}

static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
			       0);
}

static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			 u32 pbl_size, u32 pbl_addr)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
			       0UL, 0, 0, pbl_size, pbl_addr);
}

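/*
 * Finish a memory registration: mark the MR valid, record its stag, derive
 * the lkey/rkey from the stag, and insert the MR into the device's mmid
 * table (mmid == stag >> 8).
 */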
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	CTR3(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p", __func__, mmid, mhp);
	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}

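/*
 * register_mem/reregister_mem write (or rewrite) a non-shared memory
 * region's TPT entry from the attributes cached in the c4iw_mr and then
 * complete the registration.  On failure after the TPT write, the entry
 * is invalidated again via dereg_mem().
 */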
static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
		      struct c4iw_mr *mhp, int shift)
{
	u32 stag = T4_STAG_UNSET;
	int ret;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.len ? mhp->attr.perms : 0,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len ? mhp->attr.len : -1, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	return ret;
}

static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			  struct c4iw_mr *mhp, int shift, int npages)
{
	u32 stag;
	int ret;

	if (npages > mhp->attr.pbl_size)
		return -ENOMEM;

	stag = mhp->attr.stag;
	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);

	return ret;
}

static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
						    npages << 3);

	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}

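/*
 * Convert an array of physical buffers into a page list suitable for the
 * hardware: pick the largest page shift that covers every buffer, round
 * the first buffer down to that boundary, and return the resulting page
 * addresses (big-endian) along with the total length and page count.
 * The caller must kfree(*page_list).
 */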
static int build_phys_page_list(struct ib_phys_buf *buffer_list,
				int num_phys_buf, u64 *iova_start,
				u64 *total_size, int *npages,
				int *shift, __be64 **page_list)
{
	u64 mask;
	int i, j, n;

	mask = 0;
	*total_size = 0;
	for (i = 0; i < num_phys_buf; ++i) {
		if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
			return -EINVAL;
		if (i != 0 && i != num_phys_buf - 1 &&
		    (buffer_list[i].size & ~PAGE_MASK))
			return -EINVAL;
		*total_size += buffer_list[i].size;
		if (i > 0)
			mask |= buffer_list[i].addr;
		else
			mask |= buffer_list[i].addr & PAGE_MASK;
		if (i != num_phys_buf - 1)
			mask |= buffer_list[i].addr + buffer_list[i].size;
		else
			mask |= (buffer_list[i].addr + buffer_list[i].size +
				PAGE_SIZE - 1) & PAGE_MASK;
	}

	/* Find largest page shift we can use to cover buffers */
	for (*shift = PAGE_SHIFT; *shift < PAGE_SHIFT + M_FW_RI_TPTE_PS;
	    ++(*shift))
		if ((1ULL << *shift) & mask)
			break;

	buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
	buffer_list[0].addr &= ~0ull << *shift;

	*npages = 0;
	for (i = 0; i < num_phys_buf; ++i)
		*npages += (buffer_list[i].size +
			(1ULL << *shift) - 1) >> *shift;

	if (!*npages)
		return -EINVAL;

	*page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
	if (!*page_list)
		return -ENOMEM;

	n = 0;
	for (i = 0; i < num_phys_buf; ++i)
		for (j = 0;
		     j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
		     ++j)
			(*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
			    ((u64) j << *shift));

	CTR6(KTR_IW_CXGBE,
	    "%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d", __func__,
	    (unsigned long long)*iova_start, (unsigned long long)mask, *shift,
	    (unsigned long long)*total_size, *npages);

	return 0;
}

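/*
 * Re-register a physical memory region.  Depending on mr_rereg_mask this
 * can switch the PD, the access permissions, and/or the translation; the
 * new TPT entry is written from a scratch copy of the MR and the cached
 * attributes are updated only if the rewrite succeeds.
 */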
int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
			     struct ib_pd *pd, struct ib_phys_buf *buffer_list,
			     int num_phys_buf, int acc, u64 *iova_start)
{

	struct c4iw_mr mh, *mhp;
	struct c4iw_pd *php;
	struct c4iw_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size = 0;
	int npages = 0;
	int ret;

	CTR3(KTR_IW_CXGBE, "%s ib_mr %p ib_pd %p", __func__, mr, pd);

	/* There can be no memory windows */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(mr);
	rhp = mhp->rhp;
	php = to_c4iw_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_c4iw_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
		mh.attr.perms = c4iw_ib_to_tpt_access(acc);
		mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
					 IB_ACCESS_MW_BIND;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
						iova_start,
						&total_size, &npages,
						&shift, &page_list);
		if (ret)
			return ret;
	}
	if (mr_exceeds_hw_limits(rhp, total_size)) {
		kfree(page_list);
		return -EINVAL;
	}
	ret = reregister_mem(rhp, php, &mh, shift, npages);
	kfree(page_list);
	if (ret)
		return ret;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}

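/*
 * Register a memory region described by a list of physical buffers:
 * build the page list, allocate and write a PBL, then write the TPT
 * entry via register_mem().
 */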
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
				     struct ib_phys_buf *buffer_list,
				     int num_phys_buf, int acc, u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
					&total_size, &npages, &shift,
					&page_list);
	if (ret)
		goto err;

	if (mr_exceeds_hw_limits(rhp, total_size)) {
		kfree(page_list);
		ret = -EINVAL;
		goto err;
	}
	ret = alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err;
	}

	ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
			     npages);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = total_size;
	mhp->attr.pbl_size = npages;
	ret = register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			      mhp->attr.pbl_size << 3);

err:
	kfree(mhp);
	return ERR_PTR(ret);

}

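/*
 * Allocate a DMA MR that spans the entire address space (VA-based, offset
 * 0, length ~0) with the requested access rights.
 */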
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;
	u32 stag = T4_STAG_UNSET;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
	mhp->attr.zbva = 0;
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = 0;
	mhp->attr.len = ~0UL;
	mhp->attr.pbl_size = 0;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
	if (ret)
		goto err1;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		goto err2;
	return &mhp->ibmr;
err2:
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
err1:
	kfree(mhp);
	return ERR_PTR(ret);
}

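/*
 * Register a user memory region: pin the user pages with ib_umem_get(),
 * gather their DMA addresses into a PBL (written out one page of entries
 * at a time), and then write the TPT entry via register_mem().
 */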
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    u64 virt, int acc, struct ib_udata *udata, int mr_id)
{
	__be64 *pages;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);

	if (length == ~0ULL)
		return ERR_PTR(-EINVAL);

	if ((length + start) < start)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_exceeds_hw_limits(rhp, length))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = 0;
	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		n += chunk->nents;

	err = alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = cpu_to_be64(sg_dma_address(
					&chunk->page_list[j]) +
					mhp->umem->page_size * k);
				if (i == PAGE_SIZE / sizeof *pages) {
					err = write_pbl(&mhp->rhp->rdev,
					      pages,
					      mhp->attr.pbl_addr + (n << 3), i);
					if (err)
						goto pbl_done;
					n += i;
					i = 0;
				}
			}
		}

	if (i)
		err = write_pbl(&mhp->rhp->rdev, pages,
				     mhp->attr.pbl_addr + (n << 3), i);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = length;

	err = register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			      mhp->attr.pbl_size << 3);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}

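/*
 * Allocate a memory window: write an FW_RI_STAG_MW TPT entry and insert
 * the new window into the device's mmid table.
 */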
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	CTR4(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p stag 0x%x", __func__, mmid, mhp,
	    stag);
	return &(mhp->ibmw);
}

int c4iw_dealloc_mw(struct ib_mw *mw)
{
	struct c4iw_dev *rhp;
	struct c4iw_mw *mhp;
	u32 mmid;

	mhp = to_c4iw_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	deallocate_window(&rhp->rdev, mhp->attr.stag);
	kfree(mhp);
	CTR4(KTR_IW_CXGBE, "%s ib_mw %p mmid 0x%x ptr %p", __func__, mw, mmid,
	    mhp);
	return 0;
}

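/*
 * Allocate a fast-register MR: reserve a PBL of 'pbl_depth' entries and an
 * NSMR stag for it; the translation itself is expected to be filled in
 * later by a fast-register work request.
 */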
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->rhp = rhp;
	ret = alloc_pbl(mhp, pbl_depth);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = pbl_depth;
	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
				 mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_NSMR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err3;
	}

	CTR4(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p stag 0x%x", __func__, mmid, mhp,
	    stag);
	return &(mhp->ibmr);
err3:
	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
err2:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			      mhp->attr.pbl_size << 3);
err1:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

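/*
 * Allocate a page list for fast-register work requests.  The list is
 * carved out of a single physically contiguous allocation so that its
 * bus address can be handed to the hardware directly.
 */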
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
						     int page_list_len)
{
	struct c4iw_fr_page_list *c4pl;
	struct c4iw_dev *dev = to_c4iw_dev(device);
	bus_addr_t dma_addr;
	int size = sizeof *c4pl + page_list_len * sizeof(u64);

	c4pl = contigmalloc(size, M_DEVBUF, M_NOWAIT, 0ul, ~0ul, 4096, 0);
	if (c4pl)
		dma_addr = vtophys(c4pl);
	else
		return ERR_PTR(-ENOMEM);

	pci_unmap_addr_set(c4pl, mapping, dma_addr);
	c4pl->dma_addr = dma_addr;
	c4pl->dev = dev;
	c4pl->size = size;
	c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
	c4pl->ibpl.max_page_list_len = page_list_len;

	return &c4pl->ibpl;
}

void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
{
	struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
	contigfree(c4pl, c4pl->size, M_DEVBUF);
}

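/*
 * Deregister an MR: remove it from the mmid table, clear its TPT entry,
 * and release its PBL, kernel buffer, and user memory as applicable.
 */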
int c4iw_dereg_mr(struct ib_mr *ib_mr)
{
	struct c4iw_dev *rhp;
	struct c4iw_mr *mhp;
	u32 mmid;

	CTR2(KTR_IW_CXGBE, "%s ib_mr %p", __func__, ib_mr);
	/* There can be no memory windows */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	if (mhp->attr.pbl_size)
		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
				  mhp->attr.pbl_size << 3);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	CTR3(KTR_IW_CXGBE, "%s mmid 0x%x ptr %p", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}
#endif