/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sbuf.h>

#include "iw_cxgbe.h"
#include "user.h"

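/*
 * Destroy the hardware CQ: issue a FW_RI_RES_WR with op RESET on the
 * adapter's management queue, wait for the firmware reply, then free the
 * software queue, the DMA queue memory, and the CQID.
 */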
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		      struct c4iw_dev_ucontext *uctx)
{
	struct adapter *sc = rdev->adap;
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct wrqe *wr;

	wr_len = sizeof *res_wr + sizeof *res;
	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
	if (wr == NULL)
		return (0);
	res_wr = wrtod(wr);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			V_FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			F_FW_WR_COMPL);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(&wr_wait);

	t4_wrq_tx(sc, wr);

	c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);

	kfree(cq->sw_queue);
	contigfree(cq->queue, cq->memsize, M_DEVBUF);
	c4iw_put_cqid(rdev, cq->cqid, uctx);
	return 0;
}

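/*
 * Create a hardware CQ: allocate a CQID and the DMA queue memory (plus a
 * software shadow queue for kernel CQs), then issue a FW_RI_RES_WR with
 * op WRITE so the firmware instantiates the ingress queue.  On success
 * the GTS doorbell pointer is set up; user CQs also get the address of
 * their user doorbell page in cq->ugts.
 */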
static int
create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
    struct c4iw_dev_ucontext *uctx)
{
	struct adapter *sc = rdev->adap;
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	struct c4iw_wr_wait wr_wait;
	int ret;
	struct wrqe *wr;

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}

	cq->queue = contigmalloc(cq->memsize, M_DEVBUF, M_NOWAIT, 0ul, ~0ul,
	    PAGE_SIZE, 0);
	if (cq->queue)
		cq->dma_addr = vtophys(cq->queue);
	else {
		ret = -ENOMEM;
		goto err3;
	}

	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;

	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
	if (wr == NULL) {
		ret = -ENOMEM;
		goto err4;
	}
	res_wr = wrtod(wr);

	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			V_FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			F_FW_WR_COMPL);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
149256694Snp	//Fixme: Always use first queue id for IQANDSTINDEX. Linux does the same.
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			V_FW_RI_RES_WR_IQANUS(0) |
			V_FW_RI_RES_WR_IQANUD(1) |
			F_FW_RI_RES_WR_IQANDST |
			V_FW_RI_RES_WR_IQANDSTINDEX(sc->sge.ofld_rxq[0].iq.abs_id));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			F_FW_RI_RES_WR_IQDROPRSS |
			V_FW_RI_RES_WR_IQPCIECH(2) |
			V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
			F_FW_RI_RES_WR_IQO |
			V_FW_RI_RES_WR_IQESIZE(1));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	t4_wrq_tx(sc, wr);

	CTR2(KTR_IW_CXGBE, "%s wait_event wr_wait %p", __func__, &wr_wait);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = (void *)((unsigned long)rman_get_virtual(sc->regs_res) +
	    MYPF_REG(SGE_PF_GTS));
	cq->rdev = rdev;

	if (user) {
		cq->ugts = (u64)((char*)rman_get_virtual(sc->udbs_res) +
		    (cq->cqid << rdev->cqshift));
		cq->ugts &= PAGE_MASK;
		CTR5(KTR_IW_CXGBE,
		    "%s: UGTS %p cqid %x cqshift %d page_mask %x", __func__,
		    cq->ugts, cq->cqid, rdev->cqshift, PAGE_MASK);
	}
	return 0;
err4:
	contigfree(cq->queue, cq->memsize, M_DEVBUF);
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}

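/*
 * Insert a software-generated flush CQE (status T4_ERR_SWFLUSH) into the
 * SWCQ for one outstanding RQE.  Used when flushing a QP's receive queue.
 */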
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_cqe cqe;

	CTR5(KTR_IW_CXGBE, "%s wq %p cq %p sw_cidx %u sw_pidx %u", __func__, wq,
	    cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
				 V_CQE_OPCODE(FW_RI_SEND) |
				 V_CQE_TYPE(0) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->sq.qid));
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
	int flushed = 0;
	int in_use = wq->rq.in_use - count;

	BUG_ON(in_use < 0);
	CTR5(KTR_IW_CXGBE, "%s wq %p cq %p rq.in_use %u skip count %u",
	    __func__, wq, cq, wq->rq.in_use, count);
	while (in_use--) {
		insert_recv_cqe(wq, cq);
		flushed++;
	}
	return flushed;
}

static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
			  struct t4_swsqe *swcqe)
{
	struct t4_cqe cqe;

	CTR5(KTR_IW_CXGBE, "%s wq %p cq %p sw_cidx %u sw_pidx %u", __func__, wq,
	    cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
				 V_CQE_OPCODE(swcqe->opcode) |
				 V_CQE_TYPE(1) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->sq.qid));
	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

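/*
 * Insert flush CQEs for the SQ WRs that are still outstanding, starting
 * just past the 'count' entries that already have completions in the SWCQ.
 */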
int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
	int flushed = 0;
	struct t4_swsqe *swsqe = &wq->sq.sw_sq[wq->sq.cidx + count];
	int in_use = wq->sq.in_use - count;

	BUG_ON(in_use < 0);
	while (in_use--) {
		swsqe->signaled = 0;
		insert_sq_cqe(wq, cq, swsqe);
		swsqe++;
		if (swsqe == (wq->sq.sw_sq + wq->sq.size))
			swsqe = wq->sq.sw_sq;
		flushed++;
	}
	return flushed;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 */
void c4iw_flush_hw_cq(struct t4_cq *cq)
{
	struct t4_cqe *cqe = NULL, *swcqe;
	int ret;

	CTR3(KTR_IW_CXGBE, "%s cq %p cqid 0x%x", __func__, cq, cq->cqid);
	ret = t4_next_hw_cqe(cq, &cqe);
	while (!ret) {
		CTR3(KTR_IW_CXGBE, "%s flushing hwcq cidx 0x%x swcq pidx 0x%x",
		    __func__, cq->cidx, cq->sw_pidx);
		swcqe = &cq->sw_queue[cq->sw_pidx];
		*swcqe = *cqe;
		swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
		t4_swcq_produce(cq);
		t4_hwcq_consume(cq);
		ret = t4_next_hw_cqe(cq, &cqe);
	}
}

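/*
 * Decide whether a CQE completes a WR that the consumer posted.
 * Terminate CQEs, the RQ half of peer RDMA writes, the SQ half of read
 * responses, and unsolicited SENDs arriving on an empty RQ do not.
 */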
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
		return 0;

	if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
		return 0;
	return 1;
}

void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
	struct t4_cqe *cqe;
	u32 ptr;

	*count = 0;
	ptr = cq->sw_cidx;
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
				      wq->sq.oldest_read)) &&
		    (CQE_QPID(cqe) == wq->sq.qid))
			(*count)++;
		if (++ptr == cq->size)
			ptr = 0;
	}
	CTR3(KTR_IW_CXGBE, "%s cq %p count %d", __func__, cq, *count);
}

void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
	struct t4_cqe *cqe;
	u32 ptr;

	*count = 0;
	CTR2(KTR_IW_CXGBE, "%s count zero %d", __func__, *count);
	ptr = cq->sw_cidx;
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
		    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
			(*count)++;
		if (++ptr == cq->size)
			ptr = 0;
	}
	CTR3(KTR_IW_CXGBE, "%s cq %p count %d", __func__, cq, *count);
}

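/*
 * Walk the SW SQ from cidx, skipping unsignaled WRs.  If the first
 * signaled WR encountered already has its completion saved (it arrived
 * out of order), move that CQE into the SWCQ and reap the unsignaled WRs
 * that preceded it.
 */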
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_swsqe *swsqe;
	u16 ptr = wq->sq.cidx;
	int count = wq->sq.in_use;
	int unsignaled = 0;

	swsqe = &wq->sq.sw_sq[ptr];
	while (count--)
		if (!swsqe->signaled) {
			if (++ptr == wq->sq.size)
				ptr = 0;
			swsqe = &wq->sq.sw_sq[ptr];
			unsignaled++;
		} else if (swsqe->complete) {

			/*
			 * Insert this completed cqe into the swcq.
			 */
			CTR3(KTR_IW_CXGBE,
			    "%s moving cqe into swcq sq idx %u cq idx %u",
			    __func__, ptr, cq->sw_pidx);
			swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
			t4_swcq_produce(cq);
			swsqe->signaled = 0;
			wq->sq.in_use -= unsignaled;
			break;
		} else
			break;
}

static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
				struct t4_cqe *read_cqe)
{
	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
	read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);
	read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
				 V_CQE_SWCQE(SW_CQE(hw_cqe)) |
				 V_CQE_OPCODE(FW_RI_READ_REQ) |
				 V_CQE_TYPE(1));
	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

/*
 * Advance wq->sq.oldest_read to the next read wr in the SWSQ, or set it
 * to NULL if there are no more outstanding read requests.
 */
static void advance_oldest_read(struct t4_wq *wq)
{
	u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

	if (rptr == wq->sq.size)
		rptr = 0;
	while (rptr != wq->sq.pidx) {
		wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

		if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
			return;
		if (++rptr == wq->sq.size)
			rptr = 0;
	}
	wq->sq.oldest_read = NULL;
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *    0		    CQE returned ok.
 *    -EAGAIN       CQE skipped, try again.
 *    -EOVERFLOW    CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
		   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
	int ret = 0;
	struct t4_cqe *hw_cqe, read_cqe;

	*cqe_flushed = 0;
	*credit = 0;
	ret = t4_next_cqe(cq, &hw_cqe);
	if (ret)
		return ret;

	CTR6(KTR_IW_CXGBE,
	    "%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x", __func__,
	    CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe), CQE_GENBIT(hw_cqe),
	    CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe));
	CTR5(KTR_IW_CXGBE,
	    "%s opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x",
	    __func__, CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
	    CQE_WRID_LOW(hw_cqe));

	/*
	 * skip cqe's not affiliated with a QP.
	 */
	if (wq == NULL) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
	 */
	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

		/*
		 * If this is an unsolicited read response, then the read
		 * was generated by the kernel driver as part of peer-2-peer
		 * connection setup.  So ignore the completion.
		 */
		if (!wq->sq.oldest_read) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Don't write to the HWCQ, so create a new read req CQE
		 * in local memory.
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		hw_cqe = &read_cqe;
		advance_oldest_read(wq);
	}

	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
		*cqe_flushed = t4_wq_in_error(wq);
		t4_set_wq_in_error(wq);
		goto proc_cqe;
	}

	if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * RECV completion.
	 */
	if (RQ_TYPE(hw_cqe)) {

		/*
		 * HW only validates 4 bits of MSN.  So we must validate that
		 * the MSN in the SEND is the next expected MSN.  If it's not,
		 * then we complete this with T4_ERR_MSN and mark the wq in
		 * error.
		 */

		if (t4_rq_empty(wq)) {
			t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}
		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
			t4_set_wq_in_error(wq);
			hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
			goto proc_cqe;
		}
		goto proc_cqe;
	}

	/*
	 * If we get here it's a send completion.
	 *
	 * Handle out of order completion. These get stuffed
	 * in the SW SQ. Then the SW SQ is walked to move any
	 * now in-order completions into the SW CQ.  This handles
	 * 2 cases:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
		struct t4_swsqe *swsqe;

		CTR2(KTR_IW_CXGBE,
		    "%s out of order completion going in sw_sq at idx %u",
		    __func__, CQE_WRID_SQ_IDX(hw_cqe));
		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
		swsqe->cqe = *hw_cqe;
		swsqe->complete = 1;
		ret = -EAGAIN;
		goto flush_wq;
	}

proc_cqe:
	*cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(hw_cqe)) {
		wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
		CTR2(KTR_IW_CXGBE, "%s completing sq idx %u",
		     __func__, wq->sq.cidx);
		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
		t4_sq_consume(wq);
	} else {
		CTR2(KTR_IW_CXGBE, "%s completing rq idx %u",
		     __func__, wq->rq.cidx);
		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
		BUG_ON(t4_rq_empty(wq));
		t4_rq_consume(wq);
	}

flush_wq:
	/*
	 * Flush any completed cqes that are now in-order.
	 */
	flush_completed_wrs(wq, cq);

skip_cqe:
	if (SW_CQE(hw_cqe)) {
		CTR4(KTR_IW_CXGBE, "%s cq %p cqid 0x%x skip sw cqe cidx %u",
		     __func__, cq, cq->cqid, cq->sw_cidx);
		t4_swcq_consume(cq);
	} else {
		CTR4(KTR_IW_CXGBE, "%s cq %p cqid 0x%x skip hw cqe cidx %u",
		     __func__, cq, cq->cqid, cq->cidx);
		t4_hwcq_consume(cq);
	}
	return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *	0			cqe returned
 *	-ENODATA		CQ empty
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
	struct c4iw_qp *qhp = NULL;
	struct t4_cqe cqe = {0, 0}, *rd_cqe;
	struct t4_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie = 0;
	int ret;

	ret = t4_next_cqe(&chp->cq, &rd_cqe);

	if (ret)
		return ret;

	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
	if (ret)
		goto out;

	wc->wr_id = cookie;
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(&cqe);
	wc->wc_flags = 0;

	CTR5(KTR_IW_CXGBE, "%s qpid 0x%x type %d opcode %d status 0x%x",
	    __func__, CQE_QPID(&cqe), CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
	    CQE_STATUS(&cqe));
	CTR5(KTR_IW_CXGBE, "%s len %u wrid hi 0x%x lo 0x%x cookie 0x%llx",
	    __func__, CQE_LEN(&cqe), CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
	    (unsigned long long)cookie);

	if (CQE_TYPE(&cqe) == 0) {
		if (!CQE_STATUS(&cqe))
			wc->byte_len = CQE_LEN(&cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
		if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
		    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		}
	} else {
		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case FW_RI_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(&cqe);
			break;
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case FW_RI_SEND:
		case FW_RI_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;
		case FW_RI_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;

		case FW_RI_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case FW_RI_FAST_REGISTER:
			wc->opcode = IB_WC_FAST_REG_MR;
			break;
		default:
			printf("Unexpected opcode %d "
			       "in the CQE received for QPID = 0x%0x\n",
			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {

		switch (CQE_STATUS(&cqe)) {
		case T4_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case T4_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case T4_ERR_QPID:
		case T4_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case T4_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case T4_ERR_INVALIDATE_SHARED_MR:
		case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case T4_ERR_CRC:
		case T4_ERR_MARKER:
		case T4_ERR_PDU_LEN_ERR:
		case T4_ERR_OUT_OF_RQE:
		case T4_ERR_DDP_VERSION:
		case T4_ERR_RDMA_VERSION:
		case T4_ERR_DDP_QUEUE_NUM:
		case T4_ERR_MSN:
		case T4_ERR_TBIT:
		case T4_ERR_MO:
		case T4_ERR_MSN_RANGE:
		case T4_ERR_IRD_OVERFLOW:
		case T4_ERR_OPCODE:
		case T4_ERR_INTERNAL_ERR:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case T4_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			printf("Unexpected cqe_status 0x%x for QPID = 0x%0x\n",
			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
		}
	}
out:
	if (wq)
		spin_unlock(&qhp->lock);
	return ret;
}

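/*
 * Poll up to num_entries work completions.  -EAGAIN from
 * c4iw_poll_cq_one means the CQE was skipped (e.g. not affiliated with a
 * QP), so it is retried; -ENODATA means the CQ is empty and is not
 * treated as an error.
 */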
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct c4iw_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_c4iw_cq(ibcq);

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		do {
			err = c4iw_poll_cq_one(chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);
	return !err || err == -ENODATA ? npolled : err;
}

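/*
 * Remove the CQ from the cqid table, drop the creation reference, wait
 * for concurrent users to drain, and then destroy the hardware queue.
 */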
int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
	struct c4iw_cq *chp;
	struct c4iw_ucontext *ucontext;

	CTR2(KTR_IW_CXGBE, "%s ib_cq %p", __func__, ib_cq);
	chp = to_c4iw_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
				  : NULL;
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
	kfree(chp);
	return 0;
}

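/*
 * Create a CQ with at least 'entries' usable entries.  The requested
 * size is padded for the status page and full/empty disambiguation,
 * rounded up for the hardware, and doubled to avoid cidx_inc overflows;
 * user CQs are additionally sized to a multiple of PAGE_SIZE and get
 * mmap entries for the queue memory and the GTS doorbell page.
 */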
struct ib_cq *
c4iw_create_cq(struct ib_device *ibdev, int entries, int vector,
    struct ib_ucontext *ib_context, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq_resp uresp;
	struct c4iw_ucontext *ucontext = NULL;
	int ret;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;

	CTR3(KTR_IW_CXGBE, "%s ib_dev %p entries %d", __func__, ibdev, entries);

	rhp = to_c4iw_dev(ibdev);

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context)
		ucontext = to_c4iw_ucontext(ib_context);

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make actual HW queue 2x to avoid cidx_inc overflows.
	 */
	hwentries = entries * 2;

	/*
	 * Make HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * sizeof *chp->cq.queue;

	/*
	 * memsize must be a multiple of the page size if it's a user cq.
	 */
	if (ucontext) {
		memsize = roundup(memsize, PAGE_SIZE);
		hwentries = memsize / sizeof *chp->cq.queue;
		while (hwentries > T4_MAX_IQ_SIZE) {
			memsize -= PAGE_SIZE;
			hwentries = memsize / sizeof *chp->cq.queue;
		}
	}
	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
	if (ret)
		goto err2;

	if (ucontext) {
		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			ret = -ENOMEM;
			goto err3;
		}
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			ret = -ENOMEM;
			goto err4;
		}

		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err5;

		mm->key = uresp.key;
		mm->addr = vtophys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.ugts;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	CTR6(KTR_IW_CXGBE,
	    "%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx",
	    __func__, chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
	    (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
err5:
	kfree(mm2);
err4:
	kfree(mm);
err3:
	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(chp);
	return ERR_PTR(ret);
}

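/* CQ resize is not supported. */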
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
	return -ENOSYS;
}

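/*
 * Arm the CQ for the next completion notification (solicited-only if
 * requested).  A non-zero return from t4_arm_cq is reported to the
 * consumer only if IB_CQ_REPORT_MISSED_EVENTS was requested.
 */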
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct c4iw_cq *chp;
	int ret;
	unsigned long flag;

	chp = to_c4iw_cq(ibcq);
	spin_lock_irqsave(&chp->lock, flag);
	ret = t4_arm_cq(&chp->cq,
			(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		ret = 0;
	return ret;
}
#endif