cq.c revision 309378
/*
 * Copyright (c) 2006-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#if HAVE_CONFIG_H
#  include <config.h>
#endif				/* HAVE_CONFIG_H */

#include <stdio.h>
#include <syslog.h>
#include <pthread.h>
#include <sys/errno.h>
#include <netinet/in.h>
#include <infiniband/opcode.h>
#include "libcxgb4.h"
#include "cxgb4-abi.h"

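/*
 * Insert a software-generated flush completion for one RQ WR into the
 * SW CQ.  The CQE is marked T4_ERR_SWFLUSH so the consumer sees a
 * flushed recv completion when the CQ is polled.
 */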
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_cqe cqe;

	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
	     wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
			         V_CQE_OPCODE(FW_RI_SEND) |
				 V_CQE_TYPE(0) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->sq.qid));
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

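/*
 * Flush the RQ: insert one SW flush CQE for each RQ WR still in use,
 * minus the 'count' entries the caller has already accounted for.
 * Returns the number of flush CQEs inserted.
 */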
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
	int flushed = 0;
	int in_use = wq->rq.in_use - count;

	BUG_ON(in_use < 0);
	PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
	     wq, cq, wq->rq.in_use, count);
	while (in_use--) {
		insert_recv_cqe(wq, cq);
		flushed++;
	}
	return flushed;
}

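/*
 * Insert a software-generated flush completion for the given SQ WR
 * into the SW CQ, preserving the WR's opcode and SQ index.
 */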
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
		          struct t4_swsqe *swcqe)
{
	struct t4_cqe cqe;

	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
	     wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
			         V_CQE_OPCODE(swcqe->opcode) |
			         V_CQE_TYPE(1) |
			         V_CQE_SWCQE(1) |
			         V_CQE_QPID(wq->sq.qid));
	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

static void advance_oldest_read(struct t4_wq *wq);

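/*
 * Generate flush completions for all SQ WRs that have not already
 * been flushed, starting at flush_cidx, and advance oldest_read past
 * any flushed read request.
 */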
void c4iw_flush_sq(struct c4iw_qp *qhp)
{
	unsigned short flushed = 0;
	struct t4_wq *wq = &qhp->wq;
	struct c4iw_cq *chp = to_c4iw_cq(qhp->ibv_qp.send_cq);
	struct t4_cq *cq = &chp->cq;
	int idx;
	struct t4_swsqe *swsqe;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	idx = wq->sq.flush_cidx;
	BUG_ON(idx >= wq->sq.size);
	while (idx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[idx];
		BUG_ON(swsqe->flushed);
		swsqe->flushed = 1;
		insert_sq_cqe(wq, cq, swsqe);
		if (wq->sq.oldest_read == swsqe) {
			BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
			advance_oldest_read(wq);
		}
		flushed++;
		if (++idx == wq->sq.size)
			idx = 0;
	}
	wq->sq.flush_cidx += flushed;
	if (wq->sq.flush_cidx >= wq->sq.size)
		wq->sq.flush_cidx -= wq->sq.size;
}

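/*
 * Walk the SW SQ from flush_cidx and move completions that are now
 * in-order into the SW CQ.  Unsignaled WRs are skipped; the walk
 * stops at the first signaled WR that has not yet completed.
 */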
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_swsqe *swsqe;
	unsigned short cidx;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	cidx = wq->sq.flush_cidx;
	BUG_ON(cidx >= wq->sq.size);

	while (cidx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[cidx];
		if (!swsqe->signaled) {
			if (++cidx == wq->sq.size)
				cidx = 0;
		} else if (swsqe->complete) {

			BUG_ON(swsqe->flushed);

			/*
			 * Insert this completed cqe into the swcq.
			 */
			PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
			     __func__, cidx, cq->sw_pidx);

			swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
			t4_swcq_produce(cq);
			swsqe->flushed = 1;
			if (++cidx == wq->sq.size)
				cidx = 0;
			wq->sq.flush_cidx = cidx;
		} else
			break;
	}
}

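/*
 * Build a READ_REQ completion in local memory from a hardware read
 * response CQE, taking the SQ index and length from the oldest
 * outstanding read WR.
 */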
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
				struct t4_cqe *read_cqe)
{
	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
	read_cqe->len = ntohl(wq->sq.oldest_read->read_len);
	read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
				 V_CQE_SWCQE(SW_CQE(hw_cqe)) |
				 V_CQE_OPCODE(FW_RI_READ_REQ) |
				 V_CQE_TYPE(1));
	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

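/*
 * Advance oldest_read to the next outstanding read request WR in the
 * SW SQ, or set it to NULL if there is none.
 */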
static void advance_oldest_read(struct t4_wq *wq)
{

	u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

	if (rptr == wq->sq.size)
		rptr = 0;
	while (rptr != wq->sq.pidx) {
		wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

		if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
			return;
		if (++rptr == wq->sq.size)
			rptr = 0;
	}
	wq->sq.oldest_read = NULL;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order completions and with completions that
 * complete prior unsignalled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp)
{
	struct t4_cqe *hw_cqe, *swcqe, read_cqe;
	struct c4iw_qp *qhp;
	struct t4_swsqe *swsqe;
	int ret;

	PDBG("%s  cqid 0x%x\n", __func__, chp->cq.cqid);
	ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

	/*
	 * This logic is similar to poll_cq(), but not quite the same
	 * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
	 * also do any translation magic that poll_cq() normally does.
	 */
	while (!ret) {
		qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

		/*
		 * drop CQEs with no associated QP
		 */
		if (qhp == NULL)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

			/*
			 * If we have reached here because of async
			 * event or other error, and have egress error
			 * then drop
			 */
			if (CQE_TYPE(hw_cqe) == 1) {
				syslog(LOG_CRIT, "%s: got egress error in "
				       "read-response, dropping!\n", __func__);
				goto next_cqe;
			}

			/*
			 * drop peer2peer RTR reads.
			 */
			if (CQE_WRID_STAG(hw_cqe) == 1)
				goto next_cqe;

			/*
			 * Eat completions for unsignaled read WRs.
			 */
			if (!qhp->wq.sq.oldest_read->signaled) {
				advance_oldest_read(&qhp->wq);
				goto next_cqe;
			}

			/*
			 * Don't write to the HWCQ, create a new read req CQE
			 * in local memory and move it into the swcq.
			 */
			create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
			hw_cqe = &read_cqe;
			advance_oldest_read(&qhp->wq);
		}

		/* if it's an SQ completion, then do the magic to move all the
		 * unsignaled and now in-order completions into the swcq.
		 */
		if (SQ_TYPE(hw_cqe)) {
			int idx = CQE_WRID_SQ_IDX(hw_cqe);

			BUG_ON(idx >= qhp->wq.sq.size);
			swsqe = &qhp->wq.sq.sw_sq[idx];
			swsqe->cqe = *hw_cqe;
			swsqe->complete = 1;
			flush_completed_wrs(&qhp->wq, &chp->cq);
		} else {
			swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
			*swcqe = *hw_cqe;
			swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
			t4_swcq_produce(&chp->cq);
		}
next_cqe:
		t4_hwcq_consume(&chp->cq);
		ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
	}
}

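/*
 * Return 1 if this CQE completes (consumes) a WR posted on the given
 * WQ, 0 otherwise.
 */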
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
		return 0;

	if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
		return 0;
	return 1;
}

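/*
 * Count the CQEs in the SW CQ that complete RQ WRs for the given WQ.
 */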
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
	struct t4_cqe *cqe;
	u32 ptr;

	*count = 0;
	ptr = cq->sw_cidx;
	BUG_ON(ptr >= cq->size);
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
		    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
			(*count)++;
		if (++ptr == cq->size)
			ptr = 0;
	}
	PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

static void dump_cqe(void *arg)
{
	u64 *p = arg;
	syslog(LOG_NOTICE, "cxgb4 err cqe %016llx %016llx %016llx %016llx\n",
	       (unsigned long long)be64_to_cpu(p[0]),
	       (unsigned long long)be64_to_cpu(p[1]),
	       (unsigned long long)be64_to_cpu(p[2]),
	       (unsigned long long)be64_to_cpu(p[3]));
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *    0		    CQE returned ok.
 *    -EAGAIN       CQE skipped, try again.
 *    -EOVERFLOW    CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
	           u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
	int ret = 0;
	struct t4_cqe *hw_cqe, read_cqe;

	*cqe_flushed = 0;
	*credit = 0;

	ret = t4_next_cqe(cq, &hw_cqe);
	if (ret)
		return ret;

	PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
	     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
	     __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
	     CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
	     CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
	     CQE_WRID_LOW(hw_cqe));

	/*
	 * Skip CQEs not affiliated with a QP.
	 */
	if (wq == NULL) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) T4 HW (for now) inserts target read response failures which
	 * 	   need to be skipped.
	 */
	if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

		/*
		 * If we have reached here because of async
		 * event or other error, and have egress error
		 * then drop
		 */
		if (CQE_TYPE(hw_cqe) == 1) {
			syslog(LOG_CRIT, "%s: got egress error in "
			       "read-response, dropping!\n", __func__);
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * If this is an unsolicited read response, then the read
		 * was generated by the kernel driver as part of peer-2-peer
		 * connection setup, or a target read response failure.
		 * So skip the completion.
		 */
		if (CQE_WRID_STAG(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Eat completions for unsignaled read WRs.
		 */
		if (!wq->sq.oldest_read->signaled) {
			advance_oldest_read(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Don't write to the HWCQ, so create a new read req CQE
		 * in local memory.
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		hw_cqe = &read_cqe;
		advance_oldest_read(wq);
	}

	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
		*cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
		wq->error = 1;

		if (!*cqe_flushed && CQE_STATUS(hw_cqe))
			dump_cqe(hw_cqe);

		BUG_ON((*cqe_flushed == 0) && !SW_CQE(hw_cqe));
		goto proc_cqe;
	}

	if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * RECV completion.
	 */
	if (RQ_TYPE(hw_cqe)) {

		/*
		 * HW only validates 4 bits of MSN.  So we must validate that
		 * the MSN in the SEND is the next expected MSN.  If it's not,
		 * then we complete this with T4_ERR_MSN and mark the wq in
		 * error.
		 */

		if (t4_rq_empty(wq)) {
			t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}
		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
			t4_set_wq_in_error(wq);
			hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
			goto proc_cqe;
		}
		goto proc_cqe;
	}

	/*
	 * If we get here it's a send completion.
	 *
	 * Handle out of order completion. These get stuffed
	 * in the SW SQ. Then the SW SQ is walked to move any
	 * now in-order completions into the SW CQ.  This handles
	 * 2 cases:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
		struct t4_swsqe *swsqe;
		int idx =  CQE_WRID_SQ_IDX(hw_cqe);

		PDBG("%s out of order completion going in sw_sq at idx %u\n",
		     __func__, idx);
		BUG_ON(idx >= wq->sq.size);
		swsqe = &wq->sq.sw_sq[idx];
		swsqe->cqe = *hw_cqe;
		swsqe->complete = 1;
		ret = -EAGAIN;
		goto flush_wq;
	}

proc_cqe:
	*cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(hw_cqe)) {
		int idx = CQE_WRID_SQ_IDX(hw_cqe);
		BUG_ON(idx >= wq->sq.size);

		/*
		 * Account for any unsignaled completions completed by
		 * this signaled completion.  In this case, cidx points
		 * to the first unsignaled one, and idx points to the
		 * signaled one.  So adjust in_use based on this delta.
		 * If this is not completing any unsignaled WRs, then the
		 * delta will be 0. Handle wrapping also!
		 */
		if (idx < wq->sq.cidx)
			wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
		else
			wq->sq.in_use -= idx - wq->sq.cidx;
		BUG_ON(wq->sq.in_use <= 0 || wq->sq.in_use >= wq->sq.size);

		wq->sq.cidx = (u16)idx;
		PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
		t4_sq_consume(wq);
	} else {
		PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
		BUG_ON(wq->rq.cidx >= wq->rq.size);
		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
		BUG_ON(t4_rq_empty(wq));
		t4_rq_consume(wq);
		goto skip_cqe;
	}

flush_wq:
	/*
	 * Flush any completed cqes that are now in-order.
	 */
	flush_completed_wrs(wq, cq);

skip_cqe:
	if (SW_CQE(hw_cqe)) {
		PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->sw_cidx);
		t4_swcq_consume(cq);
	} else {
		PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->cidx);
		t4_hwcq_consume(cq);
	}
	return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *	0			cqe returned
 *	-ENODATA		CQ empty
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ibv_wc *wc)
{
	struct c4iw_qp *qhp = NULL;
	struct t4_cqe cqe, *rd_cqe;
	struct t4_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie = 0;
	int ret;

	ret = t4_next_cqe(&chp->cq, &rd_cqe);

	if (ret) {
#ifdef STALL_DETECTION
		if (ret == -ENODATA && stall_to && !chp->dumped) {
			struct timeval t;

			gettimeofday(&t, NULL);
			if ((t.tv_sec - chp->time.tv_sec) > stall_to) {
				dump_state();
				chp->dumped = 1;
			}
		}
#endif
		return ret;
	}

#ifdef STALL_DETECTION
	gettimeofday(&chp->time, NULL);
#endif

	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		pthread_spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
	if (ret)
		goto out;

	INC_STAT(cqe);
	wc->wr_id = cookie;
	wc->qp_num = qhp->wq.sq.qid;
	wc->vendor_err = CQE_STATUS(&cqe);
	wc->wc_flags = 0;

	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
	     "lo 0x%x cookie 0x%llx\n", __func__,
	     CQE_QPID(&cqe), CQE_TYPE(&cqe),
	     CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_WRID_HI(&cqe),
	     CQE_WRID_LOW(&cqe), (unsigned long long)cookie);

	if (CQE_TYPE(&cqe) == 0) {
		if (!CQE_STATUS(&cqe))
			wc->byte_len = CQE_LEN(&cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IBV_WC_RECV;
	} else {
		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_RDMA_WRITE:
			wc->opcode = IBV_WC_RDMA_WRITE;
			break;
		case FW_RI_READ_REQ:
			wc->opcode = IBV_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(&cqe);
			break;
		case FW_RI_SEND:
		case FW_RI_SEND_WITH_SE:
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IBV_WC_SEND;
			break;
		case FW_RI_BIND_MW:
			wc->opcode = IBV_WC_BIND_MW;
			break;
		default:
			PDBG("Unexpected opcode %d "
			     "in the CQE received for QPID=0x%0x\n",
			     CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed)
		wc->status = IBV_WC_WR_FLUSH_ERR;
	else {

		switch (CQE_STATUS(&cqe)) {
		case T4_ERR_SUCCESS:
			wc->status = IBV_WC_SUCCESS;
			break;
		case T4_ERR_STAG:
			wc->status = IBV_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_PDID:
			wc->status = IBV_WC_LOC_PROT_ERR;
			break;
		case T4_ERR_QPID:
		case T4_ERR_ACCESS:
			wc->status = IBV_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_WRAP:
			wc->status = IBV_WC_GENERAL_ERR;
			break;
		case T4_ERR_BOUND:
			wc->status = IBV_WC_LOC_LEN_ERR;
			break;
		case T4_ERR_INVALIDATE_SHARED_MR:
		case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IBV_WC_MW_BIND_ERR;
			break;
		case T4_ERR_CRC:
		case T4_ERR_MARKER:
		case T4_ERR_PDU_LEN_ERR:
		case T4_ERR_OUT_OF_RQE:
		case T4_ERR_DDP_VERSION:
		case T4_ERR_RDMA_VERSION:
		case T4_ERR_DDP_QUEUE_NUM:
		case T4_ERR_MSN:
		case T4_ERR_TBIT:
		case T4_ERR_MO:
		case T4_ERR_MSN_RANGE:
		case T4_ERR_IRD_OVERFLOW:
		case T4_ERR_OPCODE:
		case T4_ERR_INTERNAL_ERR:
			wc->status = IBV_WC_FATAL_ERR;
			break;
		case T4_ERR_SWFLUSH:
			wc->status = IBV_WC_WR_FLUSH_ERR;
			break;
		default:
			PDBG("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
			     CQE_STATUS(&cqe), CQE_QPID(&cqe));
			wc->status = IBV_WC_FATAL_ERR;
		}
	}
	if (wc->status && wc->status != IBV_WC_WR_FLUSH_ERR)
		syslog(LOG_NOTICE, "cxgb4 app err cqid %u qpid %u "
			"type %u opcode %u status 0x%x\n",
			chp->cq.cqid, CQE_QPID(&cqe), CQE_TYPE(&cqe),
			CQE_OPCODE(&cqe), CQE_STATUS(&cqe));
out:
	if (wq)
		pthread_spin_unlock(&qhp->lock);
	return ret;
}

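/*
 * Poll up to num_entries completions from the CQ into the wc array.
 * If the CQ is in error, the device's QPs are flushed first.  With
 * num_entries == 0, simply report whether the CQ is non-empty.
 * Returns the number of completions polled, or a negative errno on a
 * fatal error.
 */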
int c4iw_poll_cq(struct ibv_cq *ibcq, int num_entries, struct ibv_wc *wc)
{
	struct c4iw_cq *chp;
	int npolled;
	int err = 0;

	chp = to_c4iw_cq(ibcq);

	if (t4_cq_in_error(&chp->cq)) {
		t4_reset_cq_in_error(&chp->cq);
		c4iw_flush_qps(chp->rhp);
	}

	if (!num_entries)
		return t4_cq_notempty(&chp->cq);

	pthread_spin_lock(&chp->lock);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		do {
			err = c4iw_poll_cq_one(chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err)
			break;
	}
	pthread_spin_unlock(&chp->lock);
	return !err || err == -ENODATA ? npolled : err;
}

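/*
 * Arm the CQ to request a completion notification, optionally for
 * solicited events only.
 */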
int c4iw_arm_cq(struct ibv_cq *ibcq, int solicited)
{
	struct c4iw_cq *chp;
	int ret;

	INC_STAT(arm);
	chp = to_c4iw_cq(ibcq);
	pthread_spin_lock(&chp->lock);
	ret = t4_arm_cq(&chp->cq, solicited);
	pthread_spin_unlock(&chp->lock);
	return ret;
}
