/*
 * Copyright (c) 2006-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
31165899Srwatson */ 32165899Srwatson#if HAVE_CONFIG_H 33165899Srwatson# include <config.h> 34165899Srwatson#endif /* HAVE_CONFIG_H */ 35165899Srwatson 36165899Srwatson#include <assert.h> 37165899Srwatson#include <stdlib.h> 38165899Srwatson#include <pthread.h> 39165899Srwatson#include <string.h> 40165899Srwatson#include <stdio.h> 41165899Srwatson#include <netinet/in.h> 4211819Sjulian#include "libcxgb4.h" 4311819Sjulian 4411819Sjulian#ifdef STATS 4511819Sjulianstruct c4iw_stats c4iw_stats; 4611819Sjulian#endif 4711819Sjulian 4811819Sjulianstatic void copy_wr_to_sq(struct t4_wq *wq, union t4_wr *wqe, u8 len16) 4911819Sjulian{ 5011819Sjulian u64 *src, *dst; 5111819Sjulian 5211819Sjulian src = (u64 *)wqe; 5311819Sjulian dst = (u64 *)((u8 *)wq->sq.queue + wq->sq.wq_pidx * T4_EQ_ENTRY_SIZE); 5411819Sjulian if (t4_sq_onchip(wq)) { 5511819Sjulian len16 = align(len16, 4); 5611819Sjulian wc_wmb(); 5711819Sjulian } 5811819Sjulian while (len16) { 5911819Sjulian *dst++ = *src++; 6011819Sjulian if (dst == (u64 *)&wq->sq.queue[wq->sq.size]) 6111819Sjulian dst = (u64 *)wq->sq.queue; 6212057Sjulian *dst++ = *src++; 6311819Sjulian if (dst == (u64 *)&wq->sq.queue[wq->sq.size]) 6411819Sjulian dst = (u64 *)wq->sq.queue; 65116189Sobrien len16--; 66116189Sobrien } 67116189Sobrien} 6811819Sjulian 6911819Sjulianstatic void copy_wr_to_rq(struct t4_wq *wq, union t4_recv_wr *wqe, u8 len16) 7029024Sbde{ 71164033Srwatson u64 *src, *dst; 7211819Sjulian 7311819Sjulian src = (u64 *)wqe; 7411819Sjulian dst = (u64 *)((u8 *)wq->rq.queue + wq->rq.wq_pidx * T4_EQ_ENTRY_SIZE); 7511819Sjulian while (len16) { 7611819Sjulian *dst++ = *src++; 7711819Sjulian if (dst >= (u64 *)&wq->rq.queue[wq->rq.size]) 7811819Sjulian dst = (u64 *)wq->rq.queue; 7911819Sjulian *dst++ = *src++; 8011819Sjulian if (dst >= (u64 *)&wq->rq.queue[wq->rq.size]) 8125652Sjhay dst = (u64 *)wq->rq.queue; 8211819Sjulian len16--; 8333181Seivind } 84139584Srwatson} 8511819Sjulian 8611819Sjulianstatic int build_immd(struct t4_sq *sq, struct 
fw_ri_immd *immdp, 87169463Srwatson struct ibv_send_wr *wr, int max, u32 *plenp) 8811819Sjulian{ 89169463Srwatson u8 *dstp, *srcp; 9011819Sjulian u32 plen = 0; 91157094Srwatson int i; 92139928Srwatson int len; 93139928Srwatson 94184205Sdes dstp = (u8 *)immdp->data; 9528270Swollman for (i = 0; i < wr->num_sge; i++) { 9611819Sjulian if ((plen + wr->sg_list[i].length) > max) 97139925Srwatson return -EMSGSIZE; 9811819Sjulian srcp = (u8 *)(unsigned long)wr->sg_list[i].addr; 9950519Sjhay plen += wr->sg_list[i].length; 10050519Sjhay len = wr->sg_list[i].length; 101139444Srwatson memcpy(dstp, srcp, len); 10211819Sjulian dstp += len; 10311819Sjulian srcp += len; 10411819Sjulian } 105139584Srwatson len = ROUND_UP(plen + 8, 16) - (plen + 8); 10611819Sjulian if (len) 107169463Srwatson memset(dstp, 0, len); 10811819Sjulian immdp->op = FW_RI_DATA_IMMD; 109169463Srwatson immdp->r1 = 0; 11011819Sjulian immdp->r2 = 0; 11111819Sjulian immdp->immdlen = cpu_to_be32(plen); 112139928Srwatson *plenp = plen; 113139928Srwatson return 0; 114139928Srwatson} 11511819Sjulian 11611819Sjulianstatic int build_isgl(struct fw_ri_isgl *isglp, struct ibv_sge *sg_list, 11725652Sjhay int num_sge, u32 *plenp) 11811819Sjulian{ 11928270Swollman int i; 12011819Sjulian u32 plen = 0; 12111819Sjulian __be64 *flitp = (__be64 *)isglp->sge; 12211819Sjulian 12311819Sjulian for (i = 0; i < num_sge; i++) { 124194622Srwatson if ((plen + sg_list[i].length) < plen) 12511819Sjulian return -EMSGSIZE; 12611819Sjulian plen += sg_list[i].length; 12711819Sjulian *flitp++ = cpu_to_be64(((u64)sg_list[i].lkey << 32) | 12811819Sjulian sg_list[i].length); 12911819Sjulian *flitp++ = cpu_to_be64(sg_list[i].addr); 13011819Sjulian } 13111819Sjulian *flitp = 0; 132164033Srwatson isglp->op = FW_RI_DATA_ISGL; 133164033Srwatson isglp->r1 = 0; 134164033Srwatson isglp->nsge = cpu_to_be16(num_sge); 13511819Sjulian isglp->r2 = 0; 13611819Sjulian if (plenp) 13711819Sjulian *plenp = plen; 13811819Sjulian return 0; 13911819Sjulian} 
14011819Sjulian 14111819Sjulianstatic int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, 142139445Srwatson struct ibv_send_wr *wr, u8 *len16) 143139445Srwatson{ 144139445Srwatson u32 plen; 145139445Srwatson int size; 146139445Srwatson int ret; 14711819Sjulian 14811819Sjulian if (wr->num_sge > T4_MAX_SEND_SGE) 14911819Sjulian return -EINVAL; 15011819Sjulian if (wr->send_flags & IBV_SEND_SOLICITED) 15111819Sjulian wqe->send.sendop_pkd = cpu_to_be32( 15211819Sjulian V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE)); 15311819Sjulian else 15411819Sjulian wqe->send.sendop_pkd = cpu_to_be32( 15511819Sjulian V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND)); 15611819Sjulian wqe->send.stag_inv = 0; 15711819Sjulian wqe->send.r3 = 0; 15811819Sjulian wqe->send.r4 = 0; 159169463Srwatson 16011819Sjulian plen = 0; 161169463Srwatson if (wr->num_sge) { 162169463Srwatson if (wr->send_flags & IBV_SEND_INLINE) { 163169463Srwatson ret = build_immd(sq, wqe->send.u.immd_src, wr, 16411819Sjulian T4_MAX_SEND_INLINE, &plen); 16511819Sjulian if (ret) 166139928Srwatson return ret; 167139928Srwatson size = sizeof wqe->send + sizeof(struct fw_ri_immd) + 168139928Srwatson plen; 16911819Sjulian } else { 17011819Sjulian ret = build_isgl(wqe->send.u.isgl_src, 17125652Sjhay wr->sg_list, wr->num_sge, &plen); 17211819Sjulian if (ret) 17311819Sjulian return ret; 17411819Sjulian size = sizeof wqe->send + sizeof(struct fw_ri_isgl) + 17511819Sjulian wr->num_sge * sizeof (struct fw_ri_sge); 17611819Sjulian } 17711819Sjulian } else { 17811819Sjulian wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD; 17911819Sjulian wqe->send.u.immd_src[0].r1 = 0; 18011819Sjulian wqe->send.u.immd_src[0].r2 = 0; 18111819Sjulian wqe->send.u.immd_src[0].immdlen = 0; 18211819Sjulian size = sizeof wqe->send + sizeof(struct fw_ri_immd); 18311819Sjulian plen = 0; 18411819Sjulian } 18511819Sjulian *len16 = DIV_ROUND_UP(size, 16); 18611819Sjulian wqe->send.plen = cpu_to_be32(plen); 18797658Stanimura return 0; 18811819Sjulian} 18911819Sjulian 
19011819Sjulianstatic int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe, 19111819Sjulian struct ibv_send_wr *wr, u8 *len16) 19225652Sjhay{ 19311819Sjulian u32 plen; 19411819Sjulian int size; 19511819Sjulian int ret; 19611819Sjulian 19725652Sjhay if (wr->num_sge > T4_MAX_SEND_SGE) 19811819Sjulian return -EINVAL; 19925652Sjhay wqe->write.r2 = 0; 20011819Sjulian wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey); 20111819Sjulian wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr); 20211819Sjulian if (wr->num_sge) { 20311819Sjulian if (wr->send_flags & IBV_SEND_INLINE) { 20425652Sjhay ret = build_immd(sq, wqe->write.u.immd_src, wr, 20511819Sjulian T4_MAX_WRITE_INLINE, &plen); 20611819Sjulian if (ret) 20711819Sjulian return ret; 20811819Sjulian size = sizeof wqe->write + sizeof(struct fw_ri_immd) + 20911819Sjulian plen; 210139556Srwatson } else { 21197658Stanimura ret = build_isgl(wqe->write.u.isgl_src, 21211819Sjulian wr->sg_list, wr->num_sge, &plen); 213194760Srwatson if (ret) 214194760Srwatson return ret; 215139584Srwatson size = sizeof wqe->write + sizeof(struct fw_ri_isgl) + 21611819Sjulian wr->num_sge * sizeof (struct fw_ri_sge); 21711819Sjulian } 21811819Sjulian } else { 21911819Sjulian wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD; 22011819Sjulian wqe->write.u.immd_src[0].r1 = 0; 22111819Sjulian wqe->write.u.immd_src[0].r2 = 0; 22211819Sjulian wqe->write.u.immd_src[0].immdlen = 0; 22311819Sjulian size = sizeof wqe->write + sizeof(struct fw_ri_immd); 224194608Srwatson plen = 0; 225194608Srwatson } 226194905Srwatson *len16 = DIV_ROUND_UP(size, 16); 227194760Srwatson wqe->write.plen = cpu_to_be32(plen); 228194760Srwatson return 0; 22911819Sjulian} 230194760Srwatson 231194905Srwatsonstatic int build_rdma_read(union t4_wr *wqe, struct ibv_send_wr *wr, u8 *len16) 232194608Srwatson{ 233194608Srwatson if (wr->num_sge > 1) 23425652Sjhay return -EINVAL; 23511819Sjulian if (wr->num_sge) { 23611819Sjulian wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey); 
23711819Sjulian wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr >>32)); 23811819Sjulian wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr); 23911819Sjulian wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey); 240194760Srwatson wqe->read.plen = cpu_to_be32(wr->sg_list[0].length); 241194760Srwatson wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr >> 32)); 24211819Sjulian wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr)); 243194760Srwatson } else { 244194760Srwatson wqe->read.stag_src = cpu_to_be32(2); 245194760Srwatson wqe->read.to_src_hi = 0; 246194760Srwatson wqe->read.to_src_lo = 0; 247194760Srwatson wqe->read.stag_sink = cpu_to_be32(2); 248194760Srwatson wqe->read.plen = 0; 249194905Srwatson wqe->read.to_sink_hi = 0; 250194760Srwatson wqe->read.to_sink_lo = 0; 251194760Srwatson } 252194760Srwatson wqe->read.r2 = 0; 253194760Srwatson wqe->read.r5 = 0; 25425652Sjhay *len16 = DIV_ROUND_UP(sizeof wqe->read, 16); 25511819Sjulian return 0; 25611819Sjulian} 25711819Sjulian 258194760Srwatsonstatic int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, 25911819Sjulian struct ibv_recv_wr *wr, u8 *len16) 26025652Sjhay{ 261194760Srwatson int ret; 262139584Srwatson 26325652Sjhay ret = build_isgl(&wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL); 26425652Sjhay if (ret) 26525652Sjhay return ret; 26625652Sjhay *len16 = DIV_ROUND_UP(sizeof wqe->recv + 26725652Sjhay wr->num_sge * sizeof(struct fw_ri_sge), 16); 26825652Sjhay return 0; 26925652Sjhay} 27025652Sjhay 271194608Srwatsonvoid dump_wqe(void *arg) 272194608Srwatson{ 273194905Srwatson u64 *p = arg; 274194760Srwatson int len16; 275194760Srwatson 27625652Sjhay len16 = be64_to_cpu(*p) & 0xff; 277194760Srwatson while (len16--) { 278194905Srwatson printf("%02x: %016lx ", (u8)(unsigned long)p, be64_to_cpu(*p)); 279194608Srwatson p++; 280194608Srwatson printf("%016lx\n", be64_to_cpu(*p)); 28125652Sjhay p++; 28225652Sjhay } 28325652Sjhay} 28425652Sjhay 
28525652Sjhaystatic void ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 idx) 28625652Sjhay{ 287194608Srwatson struct ibv_modify_qp cmd; 288194608Srwatson struct ibv_qp_attr attr; 28925652Sjhay int mask; 290194760Srwatson int ret; 291194760Srwatson 292194608Srwatson wc_wmb(); 293194608Srwatson if (qid == qhp->wq.sq.qid) { 294194608Srwatson attr.sq_psn = idx; 295194608Srwatson mask = IBV_QP_SQ_PSN; 296194905Srwatson } else { 297194760Srwatson attr.rq_psn = idx; 298194760Srwatson mask = IBV_QP_RQ_PSN; 299194608Srwatson } 300194608Srwatson ret = ibv_cmd_modify_qp(&qhp->ibv_qp, &attr, mask, &cmd, sizeof cmd); 30125652Sjhay assert(!ret); 30225652Sjhay} 30325652Sjhay 30425652Sjhayint c4iw_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr, 305194760Srwatson struct ibv_send_wr **bad_wr) 30625652Sjhay{ 30711819Sjulian int err = 0; 30811819Sjulian u8 len16; 30925652Sjhay enum fw_wr_opcodes fw_opcode; 31083366Sjulian enum fw_ri_wr_flags fw_flags; 31125652Sjhay struct c4iw_qp *qhp; 31225652Sjhay union t4_wr *wqe, lwqe; 31325652Sjhay u32 num_wrs; 31411819Sjulian struct t4_swsqe *swsqe; 31511819Sjulian u16 idx = 0; 31611819Sjulian 31711819Sjulian qhp = to_c4iw_qp(ibqp); 31811819Sjulian pthread_spin_lock(&qhp->lock); 31911819Sjulian if (t4_wq_in_error(&qhp->wq)) { 320169463Srwatson pthread_spin_unlock(&qhp->lock); 32111819Sjulian return -EINVAL; 32211819Sjulian } 323139928Srwatson num_wrs = t4_sq_avail(&qhp->wq); 324139928Srwatson if (num_wrs == 0) { 325139928Srwatson pthread_spin_unlock(&qhp->lock); 32611819Sjulian return -ENOMEM; 32711819Sjulian } 32811819Sjulian while (wr) { 32911819Sjulian if (num_wrs == 0) { 330169463Srwatson err = -ENOMEM; 33111819Sjulian *bad_wr = wr; 33211819Sjulian break; 33311819Sjulian } 334139928Srwatson 335139928Srwatson wqe = &lwqe; 336139928Srwatson fw_flags = 0; 337139559Srwatson if (wr->send_flags & IBV_SEND_SOLICITED) 338157128Srwatson fw_flags |= FW_RI_SOLICITED_EVENT_FLAG; 339157128Srwatson if (wr->send_flags & IBV_SEND_SIGNALED || 
qhp->sq_sig_all) 340157128Srwatson fw_flags |= FW_RI_COMPLETION_FLAG; 341157128Srwatson swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; 342169463Srwatson switch (wr->opcode) { 343157128Srwatson case IBV_WR_SEND: 344157128Srwatson INC_STAT(send); 345157128Srwatson if (wr->send_flags & IBV_SEND_FENCE) 346157128Srwatson fw_flags |= FW_RI_READ_FENCE_FLAG; 347157128Srwatson fw_opcode = FW_RI_SEND_WR; 348157128Srwatson swsqe->opcode = FW_RI_SEND; 349157128Srwatson err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16); 35025652Sjhay break; 351139557Srwatson case IBV_WR_RDMA_WRITE: 352139444Srwatson INC_STAT(write); 353139925Srwatson fw_opcode = FW_RI_RDMA_WRITE_WR; 354184205Sdes swsqe->opcode = FW_RI_RDMA_WRITE; 35511819Sjulian err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16); 35611819Sjulian break; 35711819Sjulian case IBV_WR_RDMA_READ: 358169463Srwatson INC_STAT(read); 35911819Sjulian fw_opcode = FW_RI_RDMA_READ_WR; 36028270Swollman swsqe->opcode = FW_RI_READ_REQ; 361139584Srwatson fw_flags = 0; 36228270Swollman err = build_rdma_read(wqe, wr, &len16); 36325652Sjhay if (err) 36411819Sjulian break; 36511819Sjulian swsqe->read_len = wr->sg_list ? 
wr->sg_list[0].length : 0; 366139928Srwatson if (!qhp->wq.sq.oldest_read) 36711819Sjulian qhp->wq.sq.oldest_read = swsqe; 368139928Srwatson break; 369139924Srwatson default: 37011819Sjulian PDBG("%s post of type=%d TBD!\n", __func__, 37111819Sjulian wr->opcode); 37211819Sjulian err = -EINVAL; 373169463Srwatson } 37411819Sjulian if (err) { 37528270Swollman *bad_wr = wr; 376139928Srwatson break; 37728270Swollman } 378139587Srwatson swsqe->idx = qhp->wq.sq.pidx; 37911819Sjulian swsqe->complete = 0; 38011819Sjulian swsqe->signaled = (wr->send_flags & IBV_SEND_SIGNALED) || 381139928Srwatson qhp->sq_sig_all; 38211819Sjulian swsqe->flushed = 0; 383139928Srwatson swsqe->wr_id = wr->wr_id; 384139587Srwatson 38511819Sjulian init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16); 38611819Sjulian PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x\n", 38711819Sjulian __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx, 388169463Srwatson swsqe->opcode); 38911819Sjulian wr = wr->next; 390169463Srwatson num_wrs--; 39111819Sjulian copy_wr_to_sq(&qhp->wq, wqe, len16); 39211819Sjulian t4_sq_produce(&qhp->wq, len16); 39311819Sjulian idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); 394139928Srwatson } 395139928Srwatson if (t4_wq_db_enabled(&qhp->wq)) { 39611819Sjulian t4_ring_sq_db(&qhp->wq, idx, dev_is_t5(qhp->rhp), 397139444Srwatson len16, wqe); 39811819Sjulian } else 39911819Sjulian ring_kernel_db(qhp, qhp->wq.sq.qid, idx); 40011819Sjulian qhp->wq.sq.queue[qhp->wq.sq.size].status.host_wq_pidx = \ 40111819Sjulian (qhp->wq.sq.wq_pidx); 40211819Sjulian pthread_spin_unlock(&qhp->lock); 40311819Sjulian return err; 40411819Sjulian} 40511819Sjulian 40611819Sjulianint c4iw_post_receive(struct ibv_qp *ibqp, struct ibv_recv_wr *wr, 40711819Sjulian struct ibv_recv_wr **bad_wr) 40811819Sjulian{ 40911819Sjulian int err = 0; 41011819Sjulian struct c4iw_qp *qhp; 41111819Sjulian union t4_recv_wr *wqe, lwqe; 41211819Sjulian u32 num_wrs; 41311819Sjulian u8 len16 = 0; 41411819Sjulian u16 idx = 
0; 41511819Sjulian 41611819Sjulian qhp = to_c4iw_qp(ibqp); 41711819Sjulian pthread_spin_lock(&qhp->lock); 41825652Sjhay if (t4_wq_in_error(&qhp->wq)) { 41911819Sjulian pthread_spin_unlock(&qhp->lock); 42011819Sjulian return -EINVAL; 42111819Sjulian } 42211819Sjulian INC_STAT(recv); 42311819Sjulian num_wrs = t4_rq_avail(&qhp->wq); 42411819Sjulian if (num_wrs == 0) { 42511819Sjulian pthread_spin_unlock(&qhp->lock); 42611819Sjulian return -ENOMEM; 42711819Sjulian } 42811819Sjulian while (wr) { 429 if (wr->num_sge > T4_MAX_RECV_SGE) { 430 err = -EINVAL; 431 *bad_wr = wr; 432 break; 433 } 434 wqe = &lwqe; 435 if (num_wrs) 436 err = build_rdma_recv(qhp, wqe, wr, &len16); 437 else 438 err = -ENOMEM; 439 if (err) { 440 *bad_wr = wr; 441 break; 442 } 443 444 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id; 445 446 wqe->recv.opcode = FW_RI_RECV_WR; 447 wqe->recv.r1 = 0; 448 wqe->recv.wrid = qhp->wq.rq.pidx; 449 wqe->recv.r2[0] = 0; 450 wqe->recv.r2[1] = 0; 451 wqe->recv.r2[2] = 0; 452 wqe->recv.len16 = len16; 453 PDBG("%s cookie 0x%llx pidx %u\n", __func__, 454 (unsigned long long) wr->wr_id, qhp->wq.rq.pidx); 455 copy_wr_to_rq(&qhp->wq, wqe, len16); 456 t4_rq_produce(&qhp->wq, len16); 457 idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); 458 wr = wr->next; 459 num_wrs--; 460 } 461 if (t4_wq_db_enabled(&qhp->wq)) 462 t4_ring_rq_db(&qhp->wq, idx, dev_is_t5(qhp->rhp), 463 len16, wqe); 464 else 465 ring_kernel_db(qhp, qhp->wq.rq.qid, idx); 466 qhp->wq.rq.queue[qhp->wq.rq.size].status.host_wq_pidx = \ 467 (qhp->wq.rq.wq_pidx); 468 pthread_spin_unlock(&qhp->lock); 469 return err; 470} 471 472static void update_qp_state(struct c4iw_qp *qhp) 473{ 474 struct ibv_query_qp cmd; 475 struct ibv_qp_attr attr; 476 struct ibv_qp_init_attr iattr; 477 int ret; 478 479 ret = ibv_cmd_query_qp(&qhp->ibv_qp, &attr, IBV_QP_STATE, &iattr, 480 &cmd, sizeof cmd); 481 assert(!ret); 482 if (!ret) 483 qhp->ibv_qp.state = attr.qp_state; 484} 485 486/* 487 * Assumes qhp lock is held. 
488 */ 489void c4iw_flush_qp(struct c4iw_qp *qhp) 490{ 491 struct c4iw_cq *rchp, *schp; 492 int count; 493 494 if (qhp->wq.flushed) 495 return; 496 497 update_qp_state(qhp); 498 499 rchp = to_c4iw_cq(qhp->ibv_qp.recv_cq); 500 schp = to_c4iw_cq(qhp->ibv_qp.send_cq); 501 502 PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp); 503 qhp->wq.flushed = 1; 504 pthread_spin_unlock(&qhp->lock); 505 506 /* locking heirarchy: cq lock first, then qp lock. */ 507 pthread_spin_lock(&rchp->lock); 508 pthread_spin_lock(&qhp->lock); 509 c4iw_flush_hw_cq(rchp); 510 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); 511 c4iw_flush_rq(&qhp->wq, &rchp->cq, count); 512 pthread_spin_unlock(&qhp->lock); 513 pthread_spin_unlock(&rchp->lock); 514 515 /* locking heirarchy: cq lock first, then qp lock. */ 516 pthread_spin_lock(&schp->lock); 517 pthread_spin_lock(&qhp->lock); 518 if (schp != rchp) 519 c4iw_flush_hw_cq(schp); 520 c4iw_flush_sq(qhp); 521 pthread_spin_unlock(&qhp->lock); 522 pthread_spin_unlock(&schp->lock); 523 pthread_spin_lock(&qhp->lock); 524} 525 526void c4iw_flush_qps(struct c4iw_dev *dev) 527{ 528 int i; 529 530 pthread_spin_lock(&dev->lock); 531 for (i=0; i < dev->max_qp; i++) { 532 struct c4iw_qp *qhp = dev->qpid2ptr[i]; 533 if (qhp) { 534 if (!qhp->wq.flushed && t4_wq_in_error(&qhp->wq)) { 535 pthread_spin_lock(&qhp->lock); 536 c4iw_flush_qp(qhp); 537 pthread_spin_unlock(&qhp->lock); 538 } 539 } 540 } 541 pthread_spin_unlock(&dev->lock); 542} 543