/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2021 - 2023 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "osdep.h"
#include "ice_rdma.h"
#include "irdma_di_if.h"
#include "irdma_main.h"
#include <sys/gsb_crc32.h>
#include <netinet/in_fib.h>
#include <netinet6/in6_fib.h>
#include <net/route/nhop.h>
#include <net/if_llatbl.h>

/* additional QP debugging option. Keep false unless needed */
bool irdma_upload_context = false;

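/*
 * Bounds-checked MMIO register accessors. irdma_rd32/irdma_wr32 and
 * irdma_rd64/irdma_wr64 read and write 32- and 64-bit device registers
 * through the bus_space handle in the device context; the KASSERTs catch
 * register offsets beyond the mapped BAR size.
 */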
inline u32
irdma_rd32(struct irdma_dev_ctx *dev_ctx, u32 reg)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	return (bus_space_read_4(dev_ctx->mem_bus_space_tag,
				 dev_ctx->mem_bus_space_handle, reg));
}

inline void
irdma_wr32(struct irdma_dev_ctx *dev_ctx, u32 reg, u32 value)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	bus_space_write_4(dev_ctx->mem_bus_space_tag,
			  dev_ctx->mem_bus_space_handle, reg, value);
}

inline u64
irdma_rd64(struct irdma_dev_ctx *dev_ctx, u32 reg)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	return (bus_space_read_8(dev_ctx->mem_bus_space_tag,
				 dev_ctx->mem_bus_space_handle, reg));
}

inline void
irdma_wr64(struct irdma_dev_ctx *dev_ctx, u32 reg, u64 value)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	bus_space_write_8(dev_ctx->mem_bus_space_tag,
			  dev_ctx->mem_bus_space_handle, reg, value);
}

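/**
 * irdma_request_reset - request a PF reset from the LAN driver
 * @rf: RDMA PCI function
 */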
void
irdma_request_reset(struct irdma_pci_f *rf)
{
	struct ice_rdma_peer *peer = rf->peer_info;
	struct ice_rdma_request req = {0};

	req.type = ICE_RDMA_EVENT_RESET;

	printf("%s:%d requesting pf-reset\n", __func__, __LINE__);
	IRDMA_DI_REQ_HANDLER(peer, &req);
}

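/**
 * irdma_register_qset - register a RDMA qset with the LAN driver
 * @vsi: vsi structure
 * @tc_node: traffic class node holding the qset handle
 */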
int
irdma_register_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
{
	struct irdma_device *iwdev = vsi->back_vsi;
	struct ice_rdma_peer *peer = iwdev->rf->peer_info;
	struct ice_rdma_request req = {0};
	struct ice_rdma_qset_update *res = &req.res;

	req.type = ICE_RDMA_EVENT_QSET_REGISTER;
	res->cnt_req = 1;
	res->res_type = ICE_RDMA_QSET_ALLOC;
	res->qsets.qs_handle = tc_node->qs_handle;
	res->qsets.tc = tc_node->traffic_class;
	res->qsets.vsi_id = vsi->vsi_idx;

	IRDMA_DI_REQ_HANDLER(peer, &req);

	tc_node->l2_sched_node_id = res->qsets.teid;
	vsi->qos[tc_node->user_pri].l2_sched_node_id =
	    res->qsets.teid;

	return 0;
}

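/**
 * irdma_unregister_qset - release a RDMA qset registered with the LAN driver
 * @vsi: vsi structure
 * @tc_node: traffic class node holding the qset handle
 */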
void
irdma_unregister_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
{
	struct irdma_device *iwdev = vsi->back_vsi;
	struct ice_rdma_peer *peer = iwdev->rf->peer_info;
	struct ice_rdma_request req = {0};
	struct ice_rdma_qset_update *res = &req.res;

	req.type = ICE_RDMA_EVENT_QSET_REGISTER;
	res->res_allocated = 1;
	res->res_type = ICE_RDMA_QSET_FREE;
	res->qsets.vsi_id = vsi->vsi_idx;
	res->qsets.teid = tc_node->l2_sched_node_id;
	res->qsets.qs_handle = tc_node->qs_handle;

	IRDMA_DI_REQ_HANDLER(peer, &req);
}

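/**
 * hw_to_dev - return the pci device owning the hw structure
 * @hw: hardware structure embedded in the RDMA PCI function
 */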
void *
hw_to_dev(struct irdma_hw *hw)
{
	struct irdma_pci_f *rf;

	rf = container_of(hw, struct irdma_pci_f, hw);
	return rf->pcidev;
}

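/**
 * irdma_free_hash_desc - free hash descriptor
 * @desc: hash descriptor
 *
 * Nothing to release: calculate_crc32c() keeps no per-descriptor state.
 */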
void
irdma_free_hash_desc(void *desc)
{
	return;
}

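/**
 * irdma_init_hash_desc - initialize hash descriptor
 * @desc: hash descriptor to set up
 *
 * Always succeeds: calculate_crc32c() keeps no per-descriptor state.
 */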
int
irdma_init_hash_desc(void **desc)
{
	return 0;
}

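/**
 * irdma_ieq_check_mpacrc - check if the MPA CRC of a received buffer is valid
 * @desc: hash descriptor (unused)
 * @addr: address of the buffer to checksum
 * @len: length of the buffer
 * @val: CRC value received with the MPA frame
 */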
int
irdma_ieq_check_mpacrc(void *desc, void *addr, u32 len, u32 val)
{
	u32 crc = calculate_crc32c(0xffffffff, addr, len) ^ 0xffffffff;
	int ret_code = 0;

	if (crc != val) {
		irdma_pr_err("mpa crc check fail %x %x\n", crc, val);
		ret_code = -EINVAL;
	}
	printf("%s: result crc=%x value=%x\n", __func__, crc, val);
	return ret_code;
}

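/**
 * irdma_add_ipv6_cb - if_foreach_addr_type callback adding one IPv6 address
 * @arg: irdma device
 * @addr: interface address being visited
 * @count: unused iteration count
 *
 * Pushes the address/MAC pair into the hw ARP cache.
 */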
static u_int
irdma_add_ipv6_cb(void *arg, struct ifaddr *addr, u_int count __unused)
{
	struct irdma_device *iwdev = arg;
	struct sockaddr_in6 *sin6;
	u32 local_ipaddr6[4] = {};
	char ip6buf[INET6_ADDRSTRLEN];
	u8 *mac_addr;

	sin6 = (struct sockaddr_in6 *)addr->ifa_addr;

	irdma_copy_ip_ntohl(local_ipaddr6, (u32 *)&sin6->sin6_addr);

	mac_addr = if_getlladdr(addr->ifa_ifp);

	printf("%s:%d IP=%s, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
	       __func__, __LINE__,
	       ip6_sprintf(ip6buf, &sin6->sin6_addr),
	       mac_addr[0], mac_addr[1], mac_addr[2],
	       mac_addr[3], mac_addr[4], mac_addr[5]);

	irdma_manage_arp_cache(iwdev->rf, mac_addr, local_ipaddr6,
			       IRDMA_ARP_ADD);
	return (0);
}

/**
 * irdma_add_ipv6_addr - add ipv6 address to the hw arp table
 * @iwdev: irdma device
 * @ifp: interface network device pointer
 */
static void
irdma_add_ipv6_addr(struct irdma_device *iwdev, struct ifnet *ifp)
{
	if_addr_rlock(ifp);
	if_foreach_addr_type(ifp, AF_INET6, irdma_add_ipv6_cb, iwdev);
	if_addr_runlock(ifp);
}

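/**
 * irdma_add_ipv4_cb - if_foreach_addr_type callback adding one IPv4 address
 * @arg: irdma device
 * @addr: interface address being visited
 * @count: unused iteration count
 *
 * Pushes the address/MAC pair into the hw ARP cache.
 */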
static u_int
irdma_add_ipv4_cb(void *arg, struct ifaddr *addr, u_int count __unused)
{
	struct irdma_device *iwdev = arg;
	struct sockaddr_in *sin;
	u32 ip_addr[4] = {};
	uint8_t *mac_addr;

	sin = (struct sockaddr_in *)addr->ifa_addr;

	ip_addr[0] = ntohl(sin->sin_addr.s_addr);

	mac_addr = if_getlladdr(addr->ifa_ifp);

	printf("%s:%d IP=%d.%d.%d.%d, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
	       __func__, __LINE__,
	       ip_addr[0] >> 24,
	       (ip_addr[0] >> 16) & 0xFF,
	       (ip_addr[0] >> 8) & 0xFF,
	       ip_addr[0] & 0xFF,
	       mac_addr[0], mac_addr[1], mac_addr[2],
	       mac_addr[3], mac_addr[4], mac_addr[5]);

	irdma_manage_arp_cache(iwdev->rf, mac_addr, ip_addr,
			       IRDMA_ARP_ADD);
	return (0);
}

/**
 * irdma_add_ipv4_addr - add ipv4 address to the hw arp table
 * @iwdev: irdma device
 * @ifp: interface network device pointer
 */
static void
irdma_add_ipv4_addr(struct irdma_device *iwdev, struct ifnet *ifp)
{
	if_addr_rlock(ifp);
	if_foreach_addr_type(ifp, AF_INET, irdma_add_ipv4_cb, iwdev);
	if_addr_runlock(ifp);
}

/**
 * irdma_add_ip - add ip addresses
 * @iwdev: irdma device
 *
 * Add ipv4/ipv6 addresses to the arp cache
 */
void
irdma_add_ip(struct irdma_device *iwdev)
{
	struct ifnet *ifp = iwdev->netdev;
	struct ifnet *ifv;
	struct epoch_tracker et;
	int i;

	irdma_add_ipv4_addr(iwdev, ifp);
	irdma_add_ipv6_addr(iwdev, ifp);
	for (i = 0; if_getvlantrunk(ifp) != NULL && i < VLAN_N_VID; ++i) {
		NET_EPOCH_ENTER(et);
		ifv = VLAN_DEVAT(ifp, i);
		NET_EPOCH_EXIT(et);
		if (!ifv)
			continue;
		irdma_add_ipv4_addr(iwdev, ifv);
		irdma_add_ipv6_addr(iwdev, ifv);
	}
}

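/**
 * irdma_ifaddrevent_handler - ifaddr_event_ext handler
 * @arg: RDMA PCI function
 * @ifp: interface the address change happened on
 * @ifa: address being added or deleted
 * @event: IFADDR_EVENT_ADD or IFADDR_EVENT_DEL
 *
 * Keeps the hw ARP cache in sync with addresses added to or removed from
 * the associated netdev or any of its VLANs.
 */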
static void
irdma_ifaddrevent_handler(void *arg, struct ifnet *ifp, struct ifaddr *ifa, int event)
{
	struct irdma_pci_f *rf = arg;
	struct ifnet *ifv = NULL;
	struct sockaddr_in *sin;
	struct epoch_tracker et;
	int arp_index = 0, i = 0;
	u32 ip[4] = {};

	if (!ifa || !ifa->ifa_addr || !ifp)
		return;
	if (rf->iwdev->netdev != ifp) {
		for (i = 0; if_getvlantrunk(rf->iwdev->netdev) != NULL && i < VLAN_N_VID; ++i) {
			NET_EPOCH_ENTER(et);
			ifv = VLAN_DEVAT(rf->iwdev->netdev, i);
			NET_EPOCH_EXIT(et);
			if (ifv == ifp)
				break;
		}
		if (ifv != ifp)
			return;
	}
	sin = (struct sockaddr_in *)ifa->ifa_addr;

	switch (event) {
	case IFADDR_EVENT_ADD:
		if (sin->sin_family == AF_INET)
			irdma_add_ipv4_addr(rf->iwdev, ifp);
		else if (sin->sin_family == AF_INET6)
			irdma_add_ipv6_addr(rf->iwdev, ifp);
		break;
	case IFADDR_EVENT_DEL:
		if (sin->sin_family == AF_INET) {
			ip[0] = ntohl(sin->sin_addr.s_addr);
		} else if (sin->sin_family == AF_INET6) {
			irdma_copy_ip_ntohl(ip, (u32 *)&((struct sockaddr_in6 *)sin)->sin6_addr);
		} else {
			break;
		}
		for_each_set_bit(arp_index, rf->allocated_arps, rf->arp_table_size) {
			if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip))) {
				irdma_manage_arp_cache(rf, rf->arp_table[arp_index].mac_addr,
						       rf->arp_table[arp_index].ip_addr,
						       IRDMA_ARP_DELETE);
			}
		}
		break;
	default:
		break;
	}
}

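/**
 * irdma_reg_ipaddr_event_cb - register for interface address change events
 * @rf: RDMA PCI function
 */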
void
irdma_reg_ipaddr_event_cb(struct irdma_pci_f *rf)
{
	rf->irdma_ifaddr_event = EVENTHANDLER_REGISTER(ifaddr_event_ext,
						       irdma_ifaddrevent_handler,
						       rf,
						       EVENTHANDLER_PRI_ANY);
}

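/**
 * irdma_dereg_ipaddr_event_cb - unregister the interface address event handler
 * @rf: RDMA PCI function
 */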
void
irdma_dereg_ipaddr_event_cb(struct irdma_pci_f *rf)
{
	EVENTHANDLER_DEREGISTER(ifaddr_event_ext, rf->irdma_ifaddr_event);
}

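/**
 * irdma_get_route_ifp - look up the route to a destination
 * @dst_sin: destination IPv4/IPv6 address
 * @netdev: net device the connection is bound to
 * @ifp: to return the outgoing interface
 * @nexthop: to return the next hop (gateway or the destination itself)
 * @gateway: to return whether the route goes through a gateway
 *
 * Returns 0 if a route through @netdev (or one of its VLANs) exists,
 * -ENETUNREACH otherwise.
 */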
static int
irdma_get_route_ifp(struct sockaddr *dst_sin, struct ifnet *netdev,
		    struct ifnet **ifp, struct sockaddr **nexthop, bool *gateway)
{
	struct nhop_object *nh;

	if (dst_sin->sa_family == AF_INET6)
		nh = fib6_lookup(RT_DEFAULT_FIB, &((struct sockaddr_in6 *)dst_sin)->sin6_addr,
				 ((struct sockaddr_in6 *)dst_sin)->sin6_scope_id, NHR_NONE, 0);
	else
		nh = fib4_lookup(RT_DEFAULT_FIB, ((struct sockaddr_in *)dst_sin)->sin_addr, 0, NHR_NONE, 0);
	if (!nh || (nh->nh_ifp != netdev &&
		    rdma_vlan_dev_real_dev(nh->nh_ifp) != netdev))
		goto rt_not_found;
	*gateway = (nh->nh_flags & NHF_GATEWAY) ? true : false;
	*nexthop = (*gateway) ? &nh->gw_sa : dst_sin;
	*ifp = nh->nh_ifp;

	return 0;

rt_not_found:
	pr_err("irdma: route not found\n");
	return -ENETUNREACH;
}

/**
 * irdma_get_dst_mac - get destination mac address
 * @cm_node: connection's node
 * @dst_sin: destination address information
 * @dst_mac: mac address array to return
 */
int
irdma_get_dst_mac(struct irdma_cm_node *cm_node, struct sockaddr *dst_sin, u8 *dst_mac)
{
	struct ifnet *netdev = cm_node->iwdev->netdev;
#ifdef VIMAGE
	struct vnet *vnet = irdma_cmid_to_vnet(cm_node->cm_id);
#endif
	struct ifnet *ifp;
	struct llentry *lle;
	struct sockaddr *nexthop;
	struct epoch_tracker et;
	int err;
	bool gateway;

	NET_EPOCH_ENTER(et);
	CURVNET_SET_QUIET(vnet);
	err = irdma_get_route_ifp(dst_sin, netdev, &ifp, &nexthop, &gateway);
	if (err)
		goto get_route_fail;

	if (dst_sin->sa_family == AF_INET) {
		err = arpresolve(ifp, gateway, NULL, nexthop, dst_mac, NULL, &lle);
	} else if (dst_sin->sa_family == AF_INET6) {
		err = nd6_resolve(ifp, LLE_SF(AF_INET6, gateway), NULL, nexthop,
				  dst_mac, NULL, &lle);
	} else {
		err = -EPROTONOSUPPORT;
	}

get_route_fail:
	CURVNET_RESTORE();
	NET_EPOCH_EXIT(et);
	if (err) {
		pr_err("failed to resolve neighbor address (err=%d)\n",
		       err);
		return -ENETUNREACH;
	}

	return 0;
}

/**
 * irdma_addr_resolve_neigh - resolve neighbor address
 * @cm_node: connection's node
 * @dst_ip: remote ip address
 * @arpindex: if there is an arp entry
 */
int
irdma_addr_resolve_neigh(struct irdma_cm_node *cm_node,
			 u32 dst_ip, int arpindex)
{
	struct irdma_device *iwdev = cm_node->iwdev;
	struct sockaddr_in dst_sin = {};
	int err;
	u32 ip[4] = {};
	u8 dst_mac[MAX_ADDR_LEN];

	dst_sin.sin_len = sizeof(dst_sin);
	dst_sin.sin_family = AF_INET;
	dst_sin.sin_port = 0;
	dst_sin.sin_addr.s_addr = htonl(dst_ip);

	err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_sin, dst_mac);
	if (err)
		return arpindex;

	ip[0] = dst_ip;

	return irdma_add_arp(iwdev->rf, ip, dst_mac);
}

/**
 * irdma_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
 * @cm_node: connection's node
 * @dest: remote ip address
 * @arpindex: if there is an arp entry
 */
int
irdma_addr_resolve_neigh_ipv6(struct irdma_cm_node *cm_node,
			      u32 *dest, int arpindex)
{
	struct irdma_device *iwdev = cm_node->iwdev;
	struct sockaddr_in6 dst_addr = {};
	int err;
	u8 dst_mac[MAX_ADDR_LEN];

	dst_addr.sin6_family = AF_INET6;
	dst_addr.sin6_len = sizeof(dst_addr);
	dst_addr.sin6_scope_id = if_getindex(iwdev->netdev);

	irdma_copy_ip_htonl(dst_addr.sin6_addr.__u6_addr.__u6_addr32, dest);
	err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_addr, dst_mac);
	if (err)
		return arpindex;

	return irdma_add_arp(iwdev->rf, dest, dst_mac);
}

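/**
 * irdma_resolve_neigh_lpb_chk - resolve neighbor with loopback check
 * @iwdev: irdma device
 * @cm_node: connection's node
 * @cm_info: connection info
 *
 * Returns the ARP table index for the remote address, resolving the
 * neighbor first unless the connection is a loopback.
 */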
int
irdma_resolve_neigh_lpb_chk(struct irdma_device *iwdev, struct irdma_cm_node *cm_node,
			    struct irdma_cm_info *cm_info)
{
#ifdef VIMAGE
	struct vnet *vnet = irdma_cmid_to_vnet(cm_node->cm_id);
#endif
	int arpindex;
	int oldarpindex;
	bool is_lpb = false;

	CURVNET_SET_QUIET(vnet);
	is_lpb = cm_node->ipv4 ?
	    irdma_ipv4_is_lpb(cm_node->loc_addr[0], cm_node->rem_addr[0]) :
	    irdma_ipv6_is_lpb(cm_node->loc_addr, cm_node->rem_addr);
	CURVNET_RESTORE();
	if (is_lpb) {
		cm_node->do_lpb = true;
		arpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
					   NULL,
					   IRDMA_ARP_RESOLVE);
	} else {
		oldarpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
					      NULL,
					      IRDMA_ARP_RESOLVE);
		if (cm_node->ipv4)
			arpindex = irdma_addr_resolve_neigh(cm_node,
							    cm_info->rem_addr[0],
							    oldarpindex);
		else
			arpindex = irdma_addr_resolve_neigh_ipv6(cm_node,
								 cm_info->rem_addr,
								 oldarpindex);
	}
	return arpindex;
}

/**
 * irdma_add_handler - add a handler to the list
 * @hdl: handler to be added to the handler list
 */
void
irdma_add_handler(struct irdma_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&irdma_handler_lock, flags);
	list_add(&hdl->list, &irdma_handlers);
	spin_unlock_irqrestore(&irdma_handler_lock, flags);
}

/**
 * irdma_del_handler - delete a handler from the list
 * @hdl: handler to be deleted from the handler list
 */
void
irdma_del_handler(struct irdma_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&irdma_handler_lock, flags);
	list_del(&hdl->list);
	spin_unlock_irqrestore(&irdma_handler_lock, flags);
}

/**
 * irdma_set_rf_user_cfg_params - apply user configurable settings
 * @rf: RDMA PCI function
 */
void
irdma_set_rf_user_cfg_params(struct irdma_pci_f *rf)
{
	int en_rem_endpoint_trk = 0;
	int limits_sel = 4;

	rf->en_rem_endpoint_trk = en_rem_endpoint_trk;
	rf->limits_sel = limits_sel;
	rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
	/* Enable DCQCN algorithm by default */
	rf->dcqcn_ena = true;
}

/**
 * irdma_sysctl_dcqcn_update - handle dcqcn_ena sysctl update
 * @arg1: pointer to rf
 * @arg2: unused
 * @oidp: sysctl oid structure
 * @req: sysctl request pointer
 */
static int
irdma_sysctl_dcqcn_update(SYSCTL_HANDLER_ARGS)
{
	struct irdma_pci_f *rf = (struct irdma_pci_f *)arg1;
	int ret;
	u8 dcqcn_ena = rf->dcqcn_ena;

	ret = sysctl_handle_8(oidp, &dcqcn_ena, 0, req);
	if ((ret) || (req->newptr == NULL))
		return ret;
	if (dcqcn_ena == 0)
		rf->dcqcn_ena = false;
	else
		rf->dcqcn_ena = true;

	return 0;
}

enum irdma_cqp_stats_info {
	IRDMA_CQP_REQ_CMDS = 28,
	IRDMA_CQP_CMPL_CMDS = 29
};

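/**
 * irdma_sysctl_cqp_stats - sysctl handler for CQP command counters
 * @arg1: pointer to the sc_cqp
 * @arg2: IRDMA_CQP_REQ_CMDS or IRDMA_CQP_CMPL_CMDS selector
 * @oidp: sysctl oid structure
 * @req: sysctl request pointer
 *
 * Reports either the number of requested or completed CQP operations
 * as a string.
 */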
static int
irdma_sysctl_cqp_stats(SYSCTL_HANDLER_ARGS)
{
	struct irdma_sc_cqp *cqp = (struct irdma_sc_cqp *)arg1;
	char rslt[192] = "no cqp available yet";
	int rslt_size = sizeof(rslt) - 1;
	int option = (int)arg2;

	if (!cqp) {
		return sysctl_handle_string(oidp, rslt, sizeof(rslt), req);
	}

	snprintf(rslt, sizeof(rslt), "");
	switch (option) {
	case IRDMA_CQP_REQ_CMDS:
		snprintf(rslt, rslt_size, "%lu", cqp->requested_ops);
		break;
	case IRDMA_CQP_CMPL_CMDS:
		snprintf(rslt, rslt_size, "%lu", atomic64_read(&cqp->completed_ops));
		break;
	}

	return sysctl_handle_string(oidp, rslt, sizeof(rslt), req);
}

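/*
 * Table entry used to create a read-only sysctl for a single SW stat:
 * either a CQP op type indexing dev->cqp_cmd_stats[], or an offset of a
 * counter within the cm_core/puda_rsrc structure it belongs to.
 */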
struct irdma_sw_stats_tunable_info {
	u8 op_type;
	const char name[32];
	const char desc[32];
	uintptr_t value;
};

static const struct irdma_sw_stats_tunable_info irdma_sws_list[] = {
	{IRDMA_OP_CEQ_DESTROY, "ceq_destroy", "ceq_destroy", 0},
	{IRDMA_OP_AEQ_DESTROY, "aeq_destroy", "aeq_destroy", 0},
	{IRDMA_OP_DELETE_ARP_CACHE_ENTRY, "delete_arp_cache_entry",
	"delete_arp_cache_entry", 0},
	{IRDMA_OP_MANAGE_APBVT_ENTRY, "manage_apbvt_entry",
	"manage_apbvt_entry", 0},
	{IRDMA_OP_CEQ_CREATE, "ceq_create", "ceq_create", 0},
	{IRDMA_OP_AEQ_CREATE, "aeq_create", "aeq_create", 0},
	{IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY, "manage_qhash_table_entry",
	"manage_qhash_table_entry", 0},
	{IRDMA_OP_QP_MODIFY, "qp_modify", "qp_modify", 0},
	{IRDMA_OP_QP_UPLOAD_CONTEXT, "qp_upload_context", "qp_upload_context",
	0},
	{IRDMA_OP_CQ_CREATE, "cq_create", "cq_create", 0},
	{IRDMA_OP_CQ_DESTROY, "cq_destroy", "cq_destroy", 0},
	{IRDMA_OP_QP_CREATE, "qp_create", "qp_create", 0},
	{IRDMA_OP_QP_DESTROY, "qp_destroy", "qp_destroy", 0},
	{IRDMA_OP_ALLOC_STAG, "alloc_stag", "alloc_stag", 0},
	{IRDMA_OP_MR_REG_NON_SHARED, "mr_reg_non_shared", "mr_reg_non_shared",
	0},
	{IRDMA_OP_DEALLOC_STAG, "dealloc_stag", "dealloc_stag", 0},
	{IRDMA_OP_MW_ALLOC, "mw_alloc", "mw_alloc", 0},
	{IRDMA_OP_QP_FLUSH_WQES, "qp_flush_wqes", "qp_flush_wqes", 0},
	{IRDMA_OP_ADD_ARP_CACHE_ENTRY, "add_arp_cache_entry",
	"add_arp_cache_entry", 0},
	{IRDMA_OP_MANAGE_PUSH_PAGE, "manage_push_page", "manage_push_page", 0},
	{IRDMA_OP_UPDATE_PE_SDS, "update_pe_sds", "update_pe_sds", 0},
	{IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE, "manage_hmc_pm_func_table",
	"manage_hmc_pm_func_table", 0},
	{IRDMA_OP_SUSPEND, "suspend", "suspend", 0},
	{IRDMA_OP_RESUME, "resume", "resume", 0},
	{25, "manage_vchnl_req_pble_bp", "manage_vchnl_req_pble_bp", 0},
	{IRDMA_OP_QUERY_FPM_VAL, "query_fpm_val", "query_fpm_val", 0},
	{IRDMA_OP_COMMIT_FPM_VAL, "commit_fpm_val", "commit_fpm_val", 0},
	{IRDMA_OP_AH_CREATE, "ah_create", "ah_create", 0},
	{IRDMA_OP_AH_MODIFY, "ah_modify", "ah_modify", 0},
	{IRDMA_OP_AH_DESTROY, "ah_destroy", "ah_destroy", 0},
	{IRDMA_OP_MC_CREATE, "mc_create", "mc_create", 0},
	{IRDMA_OP_MC_DESTROY, "mc_destroy", "mc_destroy", 0},
	{IRDMA_OP_MC_MODIFY, "mc_modify", "mc_modify", 0},
	{IRDMA_OP_STATS_ALLOCATE, "stats_allocate", "stats_allocate", 0},
	{IRDMA_OP_STATS_FREE, "stats_free", "stats_free", 0},
	{IRDMA_OP_STATS_GATHER, "stats_gather", "stats_gather", 0},
	{IRDMA_OP_WS_ADD_NODE, "ws_add_node", "ws_add_node", 0},
	{IRDMA_OP_WS_MODIFY_NODE, "ws_modify_node", "ws_modify_node", 0},
	{IRDMA_OP_WS_DELETE_NODE, "ws_delete_node", "ws_delete_node", 0},
	{IRDMA_OP_WS_FAILOVER_START, "ws_failover_start", "ws_failover_start",
	0},
	{IRDMA_OP_WS_FAILOVER_COMPLETE, "ws_failover_complete",
	"ws_failover_complete", 0},
	{IRDMA_OP_SET_UP_MAP, "set_up_map", "set_up_map", 0},
	{IRDMA_OP_GEN_AE, "gen_ae", "gen_ae", 0},
	{IRDMA_OP_QUERY_RDMA_FEATURES, "query_rdma_features",
	"query_rdma_features", 0},
	{IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY, "alloc_local_mac_entry",
	"alloc_local_mac_entry", 0},
	{IRDMA_OP_ADD_LOCAL_MAC_ENTRY, "add_local_mac_entry",
	"add_local_mac_entry", 0},
	{IRDMA_OP_DELETE_LOCAL_MAC_ENTRY, "delete_local_mac_entry",
	"delete_local_mac_entry", 0},
	{IRDMA_OP_CQ_MODIFY, "cq_modify", "cq_modify", 0}
};

static const struct irdma_sw_stats_tunable_info irdma_cmcs_list[] = {
	{0, "cm_nodes_created", "cm_nodes_created",
	offsetof(struct irdma_cm_core, stats_nodes_created)},
	{0, "cm_nodes_destroyed", "cm_nodes_destroyed",
	offsetof(struct irdma_cm_core, stats_nodes_destroyed)},
	{0, "cm_listen_created", "cm_listen_created",
	offsetof(struct irdma_cm_core, stats_listen_created)},
	{0, "cm_listen_destroyed", "cm_listen_destroyed",
	offsetof(struct irdma_cm_core, stats_listen_destroyed)},
	{0, "cm_listen_nodes_created", "cm_listen_nodes_created",
	offsetof(struct irdma_cm_core, stats_listen_nodes_created)},
	{0, "cm_listen_nodes_destroyed", "cm_listen_nodes_destroyed",
	offsetof(struct irdma_cm_core, stats_listen_nodes_destroyed)},
	{0, "cm_lpbs", "cm_lpbs", offsetof(struct irdma_cm_core, stats_lpbs)},
	{0, "cm_accepts", "cm_accepts", offsetof(struct irdma_cm_core,
						 stats_accepts)},
	{0, "cm_rejects", "cm_rejects", offsetof(struct irdma_cm_core,
						 stats_rejects)},
	{0, "cm_connect_errs", "cm_connect_errs",
	offsetof(struct irdma_cm_core, stats_connect_errs)},
	{0, "cm_passive_errs", "cm_passive_errs",
	offsetof(struct irdma_cm_core, stats_passive_errs)},
	{0, "cm_pkt_retrans", "cm_pkt_retrans", offsetof(struct irdma_cm_core,
							 stats_pkt_retrans)},
	{0, "cm_backlog_drops", "cm_backlog_drops",
	offsetof(struct irdma_cm_core, stats_backlog_drops)},
};

static const struct irdma_sw_stats_tunable_info irdma_ilqs32_list[] = {
	{0, "ilq_avail_buf_count", "ilq_avail_buf_count",
	offsetof(struct irdma_puda_rsrc, avail_buf_count)},
	{0, "ilq_alloc_buf_count", "ilq_alloc_buf_count",
	offsetof(struct irdma_puda_rsrc, alloc_buf_count)}
};

static const struct irdma_sw_stats_tunable_info irdma_ilqs_list[] = {
	{0, "ilq_stats_buf_alloc_fail", "ilq_stats_buf_alloc_fail",
	offsetof(struct irdma_puda_rsrc, stats_buf_alloc_fail)},
	{0, "ilq_stats_pkt_rcvd", "ilq_stats_pkt_rcvd",
	offsetof(struct irdma_puda_rsrc, stats_pkt_rcvd)},
	{0, "ilq_stats_pkt_sent", "ilq_stats_pkt_sent",
	offsetof(struct irdma_puda_rsrc, stats_pkt_sent)},
	{0, "ilq_stats_rcvd_pkt_err", "ilq_stats_rcvd_pkt_err",
	offsetof(struct irdma_puda_rsrc, stats_rcvd_pkt_err)},
	{0, "ilq_stats_sent_pkt_q", "ilq_stats_sent_pkt_q",
	offsetof(struct irdma_puda_rsrc, stats_sent_pkt_q)}
};

static const struct irdma_sw_stats_tunable_info irdma_ieqs32_list[] = {
	{0, "ieq_avail_buf_count", "ieq_avail_buf_count",
	offsetof(struct irdma_puda_rsrc, avail_buf_count)},
	{0, "ieq_alloc_buf_count", "ieq_alloc_buf_count",
	offsetof(struct irdma_puda_rsrc, alloc_buf_count)}
};

static const struct irdma_sw_stats_tunable_info irdma_ieqs_list[] = {
	{0, "ieq_stats_buf_alloc_fail", "ieq_stats_buf_alloc_fail",
	offsetof(struct irdma_puda_rsrc, stats_buf_alloc_fail)},
	{0, "ieq_stats_pkt_rcvd", "ieq_stats_pkt_rcvd",
	offsetof(struct irdma_puda_rsrc, stats_pkt_rcvd)},
	{0, "ieq_stats_pkt_sent", "ieq_stats_pkt_sent",
	offsetof(struct irdma_puda_rsrc, stats_pkt_sent)},
	{0, "ieq_stats_rcvd_pkt_err", "ieq_stats_rcvd_pkt_err",
	offsetof(struct irdma_puda_rsrc, stats_rcvd_pkt_err)},
	{0, "ieq_stats_sent_pkt_q", "ieq_stats_sent_pkt_q",
	offsetof(struct irdma_puda_rsrc, stats_sent_pkt_q)},
	{0, "ieq_stats_bad_qp_id", "ieq_stats_bad_qp_id",
	offsetof(struct irdma_puda_rsrc, stats_bad_qp_id)},
	{0, "ieq_fpdu_processed", "ieq_fpdu_processed",
	offsetof(struct irdma_puda_rsrc, fpdu_processed)},
	{0, "ieq_bad_seq_num", "ieq_bad_seq_num",
	offsetof(struct irdma_puda_rsrc, bad_seq_num)},
	{0, "ieq_crc_err", "ieq_crc_err", offsetof(struct irdma_puda_rsrc,
						   crc_err)},
	{0, "ieq_pmode_count", "ieq_pmode_count",
	offsetof(struct irdma_puda_rsrc, pmode_count)},
	{0, "ieq_partials_handled", "ieq_partials_handled",
	offsetof(struct irdma_puda_rsrc, partials_handled)},
};

/**
 * irdma_dcqcn_tunables_init - create tunables for dcqcn settings
 * @rf: RDMA PCI function
 *
 * Create DCQCN related sysctls for the driver.
 * dcqcn_ena is a writable setting that applies to the next QP creation or
 * context setting.
 * All other settings are of RDTUN type (read at driver load) and apply
 * only to CQP creation.
 */
void
irdma_dcqcn_tunables_init(struct irdma_pci_f *rf)
{
	struct sysctl_oid_list *irdma_sysctl_oid_list;

	irdma_sysctl_oid_list = SYSCTL_CHILDREN(rf->tun_info.irdma_sysctl_tree);

	SYSCTL_ADD_PROC(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
			OID_AUTO, "dcqcn_enable", CTLFLAG_RW | CTLTYPE_U8, rf, 0,
			irdma_sysctl_dcqcn_update, "A",
			"enables DCQCN algorithm for RoCEv2 on all ports, default=true");

	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_cc_cfg_valid", CTLFLAG_RDTUN,
		      &rf->dcqcn_params.cc_cfg_valid, 0,
		      "set DCQCN parameters to be valid, default=false");

	rf->dcqcn_params.min_dec_factor = 1;
	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_min_dec_factor", CTLFLAG_RDTUN,
		      &rf->dcqcn_params.min_dec_factor, 0,
		      "set minimum percentage factor by which tx rate can be changed for CNP, Range: 1-100, default=1");

	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_min_rate_MBps", CTLFLAG_RDTUN,
		      &rf->dcqcn_params.min_rate, 0,
		      "set minimum rate limit value, in MBits per second, default=0");

	rf->dcqcn_params.dcqcn_f = 5;
	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_F", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_f, 0,
		      "set number of times to stay in each stage of bandwidth recovery, default=5");

	rf->dcqcn_params.dcqcn_t = 0x37;
	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_T", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_t, 0,
		       "number of us to elapse before increasing the CWND in DCQCN mode, default=0x37");

	rf->dcqcn_params.dcqcn_b = 0x249f0;
	SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_B", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_b, 0,
		       "set DCQCN byte counter threshold: bytes sent between two rate increase events, default=0x249f0");

	rf->dcqcn_params.rai_factor = 1;
	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_rai_factor", CTLFLAG_RDTUN,
		       &rf->dcqcn_params.rai_factor, 0,
		       "set number of MSS to add to the congestion window in additive increase mode, default=1");

	rf->dcqcn_params.hai_factor = 5;
	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_hai_factor", CTLFLAG_RDTUN,
		       &rf->dcqcn_params.hai_factor, 0,
		       "set number of MSS to add to the congestion window in hyperactive increase mode, default=5");

	rf->dcqcn_params.rreduce_mperiod = 50;
	SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_rreduce_mperiod", CTLFLAG_RDTUN,
		       &rf->dcqcn_params.rreduce_mperiod, 0,
		       "set minimum time between 2 consecutive rate reductions for a single flow, default=50");
}

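/*
 * Illustrative usage only: the exact sysctl node path depends on where
 * tun_info.irdma_sysctl_tree is attached, so the device path below is a
 * hypothetical example.
 *
 *   # RDTUN tunables are read at driver load, e.g. from /boot/loader.conf:
 *   dev.irdma.0.dcqcn_cc_cfg_valid="1"
 *
 *   # dcqcn_enable is CTLFLAG_RW and can be toggled at runtime:
 *   sysctl dev.irdma.0.dcqcn_enable=0
 */
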
/**
 * irdma_sysctl_settings - sysctl runtime settings init
 * @rf: RDMA PCI function
 */
void
irdma_sysctl_settings(struct irdma_pci_f *rf)
{
	struct sysctl_oid_list *irdma_sysctl_oid_list;

	irdma_sysctl_oid_list = SYSCTL_CHILDREN(rf->tun_info.irdma_sysctl_tree);

	SYSCTL_ADD_BOOL(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
			OID_AUTO, "upload_context", CTLFLAG_RWTUN,
			&irdma_upload_context, 0,
			"allow for generating QP's upload context, default=0");
}

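/**
 * irdma_sw_stats_tunables_init - create read-only sysctls for SW statistics
 * @rf: RDMA PCI function
 *
 * Exposes CQP command counters, CM core counters and ILQ/IEQ PUDA resource
 * counters under the driver's software-stats sysctl tree.
 */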
void
irdma_sw_stats_tunables_init(struct irdma_pci_f *rf)
{
	struct sysctl_oid_list *sws_oid_list;
	struct sysctl_ctx_list *irdma_ctx = &rf->tun_info.irdma_sysctl_ctx;
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_cm_core *cm_core = &rf->iwdev->cm_core;
	struct irdma_puda_rsrc *ilq = rf->iwdev->vsi.ilq;
	struct irdma_puda_rsrc *ieq = rf->iwdev->vsi.ieq;
	u64 *ll_ptr;
	u32 *l_ptr;
	int cqp_stat_cnt = sizeof(irdma_sws_list) / sizeof(struct irdma_sw_stats_tunable_info);
	int cmcore_stat_cnt = sizeof(irdma_cmcs_list) / sizeof(struct irdma_sw_stats_tunable_info);
	int ilqs_stat_cnt = sizeof(irdma_ilqs_list) / sizeof(struct irdma_sw_stats_tunable_info);
	int ilqs32_stat_cnt = sizeof(irdma_ilqs32_list) / sizeof(struct irdma_sw_stats_tunable_info);
	int ieqs_stat_cnt = sizeof(irdma_ieqs_list) / sizeof(struct irdma_sw_stats_tunable_info);
	int ieqs32_stat_cnt = sizeof(irdma_ieqs32_list) / sizeof(struct irdma_sw_stats_tunable_info);
	int i;

	sws_oid_list = SYSCTL_CHILDREN(rf->tun_info.sws_sysctl_tree);

	for (i = 0; i < cqp_stat_cnt; ++i) {
		SYSCTL_ADD_U64(irdma_ctx, sws_oid_list, OID_AUTO,
			       irdma_sws_list[i].name, CTLFLAG_RD,
			       &dev->cqp_cmd_stats[irdma_sws_list[i].op_type],
			       0, irdma_sws_list[i].desc);
	}
	SYSCTL_ADD_PROC(irdma_ctx, sws_oid_list, OID_AUTO,
			"req_cmds", CTLFLAG_RD | CTLTYPE_STRING,
			dev->cqp, IRDMA_CQP_REQ_CMDS, irdma_sysctl_cqp_stats, "A",
			"req_cmds");
	SYSCTL_ADD_PROC(irdma_ctx, sws_oid_list, OID_AUTO,
			"cmpl_cmds", CTLFLAG_RD | CTLTYPE_STRING,
			dev->cqp, IRDMA_CQP_CMPL_CMDS, irdma_sysctl_cqp_stats, "A",
			"cmpl_cmds");
	for (i = 0; i < cmcore_stat_cnt; ++i) {
		ll_ptr = (u64 *)((uintptr_t)cm_core + irdma_cmcs_list[i].value);
		SYSCTL_ADD_U64(irdma_ctx, sws_oid_list, OID_AUTO,
			       irdma_cmcs_list[i].name, CTLFLAG_RD, ll_ptr,
			       0, irdma_cmcs_list[i].desc);
	}
	for (i = 0; ilq && i < ilqs_stat_cnt; ++i) {
		ll_ptr = (u64 *)((uintptr_t)ilq + irdma_ilqs_list[i].value);
		SYSCTL_ADD_U64(irdma_ctx, sws_oid_list, OID_AUTO,
			       irdma_ilqs_list[i].name, CTLFLAG_RD, ll_ptr,
			       0, irdma_ilqs_list[i].desc);
	}
	for (i = 0; ilq && i < ilqs32_stat_cnt; ++i) {
		l_ptr = (u32 *)((uintptr_t)ilq + irdma_ilqs32_list[i].value);
		SYSCTL_ADD_U32(irdma_ctx, sws_oid_list, OID_AUTO,
			       irdma_ilqs32_list[i].name, CTLFLAG_RD, l_ptr,
			       0, irdma_ilqs32_list[i].desc);
	}
	for (i = 0; ieq && i < ieqs_stat_cnt; ++i) {
		ll_ptr = (u64 *)((uintptr_t)ieq + irdma_ieqs_list[i].value);
		SYSCTL_ADD_U64(irdma_ctx, sws_oid_list, OID_AUTO,
			       irdma_ieqs_list[i].name, CTLFLAG_RD, ll_ptr,
			       0, irdma_ieqs_list[i].desc);
	}
	for (i = 0; ieq && i < ieqs32_stat_cnt; ++i) {
		l_ptr = (u32 *)((uintptr_t)ieq + irdma_ieqs32_list[i].value);
		SYSCTL_ADD_U32(irdma_ctx, sws_oid_list, OID_AUTO,
			       irdma_ieqs32_list[i].name, CTLFLAG_RD, l_ptr,
			       0, irdma_ieqs32_list[i].desc);
	}
}

/**
 * irdma_dmamap_cb - callback for bus_dmamap_load
 */
static void
irdma_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
{
	if (error)
		return;
	*(bus_addr_t *) arg = segs->ds_addr;
	return;
}

/**
 * irdma_allocate_dma_mem - allocate dma memory
 * @hw: pointer to hw structure
 * @mem: structure holding memory information
 * @size: requested size
 * @alignment: requested alignment
 */
void *
irdma_allocate_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem,
		       u64 size, u32 alignment)
{
	struct irdma_dev_ctx *dev_ctx = (struct irdma_dev_ctx *)hw->dev_context;
	device_t dev = dev_ctx->dev;
	void *va;
	int ret;

	ret = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
				 alignment, 0,	/* alignment, bounds */
				 BUS_SPACE_MAXADDR,	/* lowaddr */
				 BUS_SPACE_MAXADDR,	/* highaddr */
				 NULL, NULL,	/* filter, filterarg */
				 size,	/* maxsize */
				 1,	/* nsegments */
				 size,	/* maxsegsize */
				 BUS_DMA_ALLOCNOW,	/* flags */
				 NULL,	/* lockfunc */
				 NULL,	/* lockfuncarg */
				 &mem->tag);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dma_tag_create failed, error %u\n",
			      __func__, ret);
		goto fail_0;
	}
	ret = bus_dmamem_alloc(mem->tag, (void **)&va,
			       BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dmamem_alloc failed, error %u\n",
			      __func__, ret);
		goto fail_1;
	}
	ret = bus_dmamap_load(mem->tag, mem->map, va, size,
			      irdma_dmamap_cb, &mem->pa, BUS_DMA_NOWAIT);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dmamap_load failed, error %u\n",
			      __func__, ret);
		goto fail_2;
	}
	mem->nseg = 1;
	mem->size = size;
	bus_dmamap_sync(mem->tag, mem->map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return va;
fail_2:
	bus_dmamem_free(mem->tag, va, mem->map);
fail_1:
	bus_dma_tag_destroy(mem->tag);
fail_0:
	mem->map = NULL;
	mem->tag = NULL;

	return NULL;
}

/**
 * irdma_free_dma_mem - Memory free helper fn
 * @hw: pointer to hw structure
 * @mem: ptr to mem struct to free
 */
int
irdma_free_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem)
{
	if (!mem)
		return -EINVAL;
	bus_dmamap_sync(mem->tag, mem->map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(mem->tag, mem->map);
	if (!mem->va)
		return -ENOMEM;
	bus_dmamem_free(mem->tag, mem->va, mem->map);
	bus_dma_tag_destroy(mem->tag);

	mem->va = NULL;

	return 0;
}

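/*
 * Typical allocate/use/free pattern for irdma_allocate_dma_mem() and
 * irdma_free_dma_mem() above (a sketch, not copied from a caller): the
 * allocator fills in mem->tag/map/pa but returns the kernel VA, so callers
 * are expected to store it in mem.va themselves, which irdma_free_dma_mem()
 * relies on.
 *
 *	struct irdma_dma_mem mem = {0};
 *
 *	mem.va = irdma_allocate_dma_mem(hw, &mem, size, alignment);
 *	if (!mem.va)
 *		return -ENOMEM;
 *	...hand mem.pa to the hardware, access the buffer through mem.va...
 *	irdma_free_dma_mem(hw, &mem);
 */

/**
 * irdma_cleanup_dead_qps - free resources of QPs left on the vsi qos lists
 * @vsi: vsi structure
 *
 * Walks every user-priority QP list and releases the context/queue DMA
 * memory and WRID arrays of the remaining non-UDA QPs.
 */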
void
irdma_cleanup_dead_qps(struct irdma_sc_vsi *vsi)
{
	struct irdma_sc_qp *qp = NULL;
	struct irdma_qp *iwqp;
	struct irdma_pci_f *rf;
	u8 i;

	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
		qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
		while (qp) {
			if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_UDA) {
				qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
				continue;
			}
			iwqp = qp->qp_uk.back_qp;
			rf = iwqp->iwdev->rf;
			irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->q2_ctx_mem);
			irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->kqp.dma_mem);

			kfree(iwqp->kqp.sq_wrid_mem);
			kfree(iwqp->kqp.rq_wrid_mem);
			qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
			kfree(iwqp);
		}
	}
}
1059