/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2024 Chelsio Communications, Inc.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 */

#include <sys/sysctl.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <uuid.h>

#include "libnvmf.h"
#include "internal.h"

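/* Initialize a submission queue entry with the given opcode. */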
static void
nvmf_init_sqe(void *sqe, uint8_t opcode)
{
	struct nvme_command *cmd = sqe;

	memset(cmd, 0, sizeof(*cmd));
	cmd->opc = opcode;
}

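/* Initialize a Fabrics command SQE with the given Fabrics command type. */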
static void
nvmf_init_fabrics_sqe(void *sqe, uint8_t fctype)
{
	struct nvmf_capsule_cmd *cmd = sqe;

	nvmf_init_sqe(sqe, NVME_OPC_FABRICS_COMMANDS);
	cmd->fctype = fctype;
}

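/*
 * Establish a queue pair by sending a Fabrics CONNECT command and
 * validating the response, including SQ flow control negotiation.
 * Returns the new queue pair on success or NULL on failure with a
 * description saved via na_error().
 */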
struct nvmf_qpair *
nvmf_connect(struct nvmf_association *na,
    const struct nvmf_qpair_params *params, uint16_t qid, u_int queue_size,
    const uint8_t hostid[16], uint16_t cntlid, const char *subnqn,
    const char *hostnqn, uint32_t kato)
{
	struct nvmf_fabric_connect_cmd cmd;
	struct nvmf_fabric_connect_data data;
	const struct nvmf_fabric_connect_rsp *rsp;
	struct nvmf_qpair *qp;
	struct nvmf_capsule *cc, *rc;
	int error;
	uint16_t sqhd, status;

	qp = NULL;
	cc = NULL;
	rc = NULL;
	na_clear_error(na);
	if (na->na_controller) {
		na_error(na, "Cannot connect on a controller");
		goto error;
	}

	if (params->admin != (qid == 0)) {
		na_error(na, "Admin queue must use Queue ID 0");
		goto error;
	}

	if (qid == 0) {
		if (queue_size < NVME_MIN_ADMIN_ENTRIES ||
		    queue_size > NVME_MAX_ADMIN_ENTRIES) {
			na_error(na, "Invalid queue size %u", queue_size);
			goto error;
		}
	} else {
		if (queue_size < NVME_MIN_IO_ENTRIES ||
		    queue_size > NVME_MAX_IO_ENTRIES) {
			na_error(na, "Invalid queue size %u", queue_size);
			goto error;
		}

		/* KATO is only for Admin queues. */
		if (kato != 0) {
			na_error(na, "Cannot set KATO on I/O queues");
			goto error;
		}
	}

	qp = nvmf_allocate_qpair(na, params);
	if (qp == NULL)
		goto error;

	nvmf_init_fabrics_sqe(&cmd, NVMF_FABRIC_COMMAND_CONNECT);
	cmd.recfmt = 0;
	cmd.qid = htole16(qid);

	/* N.B. sqsize is 0's based. */
	cmd.sqsize = htole16(queue_size - 1);
	if (!na->na_params.sq_flow_control)
		cmd.cattr |= NVMF_CONNECT_ATTR_DISABLE_SQ_FC;
	cmd.kato = htole32(kato);

	cc = nvmf_allocate_command(qp, &cmd);
	if (cc == NULL) {
		na_error(na, "Failed to allocate command capsule: %s",
		    strerror(errno));
		goto error;
	}

	memset(&data, 0, sizeof(data));
	memcpy(data.hostid, hostid, sizeof(data.hostid));
	data.cntlid = htole16(cntlid);
	strlcpy(data.subnqn, subnqn, sizeof(data.subnqn));
	strlcpy(data.hostnqn, hostnqn, sizeof(data.hostnqn));

	error = nvmf_capsule_append_data(cc, &data, sizeof(data), true);
	if (error != 0) {
		na_error(na, "Failed to append data to CONNECT capsule: %s",
		    strerror(error));
		goto error;
	}

	error = nvmf_transmit_capsule(cc);
	if (error != 0) {
		na_error(na, "Failed to transmit CONNECT capsule: %s",
		    strerror(error));
		goto error;
	}

	error = nvmf_receive_capsule(qp, &rc);
	if (error != 0) {
		na_error(na, "Failed to receive CONNECT response: %s",
		    strerror(error));
		goto error;
	}

	rsp = (const struct nvmf_fabric_connect_rsp *)&rc->nc_cqe;
	status = le16toh(rc->nc_cqe.status);
	if (status != 0) {
		if (NVME_STATUS_GET_SC(status) == NVMF_FABRIC_SC_INVALID_PARAM)
			na_error(na,
			    "CONNECT invalid parameter IATTR: %#x IPO: %#x",
			    rsp->status_code_specific.invalid.iattr,
			    rsp->status_code_specific.invalid.ipo);
		else
			na_error(na, "CONNECT failed, status %#x", status);
		goto error;
	}

	if (rc->nc_cqe.cid != cmd.cid) {
		na_error(na, "Mismatched CID in CONNECT response");
		goto error;
	}

	if (!rc->nc_sqhd_valid) {
		na_error(na, "CONNECT response without valid SQHD");
		goto error;
	}

	sqhd = le16toh(rsp->sqhd);
	if (sqhd == 0xffff) {
		if (na->na_params.sq_flow_control) {
			na_error(na, "Controller disabled SQ flow control");
			goto error;
		}
		qp->nq_flow_control = false;
	} else {
		qp->nq_flow_control = true;
		qp->nq_sqhd = sqhd;
		qp->nq_sqtail = sqhd;
	}

	if (rsp->status_code_specific.success.authreq) {
		na_error(na, "CONNECT response requests authentication");
		goto error;
	}

	qp->nq_qsize = queue_size;
	qp->nq_cntlid = le16toh(rsp->status_code_specific.success.cntlid);
	qp->nq_kato = kato;
	/* XXX: Save qid in qp? */
	return (qp);

error:
	if (rc != NULL)
		nvmf_free_capsule(rc);
	if (cc != NULL)
		nvmf_free_capsule(cc);
	if (qp != NULL)
		nvmf_free_qpair(qp);
	return (NULL);
}

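/* Return the controller ID assigned during CONNECT. */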
uint16_t
nvmf_cntlid(struct nvmf_qpair *qp)
{
	return (qp->nq_cntlid);
}

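/*
 * Assign a CID and transmit a command capsule, advancing the SQ tail.
 * Fails with EBUSY if the submission queue is full.
 */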
int
nvmf_host_transmit_command(struct nvmf_capsule *nc)
{
	struct nvmf_qpair *qp = nc->nc_qpair;
	uint16_t new_sqtail;
	int error;

	/* Fail if the queue is full. */
	new_sqtail = (qp->nq_sqtail + 1) % qp->nq_qsize;
	if (new_sqtail == qp->nq_sqhd)
		return (EBUSY);

	nc->nc_sqe.cid = htole16(qp->nq_cid);

	/* 4.2 Skip CID of 0xFFFF. */
	qp->nq_cid++;
	if (qp->nq_cid == 0xFFFF)
		qp->nq_cid = 0;

	error = nvmf_transmit_capsule(nc);
	if (error != 0)
		return (error);

	qp->nq_sqtail = new_sqtail;
	return (0);
}

/* Receive a single capsule and update SQ FC accounting. */
static int
nvmf_host_receive_capsule(struct nvmf_qpair *qp, struct nvmf_capsule **ncp)
{
	struct nvmf_capsule *nc;
	int error;

	/* If the SQ is empty, there is no response to wait for. */
	if (qp->nq_sqhd == qp->nq_sqtail)
		return (EWOULDBLOCK);

	error = nvmf_receive_capsule(qp, &nc);
	if (error != 0)
		return (error);

	if (qp->nq_flow_control) {
		if (nc->nc_sqhd_valid)
			qp->nq_sqhd = le16toh(nc->nc_cqe.sqhd);
	} else {
		/*
		 * If SQ FC is disabled, just advance the head for
		 * each response capsule received so that we track the
		 * number of outstanding commands.
		 */
		qp->nq_sqhd = (qp->nq_sqhd + 1) % qp->nq_qsize;
	}
	*ncp = nc;
	return (0);
}

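/*
 * Return the next response capsule, preferring capsules already on
 * the receive queue over reading a new one from the transport.
 */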
int
nvmf_host_receive_response(struct nvmf_qpair *qp, struct nvmf_capsule **ncp)
{
	struct nvmf_capsule *nc;

	/* Return the oldest previously received response. */
	if (!TAILQ_EMPTY(&qp->nq_rx_capsules)) {
		nc = TAILQ_FIRST(&qp->nq_rx_capsules);
		TAILQ_REMOVE(&qp->nq_rx_capsules, nc, nc_link);
		*ncp = nc;
		return (0);
	}

	return (nvmf_host_receive_capsule(qp, ncp));
}

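/*
 * Wait for the response matching the CID of the given command capsule.
 * Responses to other commands received while waiting are queued for
 * later retrieval via nvmf_host_receive_response().
 */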
int
nvmf_host_wait_for_response(struct nvmf_capsule *cc,
    struct nvmf_capsule **rcp)
{
	struct nvmf_qpair *qp = cc->nc_qpair;
	struct nvmf_capsule *rc;
	int error;

	/* Check if a response was already received. */
	TAILQ_FOREACH(rc, &qp->nq_rx_capsules, nc_link) {
		if (rc->nc_cqe.cid == cc->nc_sqe.cid) {
			TAILQ_REMOVE(&qp->nq_rx_capsules, rc, nc_link);
			*rcp = rc;
			return (0);
		}
	}

	/* Wait for a response. */
	for (;;) {
		error = nvmf_host_receive_capsule(qp, &rc);
		if (error != 0)
			return (error);

		if (rc->nc_cqe.cid != cc->nc_sqe.cid) {
			TAILQ_INSERT_TAIL(&qp->nq_rx_capsules, rc, nc_link);
			continue;
		}

		*rcp = rc;
		return (0);
	}
}

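/* Build a Keep Alive command capsule; only valid on the Admin queue. */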
struct nvmf_capsule *
nvmf_keepalive(struct nvmf_qpair *qp)
{
	struct nvme_command cmd;

	if (!qp->nq_admin) {
		errno = EINVAL;
		return (NULL);
	}

	nvmf_init_sqe(&cmd, NVME_OPC_KEEP_ALIVE);

	return (nvmf_allocate_command(qp, &cmd));
}

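/* Build a Fabrics PROPERTY_GET capsule for a 4 or 8 byte property. */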
static struct nvmf_capsule *
nvmf_get_property(struct nvmf_qpair *qp, uint32_t offset, uint8_t size)
{
	struct nvmf_fabric_prop_get_cmd cmd;

	nvmf_init_fabrics_sqe(&cmd, NVMF_FABRIC_COMMAND_PROPERTY_GET);
	switch (size) {
	case 4:
		cmd.attrib.size = NVMF_PROP_SIZE_4;
		break;
	case 8:
		cmd.attrib.size = NVMF_PROP_SIZE_8;
		break;
	default:
		errno = EINVAL;
		return (NULL);
	}
	cmd.ofst = htole32(offset);

	return (nvmf_allocate_command(qp, &cmd));
}

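/*
 * Synchronously read a controller property, returning the value in
 * *value.  Only valid on the Admin queue.
 */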
int
nvmf_read_property(struct nvmf_qpair *qp, uint32_t offset, uint8_t size,
    uint64_t *value)
{
	struct nvmf_capsule *cc, *rc;
	const struct nvmf_fabric_prop_get_rsp *rsp;
	uint16_t status;
	int error;

	if (!qp->nq_admin)
		return (EINVAL);

	cc = nvmf_get_property(qp, offset, size);
	if (cc == NULL)
		return (errno);

	error = nvmf_host_transmit_command(cc);
	if (error != 0) {
		nvmf_free_capsule(cc);
		return (error);
	}

	error = nvmf_host_wait_for_response(cc, &rc);
	nvmf_free_capsule(cc);
	if (error != 0)
		return (error);

	rsp = (const struct nvmf_fabric_prop_get_rsp *)&rc->nc_cqe;
	status = le16toh(rc->nc_cqe.status);
	if (status != 0) {
		printf("NVMF: PROPERTY_GET failed, status %#x\n", status);
		nvmf_free_capsule(rc);
		return (EIO);
	}

	if (size == 8)
		*value = le64toh(rsp->value.u64);
	else
		*value = le32toh(rsp->value.u32.low);
	nvmf_free_capsule(rc);
	return (0);
}

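/* Build a Fabrics PROPERTY_SET capsule for a 4 or 8 byte property. */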
static struct nvmf_capsule *
nvmf_set_property(struct nvmf_qpair *qp, uint32_t offset, uint8_t size,
    uint64_t value)
{
	struct nvmf_fabric_prop_set_cmd cmd;

	nvmf_init_fabrics_sqe(&cmd, NVMF_FABRIC_COMMAND_PROPERTY_SET);
	switch (size) {
	case 4:
		cmd.attrib.size = NVMF_PROP_SIZE_4;
		cmd.value.u32.low = htole32(value);
		break;
	case 8:
		cmd.attrib.size = NVMF_PROP_SIZE_8;
		cmd.value.u64 = htole64(value);
		break;
	default:
		errno = EINVAL;
		return (NULL);
	}
	cmd.ofst = htole32(offset);

	return (nvmf_allocate_command(qp, &cmd));
}

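/* Synchronously write a controller property.  Only valid on the Admin queue. */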
int
nvmf_write_property(struct nvmf_qpair *qp, uint32_t offset, uint8_t size,
    uint64_t value)
{
	struct nvmf_capsule *cc, *rc;
	uint16_t status;
	int error;

	if (!qp->nq_admin)
		return (EINVAL);

	cc = nvmf_set_property(qp, offset, size, value);
	if (cc == NULL)
		return (errno);

	error = nvmf_host_transmit_command(cc);
	if (error != 0) {
		nvmf_free_capsule(cc);
		return (error);
	}

	error = nvmf_host_wait_for_response(cc, &rc);
	nvmf_free_capsule(cc);
	if (error != 0)
		return (error);

	status = le16toh(rc->nc_cqe.status);
	if (status != 0) {
		printf("NVMF: PROPERTY_SET failed, status %#x\n", status);
		nvmf_free_capsule(rc);
		return (EIO);
	}

	nvmf_free_capsule(rc);
	return (0);
}

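/*
 * Derive a 16-byte host ID from the kern.hostuuid sysctl, encoding
 * the UUID in little-endian byte order.
 */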
int
nvmf_hostid_from_hostuuid(uint8_t hostid[16])
{
	char hostuuid_str[64];
	uuid_t hostuuid;
	size_t len;
	uint32_t status;

	len = sizeof(hostuuid_str);
	if (sysctlbyname("kern.hostuuid", hostuuid_str, &len, NULL, 0) != 0)
		return (errno);

	uuid_from_string(hostuuid_str, &hostuuid, &status);
	switch (status) {
	case uuid_s_ok:
		break;
	case uuid_s_no_memory:
		return (ENOMEM);
	default:
		return (EINVAL);
	}

	uuid_enc_le(hostid, &hostuuid);
	return (0);
}

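/* Construct a UUID-based host NQN from the kern.hostuuid sysctl. */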
int
nvmf_nqn_from_hostuuid(char nqn[NVMF_NQN_MAX_LEN])
{
	char hostuuid_str[64];
	size_t len;

	len = sizeof(hostuuid_str);
	if (sysctlbyname("kern.hostuuid", hostuuid_str, &len, NULL, 0) != 0)
		return (errno);

	strlcpy(nqn, NVMF_NQN_UUID_PRE, NVMF_NQN_MAX_LEN);
	strlcat(nqn, hostuuid_str, NVMF_NQN_MAX_LEN);
	return (0);
}

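/* Fetch the controller data structure via IDENTIFY with CNS 0x01. */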
int
nvmf_host_identify_controller(struct nvmf_qpair *qp,
    struct nvme_controller_data *cdata)
{
	struct nvme_command cmd;
	struct nvmf_capsule *cc, *rc;
	int error;
	uint16_t status;

	if (!qp->nq_admin)
		return (EINVAL);

	nvmf_init_sqe(&cmd, NVME_OPC_IDENTIFY);

	/* 5.15.1 Use CNS of 0x01 for controller data. */
	cmd.cdw10 = htole32(1);

	cc = nvmf_allocate_command(qp, &cmd);
	if (cc == NULL)
		return (errno);

	error = nvmf_capsule_append_data(cc, cdata, sizeof(*cdata), false);
	if (error != 0) {
		nvmf_free_capsule(cc);
		return (error);
	}

	error = nvmf_host_transmit_command(cc);
	if (error != 0) {
		nvmf_free_capsule(cc);
		return (error);
	}

	error = nvmf_host_wait_for_response(cc, &rc);
	nvmf_free_capsule(cc);
	if (error != 0)
		return (error);

	status = le16toh(rc->nc_cqe.status);
	if (status != 0) {
		printf("NVMF: IDENTIFY failed, status %#x\n", status);
		nvmf_free_capsule(rc);
		return (EIO);
	}

	nvmf_free_capsule(rc);
	return (0);
}

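/* Fetch a namespace data structure via IDENTIFY with CNS 0x00. */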
int
nvmf_host_identify_namespace(struct nvmf_qpair *qp, uint32_t nsid,
    struct nvme_namespace_data *nsdata)
{
	struct nvme_command cmd;
	struct nvmf_capsule *cc, *rc;
	int error;
	uint16_t status;

	if (!qp->nq_admin)
		return (EINVAL);

	nvmf_init_sqe(&cmd, NVME_OPC_IDENTIFY);

	/* 5.15.1 Use CNS of 0x00 for namespace data. */
	cmd.cdw10 = htole32(0);
	cmd.nsid = htole32(nsid);

	cc = nvmf_allocate_command(qp, &cmd);
	if (cc == NULL)
		return (errno);

	error = nvmf_capsule_append_data(cc, nsdata, sizeof(*nsdata), false);
	if (error != 0) {
		nvmf_free_capsule(cc);
		return (error);
	}

	error = nvmf_host_transmit_command(cc);
	if (error != 0) {
		nvmf_free_capsule(cc);
		return (error);
	}

	error = nvmf_host_wait_for_response(cc, &rc);
	nvmf_free_capsule(cc);
	if (error != 0)
		return (error);

	status = le16toh(rc->nc_cqe.status);
	if (status != 0) {
		printf("NVMF: IDENTIFY failed, status %#x\n", status);
		nvmf_free_capsule(rc);
		return (EIO);
	}

	nvmf_free_capsule(rc);
	return (0);
}

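/*
 * Read a range of the Discovery Log Page.  The offset and length must
 * be multiples of 4.  Returns EAGAIN if the controller requests that
 * discovery be restarted from the beginning.
 */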
static int
nvmf_get_discovery_log_page(struct nvmf_qpair *qp, uint64_t offset, void *buf,
    size_t len)
{
	struct nvme_command cmd;
	struct nvmf_capsule *cc, *rc;
	size_t numd;
	int error;
	uint16_t status;

	if (len % 4 != 0 || len == 0 || offset % 4 != 0)
		return (EINVAL);

	numd = (len / 4) - 1;
	nvmf_init_sqe(&cmd, NVME_OPC_GET_LOG_PAGE);
	cmd.cdw10 = htole32(numd << 16 | NVME_LOG_DISCOVERY);
	cmd.cdw11 = htole32(numd >> 16);
	cmd.cdw12 = htole32(offset);
	cmd.cdw13 = htole32(offset >> 32);

	cc = nvmf_allocate_command(qp, &cmd);
	if (cc == NULL)
		return (errno);

	error = nvmf_capsule_append_data(cc, buf, len, false);
	if (error != 0) {
		nvmf_free_capsule(cc);
		return (error);
	}

	error = nvmf_host_transmit_command(cc);
	if (error != 0) {
		nvmf_free_capsule(cc);
		return (error);
	}

	error = nvmf_host_wait_for_response(cc, &rc);
	nvmf_free_capsule(cc);
	if (error != 0)
		return (error);

	status = le16toh(rc->nc_cqe.status);
	if (NVMEV(NVME_STATUS_SC, status) ==
	    NVMF_FABRIC_SC_LOG_RESTART_DISCOVERY) {
		nvmf_free_capsule(rc);
		return (EAGAIN);
	}
	if (status != 0) {
		printf("NVMF: GET_LOG_PAGE failed, status %#x\n", status);
		nvmf_free_capsule(rc);
		return (EIO);
	}

	nvmf_free_capsule(rc);
	return (0);
}

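/*
 * Fetch the complete Discovery Log Page into an allocated buffer,
 * rereading the header until the generation counter is stable.  The
 * caller must free *logp.
 */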
int
nvmf_host_fetch_discovery_log_page(struct nvmf_qpair *qp,
    struct nvme_discovery_log **logp)
{
	struct nvme_discovery_log hdr, *log;
	size_t payload_len;
	int error;

	if (!qp->nq_admin)
		return (EINVAL);

	log = NULL;
	for (;;) {
		error = nvmf_get_discovery_log_page(qp, 0, &hdr, sizeof(hdr));
		if (error != 0) {
			free(log);
			return (error);
		}
		nvme_discovery_log_swapbytes(&hdr);

		if (hdr.recfmt != 0) {
			printf("NVMF: Unsupported discovery log format: %d\n",
			    hdr.recfmt);
			free(log);
			return (EINVAL);
		}

		if (hdr.numrec > 1024) {
			printf("NVMF: Too many discovery log entries: %ju\n",
			    (uintmax_t)hdr.numrec);
			free(log);
			return (EFBIG);
		}

		payload_len = sizeof(log->entries[0]) * hdr.numrec;
		log = reallocf(log, sizeof(*log) + payload_len);
		if (log == NULL)
			return (ENOMEM);
		*log = hdr;
		if (hdr.numrec == 0)
			break;

		error = nvmf_get_discovery_log_page(qp, sizeof(hdr),
		    log->entries, payload_len);
		if (error == EAGAIN)
			continue;
		if (error != 0) {
			free(log);
			return (error);
		}

		/* Re-read the header and check the generation count. */
		error = nvmf_get_discovery_log_page(qp, 0, &hdr, sizeof(hdr));
		if (error != 0) {
			free(log);
			return (error);
		}
		nvme_discovery_log_swapbytes(&hdr);

		if (log->genctr != hdr.genctr)
			continue;

		for (u_int i = 0; i < log->numrec; i++)
			nvme_discovery_log_entry_swapbytes(&log->entries[i]);
		break;
	}
	*logp = log;
	return (0);
}

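/*
 * Request a number of I/O queues via SET_FEATURES (Number of Queues),
 * returning the number granted by the controller in *actual.
 */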
int
nvmf_host_request_queues(struct nvmf_qpair *qp, u_int requested, u_int *actual)
{
	struct nvme_command cmd;
	struct nvmf_capsule *cc, *rc;
	int error;
	uint16_t status;

	if (!qp->nq_admin || requested < 1 || requested > 65535)
		return (EINVAL);

	/* The number of queues is 0's based. */
	requested--;

	nvmf_init_sqe(&cmd, NVME_OPC_SET_FEATURES);
	cmd.cdw10 = htole32(NVME_FEAT_NUMBER_OF_QUEUES);

	/* Same number of completion and submission queues. */
	cmd.cdw11 = htole32((requested << 16) | requested);

	cc = nvmf_allocate_command(qp, &cmd);
	if (cc == NULL)
		return (errno);

	error = nvmf_host_transmit_command(cc);
	if (error != 0) {
		nvmf_free_capsule(cc);
		return (error);
	}

	error = nvmf_host_wait_for_response(cc, &rc);
	nvmf_free_capsule(cc);
	if (error != 0)
		return (error);

	status = le16toh(rc->nc_cqe.status);
	if (status != 0) {
		printf("NVMF: SET_FEATURES failed, status %#x\n", status);
		nvmf_free_capsule(rc);
		return (EIO);
	}

	*actual = (le32toh(rc->nc_cqe.cdw0) & 0xffff) + 1;
	nvmf_free_capsule(rc);
	return (0);
}

static bool
is_queue_pair_idle(struct nvmf_qpair *qp)
{
	if (qp->nq_sqhd != qp->nq_sqtail)
		return (false);
	if (!TAILQ_EMPTY(&qp->nq_rx_capsules))
		return (false);
	return (true);
}

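/*
 * Fill out the handoff structure describing the admin and I/O queue
 * pairs.  All queue pairs must be idle.  On success the caller is
 * responsible for freeing hh->io.
 */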
static int
prepare_queues_for_handoff(struct nvmf_handoff_host *hh,
    struct nvmf_qpair *admin_qp, u_int num_queues,
    struct nvmf_qpair **io_queues, const struct nvme_controller_data *cdata)
{
	struct nvmf_handoff_qpair_params *io;
	u_int i;
	int error;

	memset(hh, 0, sizeof(*hh));

	/* All queue pairs must be idle. */
	if (!is_queue_pair_idle(admin_qp))
		return (EBUSY);
	for (i = 0; i < num_queues; i++) {
		if (!is_queue_pair_idle(io_queues[i]))
			return (EBUSY);
	}

	/* First, the admin queue. */
	hh->trtype = admin_qp->nq_association->na_trtype;
	hh->kato = admin_qp->nq_kato;
	error = nvmf_kernel_handoff_params(admin_qp, &hh->admin);
	if (error)
		return (error);

	/* Next, the I/O queues. */
	hh->num_io_queues = num_queues;
	io = calloc(num_queues, sizeof(*io));
	if (io == NULL)
		return (ENOMEM);
	for (i = 0; i < num_queues; i++) {
		error = nvmf_kernel_handoff_params(io_queues[i], &io[i]);
		if (error) {
			free(io);
			return (error);
		}
	}

	hh->io = io;
	hh->cdata = cdata;
	return (0);
}

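/*
 * Hand off a set of connected queue pairs to the in-kernel host via
 * /dev/nvmf.  The queue pairs are always freed, even on failure.
 */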
int
nvmf_handoff_host(struct nvmf_qpair *admin_qp, u_int num_queues,
    struct nvmf_qpair **io_queues, const struct nvme_controller_data *cdata)
{
	struct nvmf_handoff_host hh;
	u_int i;
	int error, fd;

	fd = open("/dev/nvmf", O_RDWR);
	if (fd == -1) {
		error = errno;
		goto out;
	}

	error = prepare_queues_for_handoff(&hh, admin_qp, num_queues, io_queues,
	    cdata);
	if (error != 0)
		goto out;

	if (ioctl(fd, NVMF_HANDOFF_HOST, &hh) == -1)
		error = errno;
	free(hh.io);

out:
	if (fd >= 0)
		close(fd);
	for (i = 0; i < num_queues; i++)
		(void)nvmf_free_qpair(io_queues[i]);
	(void)nvmf_free_qpair(admin_qp);
	return (error);
}

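/*
 * Request that the kernel disconnect the association(s) identified by
 * host via the NVMF_DISCONNECT_HOST ioctl.
 */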
int
nvmf_disconnect_host(const char *host)
{
	int error, fd;

	error = 0;
	fd = open("/dev/nvmf", O_RDWR);
	if (fd == -1) {
		error = errno;
		goto out;
	}

	if (ioctl(fd, NVMF_DISCONNECT_HOST, &host) == -1)
		error = errno;

out:
	if (fd >= 0)
		close(fd);
	return (error);
}

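/* Request that the kernel disconnect all active associations. */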
int
nvmf_disconnect_all(void)
{
	int error, fd;

	error = 0;
	fd = open("/dev/nvmf", O_RDWR);
	if (fd == -1) {
		error = errno;
		goto out;
	}

	if (ioctl(fd, NVMF_DISCONNECT_ALL) == -1)
		error = errno;

out:
	if (fd >= 0)
		close(fd);
	return (error);
}

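/* Fetch reconnect parameters for an association from the kernel. */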
int
nvmf_reconnect_params(int fd, struct nvmf_reconnect_params *rparams)
{
	if (ioctl(fd, NVMF_RECONNECT_PARAMS, rparams) == -1)
		return (errno);
	return (0);
}

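/*
 * Hand off new queue pairs to the kernel to re-establish a previously
 * disconnected association.  The queue pairs are always freed.
 */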
int
nvmf_reconnect_host(int fd, struct nvmf_qpair *admin_qp, u_int num_queues,
    struct nvmf_qpair **io_queues, const struct nvme_controller_data *cdata)
{
	struct nvmf_handoff_host hh;
	u_int i;
	int error;

	error = prepare_queues_for_handoff(&hh, admin_qp, num_queues, io_queues,
	    cdata);
	if (error != 0)
		goto out;

	if (ioctl(fd, NVMF_RECONNECT_HOST, &hh) == -1)
		error = errno;
	free(hh.io);

out:
	for (i = 0; i < num_queues; i++)
		(void)nvmf_free_qpair(io_queues[i]);
	(void)nvmf_free_qpair(admin_qp);
	return (error);
}
