/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2017, Western Digital Corporation or its affiliates.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "nvme_internal.h"

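/*
 * Get a pointer to the identify data of a namespace cached
 * by its controller.
 */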
static inline struct nvme_ns_data *nvme_ns_get_data(struct nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

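/*
 * Refresh the namespace identify data and update the namespace
 * fields derived from it (sector size, maximum I/O size, flags,
 * metadata size and protection information type).
 */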
static int nvme_ns_identify_update(struct nvme_ns *ns)
{
	struct nvme_ctrlr *ctrlr = ns->ctrlr;
	struct nvme_ns_data *nsdata = nvme_ns_get_data(ns);
	uint32_t sector_size;
	int ret;

	ret = nvme_admin_identify_ns(ctrlr, ns->id, nsdata);
	if (ret != 0) {
		nvme_err("nvme_admin_identify_ns failed\n");
		return ret;
	}

	sector_size = 1 << nsdata->lbaf[nsdata->flbas.format].lbads;

	ns->sector_size = sector_size;
	ns->sectors_per_max_io = ctrlr->max_xfer_size / sector_size;
	ns->sectors_per_stripe = ns->stripe_size / sector_size;

	ns->flags = 0x0000;

	if (ctrlr->cdata.oncs.dsm)
		ns->flags |= NVME_NS_DEALLOCATE_SUPPORTED;

	if (ctrlr->cdata.vwc.present)
		ns->flags |= NVME_NS_FLUSH_SUPPORTED;

	if (ctrlr->cdata.oncs.write_zeroes)
		ns->flags |= NVME_NS_WRITE_ZEROES_SUPPORTED;

	if (nsdata->nsrescap.raw)
		ns->flags |= NVME_NS_RESERVATION_SUPPORTED;

	ns->md_size = nsdata->lbaf[nsdata->flbas.format].ms;
	ns->pi_type = NVME_FMT_NVM_PROTECTION_DISABLE;

	if (nsdata->lbaf[nsdata->flbas.format].ms && nsdata->dps.pit) {
		ns->flags |= NVME_NS_DPS_PI_SUPPORTED;
		ns->pi_type = nsdata->dps.pit;
		if (nsdata->flbas.extended)
			ns->flags |= NVME_NS_EXTENDED_LBA_SUPPORTED;
	}

	return 0;
}

/*
 * Initialize a namespace.
 */
int nvme_ns_construct(struct nvme_ctrlr *ctrlr, struct nvme_ns *ns,
		      unsigned int id)
{
	uint32_t pci_devid;

	ns->ctrlr = ctrlr;
	ns->id = id;
	ns->stripe_size = 0;

	nvme_pcicfg_read32(ctrlr->pci_dev, &pci_devid, 0);
	if (pci_devid == INTEL_DC_P3X00_DEVID && ctrlr->cdata.vs[3] != 0)
		ns->stripe_size = (1 << ctrlr->cdata.vs[3])
			* ctrlr->min_page_size;

	return nvme_ns_identify_update(ns);
}

/*
 * Open a namespace.
 */
struct nvme_ns *nvme_ns_open(struct nvme_ctrlr *ctrlr, unsigned int ns_id)
{
	struct nvme_ns *ns = NULL;

	pthread_mutex_lock(&ctrlr->lock);

	if (ns_id >= 1 && ns_id <= ctrlr->nr_ns) {
		ns = &ctrlr->ns[ns_id - 1];
		ns->open_count++;
	}

	pthread_mutex_unlock(&ctrlr->lock);

	return ns;
}

/*
 * Get the controller of an open namespace and lock it,
 * making sure in the process that the namespace handle is valid.
 */
static struct nvme_ctrlr *nvme_ns_ctrlr_lock(struct nvme_ns *ns)
{
	struct nvme_ctrlr *ctrlr;

	if (!ns)
		return NULL;

	ctrlr = ns->ctrlr;
	if (ns->id < 1 ||
	    ns->id > ctrlr->nr_ns ||
	    ns != &ctrlr->ns[ns->id - 1])
		return NULL;

	pthread_mutex_lock(&ctrlr->lock);

	/*
	 * Between the check and the lock, the namespace may have gone
	 * away, so check again and make sure that the namespace is open.
	 */
	if (ns->id > ctrlr->nr_ns ||
	    ns != &ctrlr->ns[ns->id - 1] ||
	    ns->open_count == 0) {
		pthread_mutex_unlock(&ctrlr->lock);
		return NULL;
	}

	return ctrlr;
}

/*
 * Close an open namespace.
 */
int nvme_ns_close(struct nvme_ns *ns)
{
	struct nvme_ctrlr *ctrlr;

	ctrlr = nvme_ns_ctrlr_lock(ns);
	if (!ctrlr) {
		nvme_err("Invalid namespace handle\n");
		return EINVAL;
	}

	ns->open_count--;

	pthread_mutex_unlock(&ctrlr->lock);

	return 0;
}

/*
 * Get namespace information.
 */
int nvme_ns_stat(struct nvme_ns *ns, struct nvme_ns_stat *ns_stat)
{
	struct nvme_ctrlr *ctrlr;

	ctrlr = nvme_ns_ctrlr_lock(ns);
	if (!ctrlr) {
		nvme_err("Invalid namespace handle\n");
		return EINVAL;
	}

	ns_stat->id = ns->id;
	ns_stat->sector_size = ns->sector_size;
	ns_stat->sectors = nvme_ns_get_data(ns)->nsze;
	ns_stat->flags = ns->flags;
	ns_stat->pi_type = ns->pi_type;
	ns_stat->md_size = ns->md_size;

	pthread_mutex_unlock(&ctrlr->lock);

	return 0;
}

/*
 * Get namespace data.
 */
int nvme_ns_data(struct nvme_ns *ns, struct nvme_ns_data *nsdata)
{
	struct nvme_ctrlr *ctrlr;

	ctrlr = nvme_ns_ctrlr_lock(ns);
	if (!ctrlr) {
		nvme_err("Invalid namespace handle\n");
		return EINVAL;
	}

	memcpy(nsdata, nvme_ns_get_data(ns), sizeof(struct nvme_ns_data));

	pthread_mutex_unlock(&ctrlr->lock);

	return 0;
}

static struct nvme_request *_nvme_ns_rw(struct nvme_ns *ns,
			struct nvme_qpair *qpair,
			const struct nvme_payload *payload, uint64_t lba,
			uint32_t lba_count, nvme_cmd_cb cb_fn,
			void *cb_arg, uint32_t opc, uint32_t io_flags,
			uint16_t apptag_mask, uint16_t apptag);

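/*
 * Split an I/O request into child requests, each one no larger
 * than sectors_per_max_io and aligned according to sector_mask.
 */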
static struct nvme_request *
_nvme_ns_split_request(struct nvme_ns *ns,
		       struct nvme_qpair *qpair,
		       const struct nvme_payload *payload,
		       uint64_t lba, uint32_t lba_count,
		       nvme_cmd_cb cb_fn, void *cb_arg,
		       uint32_t opc,
		       uint32_t io_flags,
		       struct nvme_request *req,
		       uint32_t sectors_per_max_io,
		       uint32_t sector_mask,
		       uint16_t apptag_mask,
		       uint16_t apptag)
{
	uint32_t sector_size = ns->sector_size;
	uint32_t md_size = ns->md_size;
	uint32_t remaining_lba_count = lba_count;
	uint32_t offset = 0;
	uint32_t md_offset = 0;
	struct nvme_request *child, *tmp;

	if (ns->flags & NVME_NS_DPS_PI_SUPPORTED) {
		/* for extended LBA only */
		if ((ns->flags & NVME_NS_EXTENDED_LBA_SUPPORTED)
		    && !(io_flags & NVME_IO_FLAGS_PRACT))
			sector_size += ns->md_size;
	}

	while (remaining_lba_count > 0) {

		lba_count = sectors_per_max_io - (lba & sector_mask);
		lba_count = nvme_min(remaining_lba_count, lba_count);

		child = _nvme_ns_rw(ns, qpair, payload, lba, lba_count, cb_fn,
				    cb_arg, opc, io_flags, apptag_mask, apptag);
		if (child == NULL) {
			if (req->child_reqs) {
				/* Free all already allocated child requests */
				TAILQ_FOREACH_SAFE(child, &req->children,
						   child_tailq, tmp) {
					nvme_request_remove_child(req, child);
					nvme_request_free(child);
				}
			}
			return NULL;
		}

		child->payload_offset = offset;

		/* for separate metadata buffer only */
		if (payload->md)
			child->md_offset = md_offset;

		nvme_request_add_child(req, child);

		remaining_lba_count -= lba_count;
		lba += lba_count;
		offset += lba_count * sector_size;
		md_offset += lba_count * md_size;

	}

	return req;
}

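/*
 * Build a read or write request, splitting it into child requests
 * if it crosses a stripe boundary or exceeds the controller
 * maximum transfer size.
 */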
static struct nvme_request *_nvme_ns_rw(struct nvme_ns *ns,
					struct nvme_qpair *qpair,
					const struct nvme_payload *payload,
					uint64_t lba, uint32_t lba_count,
					nvme_cmd_cb cb_fn, void *cb_arg,
					uint32_t opc,
					uint32_t io_flags,
					uint16_t apptag_mask,
					uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_cmd	*cmd;
	uint64_t *tmp_lba;
	uint32_t sector_size;
	uint32_t sectors_per_max_io;
	uint32_t sectors_per_stripe;

	/*
	 * The bottom 16 bits of io_flags must be zero: the flags are ORed
	 * into cdw12, whose lower 16 bits hold the 0's based LBA count.
	 */
	if (io_flags & 0xFFFF)
		return NULL;

	sector_size = ns->sector_size;
	sectors_per_max_io = ns->sectors_per_max_io;
	sectors_per_stripe = ns->sectors_per_stripe;

	if (ns->flags & NVME_NS_DPS_PI_SUPPORTED)
		/* for extended LBA only */
		if ((ns->flags & NVME_NS_EXTENDED_LBA_SUPPORTED) &&
		    !(io_flags & NVME_IO_FLAGS_PRACT))
			sector_size += ns->md_size;

	req = nvme_request_allocate(qpair, payload,
				    lba_count * sector_size, cb_fn, cb_arg);
	if (req == NULL)
		return NULL;

	/*
	 * Intel DC P3*00 NVMe controllers benefit from driver-assisted striping.
	 * If this controller defines a stripe boundary and this I/O spans
	 * a stripe boundary, split the request into multiple requests and
	 * submit each separately to hardware.
	 */
	if (sectors_per_stripe > 0 &&
	    (((lba & (sectors_per_stripe - 1)) + lba_count) > sectors_per_stripe))
		return _nvme_ns_split_request(ns, qpair, payload, lba,
					      lba_count, cb_fn, cb_arg, opc,
					      io_flags, req, sectors_per_stripe,
					      sectors_per_stripe - 1,
					      apptag_mask, apptag);

	if (lba_count > sectors_per_max_io)
		return _nvme_ns_split_request(ns, qpair, payload, lba,
					      lba_count, cb_fn, cb_arg, opc,
					      io_flags, req, sectors_per_max_io,
					      0, apptag_mask, apptag);

	cmd = &req->cmd;
	cmd->opc = opc;
	cmd->nsid = ns->id;

	tmp_lba = (uint64_t *)&cmd->cdw10;
	*tmp_lba = lba;

	if (ns->flags & NVME_NS_DPS_PI_SUPPORTED) {
		switch (ns->pi_type) {
		case NVME_FMT_NVM_PROTECTION_TYPE1:
		case NVME_FMT_NVM_PROTECTION_TYPE2:
			cmd->cdw14 = (uint32_t)lba;
			break;
		}
	}

	cmd->cdw12 = lba_count - 1;
	cmd->cdw12 |= io_flags;

	cmd->cdw15 = apptag_mask;
	cmd->cdw15 = (cmd->cdw15 << 16 | apptag);

	return req;
}

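/*
 * Submit a read command using a contiguous buffer.
 */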
int nvme_ns_read(struct nvme_ns *ns, struct nvme_qpair *qpair,
		 void *buffer,
		 uint64_t lba, uint32_t lba_count,
		 nvme_cmd_cb cb_fn, void *cb_arg,
		 unsigned int io_flags)
{
	struct nvme_request *req;
	struct nvme_payload payload;

	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
	payload.u.contig = buffer;
	payload.md = NULL;

	req = _nvme_ns_rw(ns, qpair, &payload, lba, lba_count, cb_fn, cb_arg,
			  NVME_OPC_READ, io_flags, 0, 0);
	if (req != NULL)
		return nvme_qpair_submit_request(qpair, req);

	return ENOMEM;
}

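/*
 * Submit a read command using a contiguous buffer and a separate
 * metadata buffer.
 */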
int nvme_ns_read_with_md(struct nvme_ns *ns, struct nvme_qpair *qpair,
			 void *buffer, void *metadata,
			 uint64_t lba, uint32_t lba_count,
			 nvme_cmd_cb cb_fn, void *cb_arg,
			 unsigned int io_flags,
			 uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;

	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
	payload.u.contig = buffer;
	payload.md = metadata;

	req = _nvme_ns_rw(ns, qpair, &payload, lba, lba_count, cb_fn, cb_arg,
			  NVME_OPC_READ, io_flags, apptag_mask, apptag);
	if (req != NULL)
		return nvme_qpair_submit_request(qpair, req);

	return ENOMEM;
}

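/*
 * Submit a read command using a scattered payload described by
 * the SGL callbacks.
 */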
int nvme_ns_readv(struct nvme_ns *ns, struct nvme_qpair *qpair,
		  uint64_t lba, uint32_t lba_count,
		  nvme_cmd_cb cb_fn, void *cb_arg,
		  unsigned int io_flags,
		  nvme_req_reset_sgl_cb reset_sgl_fn,
		  nvme_req_next_sge_cb next_sge_fn)
{
	struct nvme_request *req;
	struct nvme_payload payload;

	if (reset_sgl_fn == NULL || next_sge_fn == NULL)
		return EINVAL;

	payload.type = NVME_PAYLOAD_TYPE_SGL;
	payload.md = NULL;
	payload.u.sgl.reset_sgl_fn = reset_sgl_fn;
	payload.u.sgl.next_sge_fn = next_sge_fn;
	payload.u.sgl.cb_arg = cb_arg;

	req = _nvme_ns_rw(ns, qpair, &payload, lba, lba_count, cb_fn, cb_arg,
			  NVME_OPC_READ, io_flags, 0, 0);
	if (req != NULL)
		return nvme_qpair_submit_request(qpair, req);

	return ENOMEM;
}

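/*
 * Submit a write command using a contiguous buffer.
 */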
int nvme_ns_write(struct nvme_ns *ns, struct nvme_qpair *qpair,
		  void *buffer,
		  uint64_t lba, uint32_t lba_count,
		  nvme_cmd_cb cb_fn, void *cb_arg,
		  unsigned int io_flags)
{
	struct nvme_request *req;
	struct nvme_payload payload;

	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
	payload.u.contig = buffer;
	payload.md = NULL;

	req = _nvme_ns_rw(ns, qpair, &payload, lba, lba_count, cb_fn, cb_arg,
			  NVME_OPC_WRITE, io_flags, 0, 0);
	if (req != NULL)
		return nvme_qpair_submit_request(qpair, req);

	return ENOMEM;
}

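/*
 * Submit a write command using a contiguous buffer and a separate
 * metadata buffer.
 */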
int nvme_ns_write_with_md(struct nvme_ns *ns, struct nvme_qpair *qpair,
			  void *buffer, void *metadata,
			  uint64_t lba, uint32_t lba_count,
			  nvme_cmd_cb cb_fn, void *cb_arg,
			  unsigned int io_flags,
			  uint16_t apptag_mask, uint16_t apptag)
{
	struct nvme_request *req;
	struct nvme_payload payload;

	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
	payload.u.contig = buffer;
	payload.md = metadata;

	req = _nvme_ns_rw(ns, qpair, &payload, lba, lba_count, cb_fn, cb_arg,
			  NVME_OPC_WRITE, io_flags, apptag_mask, apptag);
	if (req != NULL)
		return nvme_qpair_submit_request(qpair, req);

	return ENOMEM;
}

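/*
 * Submit a write command using a scattered payload described by
 * the SGL callbacks.
 */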
int nvme_ns_writev(struct nvme_ns *ns, struct nvme_qpair *qpair,
		   uint64_t lba, uint32_t lba_count,
		   nvme_cmd_cb cb_fn, void *cb_arg,
		   unsigned int io_flags,
		   nvme_req_reset_sgl_cb reset_sgl_fn,
		   nvme_req_next_sge_cb next_sge_fn)
{
	struct nvme_request *req;
	struct nvme_payload payload;

	if (reset_sgl_fn == NULL || next_sge_fn == NULL)
		return EINVAL;

	payload.type = NVME_PAYLOAD_TYPE_SGL;
	payload.md = NULL;
	payload.u.sgl.reset_sgl_fn = reset_sgl_fn;
	payload.u.sgl.next_sge_fn = next_sge_fn;
	payload.u.sgl.cb_arg = cb_arg;

	req = _nvme_ns_rw(ns, qpair, &payload, lba, lba_count, cb_fn, cb_arg,
			  NVME_OPC_WRITE, io_flags, 0, 0);
	if (req != NULL)
		return nvme_qpair_submit_request(qpair, req);

	return ENOMEM;
}

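/*
 * Submit a write zeroes command.
 */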
int nvme_ns_write_zeroes(struct nvme_ns *ns, struct nvme_qpair *qpair,
			 uint64_t lba, uint32_t lba_count,
			 nvme_cmd_cb cb_fn, void *cb_arg,
			 unsigned int io_flags)
{
	struct nvme_request *req;
	struct nvme_cmd	*cmd;
	uint64_t *tmp_lba;

	if (lba_count == 0)
		return EINVAL;

	req = nvme_request_allocate_null(qpair, cb_fn, cb_arg);
	if (req == NULL)
		return ENOMEM;

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_WRITE_ZEROES;
	cmd->nsid = ns->id;

	tmp_lba = (uint64_t *)&cmd->cdw10;
	*tmp_lba = lba;
	cmd->cdw12 = lba_count - 1;
	cmd->cdw12 |= io_flags;

	return nvme_qpair_submit_request(qpair, req);
}

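/*
 * Submit a dataset management command to deallocate the LBA
 * ranges described by the payload.
 */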
int nvme_ns_deallocate(struct nvme_ns *ns, struct nvme_qpair *qpair,
		       void *payload, uint16_t ranges,
		       nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_cmd	*cmd;

	if (ranges == 0 || ranges > NVME_DATASET_MANAGEMENT_MAX_RANGES)
		return EINVAL;

	req = nvme_request_allocate_contig(qpair, payload,
				   ranges * sizeof(struct nvme_dsm_range),
				   cb_fn, cb_arg);
	if (req == NULL)
		return ENOMEM;

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_DATASET_MANAGEMENT;
	cmd->nsid = ns->id;

	/* TODO: create a delete command data structure */
	cmd->cdw10 = ranges - 1;
	cmd->cdw11 = NVME_DSM_ATTR_DEALLOCATE;

	return nvme_qpair_submit_request(qpair, req);
}

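/*
 * Submit a flush command.
 */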
int nvme_ns_flush(struct nvme_ns *ns, struct nvme_qpair *qpair,
		  nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_cmd	*cmd;

	req = nvme_request_allocate_null(qpair, cb_fn, cb_arg);
	if (req == NULL)
		return ENOMEM;

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_FLUSH;
	cmd->nsid = ns->id;

	return nvme_qpair_submit_request(qpair, req);
}

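/*
 * Submit a reservation register command.
 */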
int nvme_ns_reservation_register(struct nvme_ns *ns, struct nvme_qpair *qpair,
				 struct nvme_reservation_register_data *payload,
				 bool ignore_key,
				 enum nvme_reservation_register_action action,
				 enum nvme_reservation_register_cptpl cptpl,
				 nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_cmd	*cmd;

	req = nvme_request_allocate_contig(qpair, payload,
					   sizeof(struct nvme_reservation_register_data),
					   cb_fn, cb_arg);
	if (req == NULL)
		return ENOMEM;

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_RESERVATION_REGISTER;
	cmd->nsid = ns->id;

	/* Bits 0-2 */
	cmd->cdw10 = action;
	/* Bit 3 */
	cmd->cdw10 |= ignore_key ? 1 << 3 : 0;
	/* Bits 30-31 */
	cmd->cdw10 |= (uint32_t)cptpl << 30;

	return nvme_qpair_submit_request(qpair, req);
}

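/*
 * Submit a reservation release command.
 */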
int nvme_ns_reservation_release(struct nvme_ns *ns, struct nvme_qpair *qpair,
				struct nvme_reservation_key_data *payload,
				bool ignore_key,
				enum nvme_reservation_release_action action,
				enum nvme_reservation_type type,
				nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_cmd	*cmd;

	req = nvme_request_allocate_contig(qpair, payload,
					   sizeof(struct nvme_reservation_key_data),
					   cb_fn, cb_arg);
	if (req == NULL)
		return ENOMEM;

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_RESERVATION_RELEASE;
	cmd->nsid = ns->id;

	/* Bits 0-2 */
	cmd->cdw10 = action;
	/* Bit 3 */
	cmd->cdw10 |= ignore_key ? 1 << 3 : 0;
	/* Bits 8-15 */
	cmd->cdw10 |= (uint32_t)type << 8;

	return nvme_qpair_submit_request(qpair, req);
}

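/*
 * Submit a reservation acquire command.
 */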
int nvme_ns_reservation_acquire(struct nvme_ns *ns, struct nvme_qpair *qpair,
				struct nvme_reservation_acquire_data *payload,
				bool ignore_key,
				enum nvme_reservation_acquire_action action,
				enum nvme_reservation_type type,
				nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request	*req;
	struct nvme_cmd	*cmd;

	req = nvme_request_allocate_contig(qpair, payload,
					   sizeof(struct nvme_reservation_acquire_data),
					   cb_fn, cb_arg);
	if (req == NULL)
		return ENOMEM;

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_RESERVATION_ACQUIRE;
	cmd->nsid = ns->id;

	/* Bits 0-2 */
	cmd->cdw10 = action;
	/* Bit 3 */
	cmd->cdw10 |= ignore_key ? 1 << 3 : 0;
	/* Bits 8-15 */
	cmd->cdw10 |= (uint32_t)type << 8;

	return nvme_qpair_submit_request(qpair, req);
}

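/*
 * Submit a reservation report command.
 */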
int nvme_ns_reservation_report(struct nvme_ns *ns, struct nvme_qpair *qpair,
			       void *payload, size_t len,
			       nvme_cmd_cb cb_fn, void *cb_arg)
{
	uint32_t num_dwords;
	struct nvme_request *req;
	struct nvme_cmd	*cmd;

	if (len % 4)
		return EINVAL;
	num_dwords = len / 4;

	req = nvme_request_allocate_contig(qpair, payload, len, cb_fn, cb_arg);
	if (req == NULL)
		return ENOMEM;

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_RESERVATION_REPORT;
	cmd->nsid = ns->id;

	cmd->cdw10 = num_dwords;

	return nvme_qpair_submit_request(qpair, req);
}