/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2017, Western Digital Corporation or its affiliates.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "nvme_internal.h"

/*
 * Allocate a request descriptor from the queue pair free list.
 */
static struct nvme_request *nvme_alloc_request(struct nvme_qpair *qpair)
{
	struct nvme_request *req;

	pthread_mutex_lock(&qpair->lock);

	req = STAILQ_FIRST(&qpair->free_req);
	if (req) {
		STAILQ_REMOVE_HEAD(&qpair->free_req, stailq);
		memset(&req->cmd, 0, sizeof(struct nvme_cmd));
	}

	pthread_mutex_unlock(&qpair->lock);

	return req;
}

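/*
 * Completion callback for the child requests of a split (parent) request:
 * detach the child from its parent, propagate any error status to the
 * parent, and complete the parent once its last child has completed.
 * The queue pair lock is expected to be held here, as the parent is
 * released with nvme_request_free_locked().
 */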
static void nvme_request_cb_complete_child(void *child_arg,
					   const struct nvme_cpl *cpl)
{
	struct nvme_request *child = child_arg;
	struct nvme_request *parent = child->parent;

	nvme_request_remove_child(parent, child);

	if (nvme_cpl_is_error(cpl))
		memcpy(&parent->parent_status, cpl, sizeof(*cpl));

	if (parent->child_reqs == 0) {
		if (parent->cb_fn)
			parent->cb_fn(parent->cb_arg, &parent->parent_status);
		nvme_request_free_locked(parent);
	}
}

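/*
 * Completion callback for synchronously polled commands: copy the
 * completion entry into the caller's poll status and mark it done.
 */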
void nvme_request_completion_poll_cb(void *arg, const struct nvme_cpl *cpl)
{
	struct nvme_completion_poll_status *status = arg;

	memcpy(&status->cpl, cpl, sizeof(*cpl));
	status->done = true;
}

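/*
 * Pre-allocate the request pool of a queue pair and put all
 * requests on the free list.
 */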
int nvme_request_pool_construct(struct nvme_qpair *qpair)
{
	struct nvme_request *req;
	unsigned int i;

	qpair->num_reqs = qpair->trackers * NVME_IO_ENTRIES_VS_TRACKERS_RATIO;
	qpair->reqs = calloc(qpair->num_reqs, sizeof(struct nvme_request));
	if (!qpair->reqs) {
		nvme_err("QPair %d: failed to allocate %u requests\n",
			 (int)qpair->id, qpair->num_reqs);
		return ENOMEM;
	}

	nvme_info("QPair %d: %u requests in pool\n",
		  (int)qpair->id, qpair->num_reqs);

	for (i = 0; i < qpair->num_reqs; i++) {
		req = &qpair->reqs[i];
		req->qpair = qpair;
		STAILQ_INSERT_TAIL(&qpair->free_req, req, stailq);
	}

	return 0;
}

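/*
 * Drain the free list and release the request pool of a queue pair,
 * warning if some requests were never returned to the free list.
 */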
void nvme_request_pool_destroy(struct nvme_qpair *qpair)
{
	struct nvme_request *req;
	unsigned int n = 0;

	while ((req = STAILQ_FIRST(&qpair->free_req))) {
		STAILQ_REMOVE_HEAD(&qpair->free_req, stailq);
		n++;
	}

	if (n != qpair->num_reqs)
		nvme_err("QPair %d: Freed %u/%u requests\n",
			 (int)qpair->id, n, qpair->num_reqs);

	free(qpair->reqs);
}

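/*
 * Allocate a request from the queue pair free list and initialize it
 * with the caller's payload and completion callback.
 */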
struct nvme_request *nvme_request_allocate(struct nvme_qpair *qpair,
					   const struct nvme_payload *payload,
					   uint32_t payload_size,
					   nvme_cmd_cb cb_fn,
					   void *cb_arg)
{
	struct nvme_request *req;

	req = nvme_alloc_request(qpair);
	if (req == NULL)
		return NULL;

	/*
	 * Only memset up to (but not including) the children TAILQ_ENTRY.
	 * Children, and the members that follow, are only used as part of
	 * I/O splitting, so we avoid memsetting them until they are actually
	 * needed. They will be initialized in nvme_request_add_child()
	 * if the request is split.
	 */
	memset(req, 0, offsetof(struct nvme_request, children));
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;
	req->payload = *payload;
	req->payload_size = payload_size;

	return req;
}

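/*
 * Allocate a request whose payload is a single contiguous buffer.
 */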
struct nvme_request *nvme_request_allocate_contig(struct nvme_qpair *qpair,
						  void *buffer,
						  uint32_t payload_size,
						  nvme_cmd_cb cb_fn,
						  void *cb_arg)
{
	struct nvme_payload payload;

	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
	payload.u.contig = buffer;
	payload.md = NULL;

	return nvme_request_allocate(qpair, &payload, payload_size,
				     cb_fn, cb_arg);
}

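/*
 * Allocate a request without any payload, e.g. for commands that do not
 * transfer data.
 */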
struct nvme_request *nvme_request_allocate_null(struct nvme_qpair *qpair,
						nvme_cmd_cb cb_fn, void *cb_arg)
{
	return nvme_request_allocate_contig(qpair, NULL, 0, cb_fn, cb_arg);
}

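/*
 * Return a request to its queue pair free list.
 * The caller must hold the queue pair lock.
 */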
void nvme_request_free_locked(struct nvme_request *req)
{
	nvme_assert(req->child_reqs == 0, "Number of child requests is not 0\n");

	STAILQ_INSERT_HEAD(&req->qpair->free_req, req, stailq);
}

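/*
 * Return a request to its queue pair free list, taking and releasing
 * the queue pair lock.
 */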
void nvme_request_free(struct nvme_request *req)
{
	pthread_mutex_lock(&req->qpair->lock);

	nvme_request_free_locked(req);

	pthread_mutex_unlock(&req->qpair->lock);
}

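/*
 * Attach a child request to a parent request as part of I/O splitting.
 * The first child added lazily initializes the parent's children list
 * and aggregated completion status.
 */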
void nvme_request_add_child(struct nvme_request *parent,
			    struct nvme_request *child)
{
	if (parent->child_reqs == 0) {
		/*
		 * Defer initialization of the children TAILQ since it falls
		 * on a separate cacheline. This ensures we do not touch this
		 * cacheline except in request splitting cases, which are
		 * relatively rare.
		 */
		TAILQ_INIT(&parent->children);
		parent->parent = NULL;
		memset(&parent->parent_status, 0, sizeof(struct nvme_cpl));
	}

	parent->child_reqs++;
	TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);
	child->parent = parent;
	child->cb_fn = nvme_request_cb_complete_child;
	child->cb_arg = child;
}

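/*
 * Detach a child request from its parent request.
 */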
void nvme_request_remove_child(struct nvme_request *parent,
			       struct nvme_request *child)
{
	nvme_assert(child->parent == parent, "child->parent != parent\n");
	nvme_assert(parent->child_reqs != 0, "child_reqs is 0\n");

	parent->child_reqs--;
	TAILQ_REMOVE(&parent->children, child, child_tailq);
}