/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2017, Western Digital Corporation or its affiliates.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "nvme_internal.h"

/*
 * List of open controllers and the lock protecting it.
 */
LIST_HEAD(, nvme_ctrlr)	ctrlr_head = LIST_HEAD_INITIALIZER(ctrlr_head);
static pthread_mutex_t ctrlr_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;

/*
 * Search for an open controller.
 */
static struct nvme_ctrlr *nvme_ctrlr_get(struct nvme_ctrlr *ctrlr,
					 bool remove)
{
	struct nvme_ctrlr *c;

	pthread_mutex_lock(&ctrlr_lock);

	LIST_FOREACH(c, &ctrlr_head, link) {
		if (c == ctrlr) {
			if (remove)
				LIST_REMOVE(c, link);
			goto out;
		}
	}

	ctrlr = NULL;

out:
	pthread_mutex_unlock(&ctrlr_lock);

	return ctrlr;
}

#ifndef __HAIKU__
/*
 * Probe a PCI device identified by its slot name.
 * The name must be of the form [0000:]00:00.0.
 * Return NULL on failure.
 */
static struct pci_device *nvme_pci_ctrlr_probe(const char *slot_name)
{
	char *domain = NULL, *bus = NULL, *dev = NULL, *func = NULL, *end = NULL;
	char *pciid = strdup(slot_name);
	struct pci_slot_match slot;
	struct pci_device *pci_dev = NULL;

	if (!pciid)
		return NULL;

	memset(&slot, 0, sizeof(struct pci_slot_match));

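	/*
	 * Parse the slot name from right to left: the function number
	 * follows the last '.', the device number follows the last ':',
	 * and the remainder is either "bus" or "domain:bus".
	 */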
	func = strrchr(pciid, '.');
	if (func) {
		*func = '\0';
		func++;
	}

	dev = strrchr(pciid, ':');
	if (dev) {
		*dev = '\0';
		dev++;
	}

	bus = strrchr(pciid, ':');
	if (!bus) {
		domain = NULL;
		bus = pciid;
	} else {
		domain = pciid;
		*bus = '\0';
		bus++;
	}

	if (!bus || !dev || !func) {
		nvme_err("Malformed PCI device slot name %s\n",
			 slot_name);
		goto out;
	}

	if (domain) {
		slot.domain = (uint32_t)strtoul(domain, &end, 16);
		if ((end && *end) || (slot.domain > 0xffff)) {
			nvme_err("Invalid domain number: 0x%X\n", slot.domain);
			goto out;
		}
	} else {
		slot.domain = PCI_MATCH_ANY;
	}

	slot.bus = (uint32_t)strtoul(bus, &end, 16);
	if ((end && *end) || (slot.bus > 0xff)) {
		nvme_err("Invalid bus number: 0x%X\n", slot.bus);
		goto out;
	}

	slot.dev = strtoul(dev, &end, 16);
	if ((end && *end) || (slot.dev > 0x1f)) {
		nvme_err("Invalid device number: 0x%X\n", slot.dev);
		goto out;
	}

	slot.func = strtoul(func, &end, 16);
	if ((end && *end) || (slot.func > 7)) {
		nvme_err("Invalid function number: 0x%X\n", slot.func);
		goto out;
	}

	nvme_debug("PCI URL: domain 0x%X, bus 0x%X, dev 0x%X, func 0x%X\n",
		   slot.domain, slot.bus, slot.dev, slot.func);

	pci_dev = nvme_pci_device_probe(&slot);
	if (pci_dev) {
		slot.domain = pci_dev->domain;
		if (slot.domain == PCI_MATCH_ANY)
			slot.domain = 0;
		nvme_info("Found NVMe controller %04x:%02x:%02x.%1u\n",
			  slot.domain,
			  slot.bus,
			  slot.dev,
			  slot.func);
	}

out:
	free(pciid);

	return pci_dev;
}
#endif

/*
 * Open an NVMe controller.
 */
#ifdef __HAIKU__
struct nvme_ctrlr *nvme_ctrlr_open(struct pci_device *pdev,
				   struct nvme_ctrlr_opts *opts)
#else
struct nvme_ctrlr *nvme_ctrlr_open(const char *url,
				   struct nvme_ctrlr_opts *opts)
#endif
{
	struct nvme_ctrlr *ctrlr;
#ifndef __HAIKU__
	struct pci_device *pdev;
	char *slot;

	/* Check url */
	if (strncmp(url, "pci://", 6) != 0) {
		nvme_err("Invalid URL %s\n", url);
		return NULL;
	}

	/* Probe PCI device */
	slot = (char *)url + 6;
	pdev = nvme_pci_ctrlr_probe(slot);
	if (!pdev) {
		nvme_err("Device %s not found\n", url);
		return NULL;
	}
#endif

	pthread_mutex_lock(&ctrlr_lock);

	/* Verify that this controller is not already open */
	LIST_FOREACH(ctrlr, &ctrlr_head, link) {
		if (nvme_pci_dev_cmp(ctrlr->pci_dev, pdev) == 0) {
			nvme_err("Controller already open\n");
			ctrlr = NULL;
			goto out;
		}
	}

	/* Attach the device */
	ctrlr = nvme_ctrlr_attach(pdev, opts);
	if (!ctrlr) {
		nvme_err("Attach failed\n");
		goto out;
	}

	/* Add controller to the list */
	LIST_INSERT_HEAD(&ctrlr_head, ctrlr, link);

out:
	pthread_mutex_unlock(&ctrlr_lock);

	return ctrlr;
}

/*
 * Close an open controller.
 */
int nvme_ctrlr_close(struct nvme_ctrlr *ctrlr)
{
	/*
	 * Verify that this controller is open.
	 * If it is, remove it from the list.
	 */
	ctrlr = nvme_ctrlr_get(ctrlr, true);
	if (!ctrlr) {
		nvme_err("Invalid controller\n");
		return EINVAL;
	}

	nvme_ctrlr_detach(ctrlr);

	return 0;
}
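
/*
 * Example (sketch): a typical open/use/close sequence on a non-Haiku
 * build, using the "pci://" URL form checked in nvme_ctrlr_open().
 * The zero-initialized options and the chosen slot name are
 * illustrative assumptions only.
 *
 *	struct nvme_ctrlr_opts opts = { 0 };
 *	struct nvme_ctrlr *ctrlr;
 *
 *	ctrlr = nvme_ctrlr_open("pci://0000:01:00.0", &opts);
 *	if (!ctrlr)
 *		return -1;
 *
 *	... use the controller ...
 *
 *	nvme_ctrlr_close(ctrlr);
 */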

/*
 * Get controller information.
 */
int nvme_ctrlr_stat(struct nvme_ctrlr *ctrlr, struct nvme_ctrlr_stat *cstat)
{
	struct pci_device *pdev;
	unsigned int i;

	/* Verify that this controller is open */
	ctrlr = nvme_ctrlr_get(ctrlr, false);
	if (!ctrlr) {
		nvme_err("Invalid controller\n");
		return EINVAL;
	}

	pdev = ctrlr->pci_dev;

	pthread_mutex_lock(&ctrlr->lock);

	memset(cstat, 0, sizeof(struct nvme_ctrlr_stat));

	/* Controller serial and model number */
	strncpy(cstat->sn, (char *)ctrlr->cdata.sn,
		NVME_SERIAL_NUMBER_LENGTH - 1);
	strncpy(cstat->mn, (char *)ctrlr->cdata.mn,
		NVME_MODEL_NUMBER_LENGTH - 1);

	/* Remove leading and trailing spaces */
	nvme_str_trim(cstat->sn);
	nvme_str_trim(cstat->mn);

	/* PCI device info */
	cstat->vendor_id = pdev->vendor_id;
	cstat->device_id = pdev->device_id;
	cstat->subvendor_id = pdev->subvendor_id;
	cstat->subdevice_id = pdev->subdevice_id;
#ifndef __HAIKU__
	cstat->device_class = pdev->device_class;
	cstat->revision = pdev->revision;
	cstat->domain = pdev->domain;
	cstat->bus = pdev->bus;
	cstat->dev = pdev->dev;
	cstat->func = pdev->func;
#endif

	/* Maximum transfer size */
	cstat->max_xfer_size = ctrlr->max_xfer_size;

	cstat->sgl_supported = (ctrlr->flags & NVME_CTRLR_SGL_SUPPORTED);

	memcpy(&cstat->features, &ctrlr->feature_supported,
	       sizeof(ctrlr->feature_supported));
	memcpy(&cstat->log_pages, &ctrlr->log_page_supported,
	       sizeof(ctrlr->log_page_supported));

	cstat->nr_ns = ctrlr->nr_ns;
	for (i = 0; i < ctrlr->nr_ns; i++) {
		cstat->ns_ids[i] = i + 1;
	}

	/* Maximum possible number of I/O qpairs */
	cstat->max_io_qpairs = ctrlr->max_io_queues;

	/* Constructed I/O qpairs */
	cstat->io_qpairs = ctrlr->io_queues;

	/* Enabled I/O qpairs */
	cstat->enabled_io_qpairs = ctrlr->enabled_io_qpairs;

	/* Maximum queue depth */
	cstat->max_qd = ctrlr->io_qpairs_max_entries;

	pthread_mutex_unlock(&ctrlr->lock);

	return 0;
}
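
/*
 * Example (sketch): printing a few of the fields filled in by
 * nvme_ctrlr_stat() above. The cast is only there to keep the
 * printf format portable regardless of the exact field type.
 *
 *	struct nvme_ctrlr_stat cstat;
 *
 *	if (nvme_ctrlr_stat(ctrlr, &cstat) == 0)
 *		printf("%s (SN %s): %lu namespace(s)\n",
 *		       cstat.mn, cstat.sn,
 *		       (unsigned long)cstat.nr_ns);
 */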

/*
 * Get controller data.
 */
int nvme_ctrlr_data(struct nvme_ctrlr *ctrlr, struct nvme_ctrlr_data *cdata,
		    struct nvme_register_data *rdata)
{
	union nvme_cap_register	cap;

	/* Verify that this controller is open */
	ctrlr = nvme_ctrlr_get(ctrlr, false);
	if (!ctrlr) {
		nvme_err("Invalid controller\n");
		return EINVAL;
	}

	pthread_mutex_lock(&ctrlr->lock);

	/* Controller data */
	if (cdata)
		memcpy(cdata, &ctrlr->cdata, sizeof(struct nvme_ctrlr_data));

	/* Read capabilities register */
	if (rdata) {
		cap.raw = nvme_reg_mmio_read_8(ctrlr, cap.raw);
		rdata->mqes = cap.bits.mqes;
	}

	pthread_mutex_unlock(&ctrlr->lock);

	return 0;
}
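
/*
 * Example (sketch): querying only the register data. CAP.MQES is
 * zero-based per the NVMe specification, so the usable queue depth
 * is mqes + 1.
 *
 *	struct nvme_register_data rdata;
 *
 *	if (nvme_ctrlr_data(ctrlr, NULL, &rdata) == 0)
 *		printf("Maximum queue entries: %u\n", rdata.mqes + 1);
 */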

/*
 * Get qpair information.
 */
int nvme_qpair_stat(struct nvme_qpair *qpair, struct nvme_qpair_stat *qpstat)
{
	struct nvme_ctrlr *ctrlr = qpair->ctrlr;

	/* Verify that this controller is open */
	ctrlr = nvme_ctrlr_get(ctrlr, false);
	if (!ctrlr) {
		nvme_err("Invalid controller\n");
		return EINVAL;
	}

	pthread_mutex_lock(&ctrlr->lock);

	qpstat->id = qpair->id;
	qpstat->qd = qpair->entries;
	qpstat->enabled = qpair->enabled;
	qpstat->qprio = qpair->qprio;

	pthread_mutex_unlock(&ctrlr->lock);

	return 0;
}
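
/*
 * Example (sketch): querying an I/O qpair previously obtained from
 * this controller. The printf formatting is illustrative; the casts
 * avoid depending on the exact field types.
 *
 *	struct nvme_qpair_stat qpstat;
 *
 *	if (nvme_qpair_stat(qpair, &qpstat) == 0)
 *		printf("qpair %u: depth %u, %s\n",
 *		       (unsigned int)qpstat.id,
 *		       (unsigned int)qpstat.qd,
 *		       qpstat.enabled ? "enabled" : "disabled");
 */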

/*
 * Close all open controllers on exit.
 */
void nvme_ctrlr_cleanup(void)
{
	struct nvme_ctrlr *ctrlr;

	while ((ctrlr = LIST_FIRST(&ctrlr_head))) {
		LIST_REMOVE(ctrlr, link);
		nvme_ctrlr_detach(ctrlr);
	}
}