/*-
 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
 */
/*
 * CAM Target Layer backend for a "fake" ramdisk.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl_backend_ramdisk.c 288799 2015-10-05 11:02:17Z mav $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/module.h>
#include <sys/sysctl.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_error.h>

typedef enum {
	CTL_BE_RAMDISK_LUN_UNCONFIGURED	= 0x01,
	CTL_BE_RAMDISK_LUN_CONFIG_ERR	= 0x02,
	CTL_BE_RAMDISK_LUN_WAITING	= 0x04
} ctl_be_ramdisk_lun_flags;

struct ctl_be_ramdisk_lun {
	struct ctl_lun_create_params params;
	char lunname[32];
	uint64_t size_bytes;
	uint64_t size_blocks;
	struct ctl_be_ramdisk_softc *softc;
	ctl_be_ramdisk_lun_flags flags;
	STAILQ_ENTRY(ctl_be_ramdisk_lun) links;
	struct ctl_be_lun cbe_lun;
	struct taskqueue *io_taskqueue;
	struct task io_task;
	STAILQ_HEAD(, ctl_io_hdr) cont_queue;
	struct mtx_padalign queue_lock;
};

struct ctl_be_ramdisk_softc {
	struct mtx lock;
	int rd_size;
#ifdef CTL_RAMDISK_PAGES
	uint8_t **ramdisk_pages;
	int num_pages;
#else
	uint8_t *ramdisk_buffer;
#endif
	int num_luns;
	STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;
};

static struct ctl_be_ramdisk_softc rd_softc;
extern struct ctl_softc *control_softc;

int ctl_backend_ramdisk_init(void);
void ctl_backend_ramdisk_shutdown(void);
static int ctl_backend_ramdisk_move_done(union ctl_io *io);
static int ctl_backend_ramdisk_submit(union ctl_io *io);
static void ctl_backend_ramdisk_continue(union ctl_io *io);
static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
				     caddr_t addr, int flag, struct thread *td);
static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
				  struct ctl_lun_req *req);
static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
				      struct ctl_lun_req *req);
static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
				  struct ctl_lun_req *req);
static void ctl_backend_ramdisk_worker(void *context, int pending);
static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
						  ctl_lun_config_status status);
static int ctl_backend_ramdisk_config_write(union ctl_io *io);
static int ctl_backend_ramdisk_config_read(union ctl_io *io);

static struct ctl_backend_driver ctl_be_ramdisk_driver =
{
	.name = "ramdisk",
	.flags = CTL_BE_FLAG_HAS_CONFIG,
	.init = ctl_backend_ramdisk_init,
	.data_submit = ctl_backend_ramdisk_submit,
	.data_move_done = ctl_backend_ramdisk_move_done,
	.config_read = ctl_backend_ramdisk_config_read,
	.config_write = ctl_backend_ramdisk_config_write,
	.ioctl = ctl_backend_ramdisk_ioctl
};

MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);

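/*
 * Backend initialization: set up the shared softc and allocate the single
 * backing store (an array of pages with CTL_RAMDISK_PAGES, otherwise one
 * contiguous buffer) that all LUNs hand out for data transfers.
 */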
int
ctl_backend_ramdisk_init(void)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
#ifdef CTL_RAMDISK_PAGES
	int i;
#endif

	memset(softc, 0, sizeof(*softc));
	mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);
	STAILQ_INIT(&softc->lun_list);
	softc->rd_size = 1024 * 1024;
#ifdef CTL_RAMDISK_PAGES
	softc->num_pages = softc->rd_size / PAGE_SIZE;
	softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) *
						  softc->num_pages, M_RAMDISK,
						  M_WAITOK);
	for (i = 0; i < softc->num_pages; i++)
		softc->ramdisk_pages[i] = malloc(PAGE_SIZE, M_RAMDISK,M_WAITOK);
#else
	softc->ramdisk_buffer = (uint8_t *)malloc(softc->rd_size, M_RAMDISK,
						  M_WAITOK);
#endif

	return (0);
}

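/*
 * Backend teardown: disable and invalidate any remaining LUNs, free the
 * shared backing store and deregister the driver from CTL.
 */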
void
ctl_backend_ramdisk_shutdown(void)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
	struct ctl_be_ramdisk_lun *lun, *next_lun;
#ifdef CTL_RAMDISK_PAGES
	int i;
#endif

	mtx_lock(&softc->lock);
	for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun) {
		/*
		 * Grab the next LUN.  The current LUN may get removed by
		 * ctl_invalidate_lun(), which will call our LUN shutdown
		 * routine, if there is no outstanding I/O for this LUN.
		 */
		next_lun = STAILQ_NEXT(lun, links);

		/*
		 * Drop our lock here.  Since ctl_invalidate_lun() can call
		 * back into us, this could potentially lead to a recursive
		 * lock of the same mutex, which would cause a hang.
		 */
		mtx_unlock(&softc->lock);
		ctl_disable_lun(&lun->cbe_lun);
		ctl_invalidate_lun(&lun->cbe_lun);
		mtx_lock(&softc->lock);
	}
	mtx_unlock(&softc->lock);

#ifdef CTL_RAMDISK_PAGES
	for (i = 0; i < softc->num_pages; i++)
		free(softc->ramdisk_pages[i], M_RAMDISK);

	free(softc->ramdisk_pages, M_RAMDISK);
#else
	free(softc->ramdisk_buffer, M_RAMDISK);
#endif

	if (ctl_backend_deregister(&ctl_be_ramdisk_driver) != 0) {
		printf("ctl_backend_ramdisk_shutdown: "
		       "ctl_backend_deregister() failed!\n");
	}
}

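/*
 * Completion callback for a data movement.  Free any scatter/gather list
 * allocated for the transfer, then either queue the I/O for its next chunk
 * on the LUN's continuation queue or complete it.
 */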
static int
ctl_backend_ramdisk_move_done(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun;
	struct ctl_be_ramdisk_lun *be_lun;
#ifdef CTL_TIME_IO
	struct bintime cur_bt;
#endif

	CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
	cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
		CTL_PRIV_BACKEND_LUN].ptr;
	be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun->be_lun;
#ifdef CTL_TIME_IO
	getbinuptime(&cur_bt);
	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
#endif
	io->io_hdr.num_dmas++;
	if (io->scsiio.kern_sg_entries > 0)
		free(io->scsiio.kern_data_ptr, M_RAMDISK);
	io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
		;
	} else if ((io->io_hdr.port_status == 0) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
		if (io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer > 0) {
			mtx_lock(&be_lun->queue_lock);
			STAILQ_INSERT_TAIL(&be_lun->cont_queue,
			    &io->io_hdr, links);
			mtx_unlock(&be_lun->queue_lock);
			taskqueue_enqueue(be_lun->io_taskqueue,
			    &be_lun->io_task);
			return (0);
		}
		ctl_set_success(&io->scsiio);
	} else if ((io->io_hdr.port_status != 0) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
		/*
		 * For hardware error sense keys, the sense key
		 * specific value is defined to be a retry count,
		 * but we use it to pass back an internal FETD
		 * error code.  XXX KDM  Hopefully the FETD is only
		 * using 16 bits for an error code, since that's
		 * all the space we have in the sks field.
		 */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/
					 io->io_hdr.port_status);
	}
	ctl_data_submit_done(io);
	return (0);
}

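/*
 * Data I/O entry point.  VERIFY commands are completed immediately, since
 * there is no real media to check; everything else records the total
 * transfer length and starts the first chunk.
 */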
static int
ctl_backend_ramdisk_submit(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun;
	struct ctl_lba_len_flags *lbalen;

	cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
		CTL_PRIV_BACKEND_LUN].ptr;
	lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	if (lbalen->flags & CTL_LLF_VERIFY) {
		ctl_set_success(&io->scsiio);
		ctl_data_submit_done(io);
		return (CTL_RETVAL_COMPLETE);
	}
	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer =
	    lbalen->len * cbe_lun->blocksize;
	ctl_backend_ramdisk_continue(io);
	return (CTL_RETVAL_COMPLETE);
}

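/*
 * Point the I/O at the shared ramdisk storage (either a single buffer or a
 * scatter/gather list of pages), clamp it to one chunk of the remaining
 * transfer and kick off the data movement.
 */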
static void
ctl_backend_ramdisk_continue(union ctl_io *io)
{
	struct ctl_be_ramdisk_softc *softc;
	int len, len_filled, sg_filled;
#ifdef CTL_RAMDISK_PAGES
	struct ctl_sg_entry *sg_entries;
	int i;
#endif

	softc = &rd_softc;
	len = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer;
#ifdef CTL_RAMDISK_PAGES
	sg_filled = min(btoc(len), softc->num_pages);
	if (sg_filled > 1) {
		io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
						  sg_filled, M_RAMDISK,
						  M_WAITOK);
		sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
		for (i = 0, len_filled = 0; i < sg_filled; i++) {
			sg_entries[i].addr = softc->ramdisk_pages[i];
			sg_entries[i].len = MIN(PAGE_SIZE, len - len_filled);
			len_filled += sg_entries[i].len;
		}
	} else {
		sg_filled = 0;
		len_filled = len;
		io->scsiio.kern_data_ptr = softc->ramdisk_pages[0];
	}
#else
	sg_filled = 0;
	len_filled = min(len, softc->rd_size);
	io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
#endif /* CTL_RAMDISK_PAGES */

	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
	io->scsiio.kern_data_resid = 0;
	io->scsiio.kern_data_len = len_filled;
	io->scsiio.kern_sg_entries = sg_filled;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer -= len_filled;
#ifdef CTL_TIME_IO
	getbinuptime(&io->io_hdr.dma_start_bt);
#endif
	ctl_datamove(io);
}

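/*
 * Taskqueue worker: drain the LUN's continuation queue, restarting each
 * queued I/O outside of the data movement completion path.
 */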
static void
ctl_backend_ramdisk_worker(void *context, int pending)
{
	struct ctl_be_ramdisk_softc *softc;
	struct ctl_be_ramdisk_lun *be_lun;
	union ctl_io *io;

	be_lun = (struct ctl_be_ramdisk_lun *)context;
	softc = be_lun->softc;

	mtx_lock(&be_lun->queue_lock);
	for (;;) {
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
		if (io != NULL) {
			STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
				      ctl_io_hdr, links);
			mtx_unlock(&be_lun->queue_lock);
			ctl_backend_ramdisk_continue(io);
			mtx_lock(&be_lun->queue_lock);
			continue;
		}

		/*
		 * If we get here, there is no work left in the queues, so
		 * just break out and let the task queue go to sleep.
		 */
		break;
	}
	mtx_unlock(&be_lun->queue_lock);
}

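/*
 * Handle LUN requests (create/remove/modify) passed in from userland
 * through the CTL character device.
 */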
static int
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
			  int flag, struct thread *td)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
	struct ctl_lun_req *lun_req;
	int retval;

	retval = 0;
	switch (cmd) {
	case CTL_LUN_REQ:
		lun_req = (struct ctl_lun_req *)addr;
		switch (lun_req->reqtype) {
		case CTL_LUNREQ_CREATE:
			retval = ctl_backend_ramdisk_create(softc, lun_req);
			break;
		case CTL_LUNREQ_RM:
			retval = ctl_backend_ramdisk_rm(softc, lun_req);
			break;
		case CTL_LUNREQ_MODIFY:
			retval = ctl_backend_ramdisk_modify(softc, lun_req);
			break;
		default:
			lun_req->status = CTL_LUN_ERROR;
			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
				 "%s: invalid LUN request type %d", __func__,
				 lun_req->reqtype);
			break;
		}
		break;
	default:
		retval = ENOTTY;
		break;
	}

	return (retval);
}

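/*
 * Remove a LUN: disable and invalidate it in CTL, then sleep until the
 * LUN shutdown callback reports that it is unconfigured before tearing
 * down the taskqueue and freeing the backend resources.
 */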
static int
ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
		       struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_lun_rm_params *params;
	int retval;

	retval = 0;
	params = &req->reqdata.rm;
	mtx_lock(&softc->lock);
	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);
	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the ramdisk backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}

	retval = ctl_disable_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_disable_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		goto bailout_error;
	}

	/*
	 * Set the waiting flag before we invalidate the LUN.  Our shutdown
	 * routine can be called any time after we invalidate the LUN,
	 * and can be called from our context.
	 *
	 * This tells the shutdown routine that we're waiting, or we're
	 * going to wait for the shutdown to happen.
	 */
	mtx_lock(&softc->lock);
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
	mtx_unlock(&softc->lock);

	retval = ctl_invalidate_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_invalidate_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		mtx_lock(&softc->lock);
		be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	}

	mtx_lock(&softc->lock);
	while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

	/*
	 * We only remove this LUN from the list and free it (below) if
	 * retval == 0.  If the user interrupted the wait, we just bail out
	 * without actually freeing the LUN.  We let the shutdown routine
	 * free the LUN if that happens.
	 */
	if (retval == 0) {
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
	}

	mtx_unlock(&softc->lock);

	if (retval == 0) {
		taskqueue_drain_all(be_lun->io_taskqueue);
		taskqueue_free(be_lun->io_taskqueue);
		ctl_free_opts(&be_lun->cbe_lun.options);
		mtx_destroy(&be_lun->queue_lock);
		free(be_lun, M_RAMDISK);
	}

	req->status = CTL_LUN_OK;
	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	return (0);
}

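/*
 * Create a LUN from the request parameters: set up its option list,
 * identifiers and I/O taskqueue, register it with ctl_add_lun() and wait
 * for the configuration callback before reporting the result.
 */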
static int
ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
			   struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	struct ctl_lun_create_params *params;
	char *value;
	char tmpstr[32];
	int retval;

	retval = 0;
	params = &req->reqdata.create;

	be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | M_WAITOK);
	cbe_lun = &be_lun->cbe_lun;
	cbe_lun->be_lun = be_lun;
	be_lun->params = req->reqdata.create;
	be_lun->softc = softc;
	sprintf(be_lun->lunname, "cram%d", softc->num_luns);
	ctl_init_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);

	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
		cbe_lun->lun_type = params->device_type;
	else
		cbe_lun->lun_type = T_DIRECT;
	be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
	cbe_lun->flags = 0;
	value = ctl_get_opt(&cbe_lun->options, "ha_role");
	if (value != NULL) {
		if (strcmp(value, "primary") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;

	if (cbe_lun->lun_type == T_DIRECT) {
		if (params->blocksize_bytes != 0)
			cbe_lun->blocksize = params->blocksize_bytes;
		else
			cbe_lun->blocksize = 512;
		if (params->lun_size_bytes < cbe_lun->blocksize) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: LUN size %ju < blocksize %u", __func__,
				 params->lun_size_bytes, cbe_lun->blocksize);
			goto bailout_error;
		}
		be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize;
		be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize;
		cbe_lun->maxlba = be_lun->size_blocks - 1;
		cbe_lun->atomicblock = UINT32_MAX;
		cbe_lun->opttxferlen = softc->rd_size / cbe_lun->blocksize;
	}

	/* Tell the user the blocksize we ended up using */
	params->blocksize_bytes = cbe_lun->blocksize;
	params->lun_size_bytes = be_lun->size_bytes;

	value = ctl_get_opt(&cbe_lun->options, "unmap");
	if (value != NULL && strcmp(value, "on") == 0)
		cbe_lun->flags |= CTL_LUN_FLAG_UNMAP;
	value = ctl_get_opt(&cbe_lun->options, "readonly");
	if (value != NULL && strcmp(value, "on") == 0)
		cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
	cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
	value = ctl_get_opt(&cbe_lun->options, "serseq");
	if (value != NULL && strcmp(value, "on") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_ON;
	else if (value != NULL && strcmp(value, "read") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
	else if (value != NULL && strcmp(value, "off") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;

	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
		cbe_lun->req_lun_id = params->req_lun_id;
		cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ;
	} else
		cbe_lun->req_lun_id = 0;

	cbe_lun->lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
	cbe_lun->lun_config_status = ctl_backend_ramdisk_lun_config_status;
	cbe_lun->be = &ctl_be_ramdisk_driver;
	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
			 softc->num_luns);
		strncpy((char *)cbe_lun->serial_num, tmpstr,
			MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr)));

		/* Tell the user what we used for a serial number */
		strncpy((char *)params->serial_num, tmpstr,
			MIN(sizeof(params->serial_num), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->serial_num, params->serial_num,
			MIN(sizeof(cbe_lun->serial_num),
			    sizeof(params->serial_num)));
	}
	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
		strncpy((char *)cbe_lun->device_id, tmpstr,
			MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr)));

		/* Tell the user what we used for a device ID */
		strncpy((char *)params->device_id, tmpstr,
			MIN(sizeof(params->device_id), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->device_id, params->device_id,
			MIN(sizeof(cbe_lun->device_id),
			    sizeof(params->device_id)));
	}

	STAILQ_INIT(&be_lun->cont_queue);
	mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF);
	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
	    be_lun);

	be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
	if (be_lun->io_taskqueue == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: Unable to create taskqueue", __func__);
		goto bailout_error;
	}

	retval = taskqueue_start_threads(&be_lun->io_taskqueue,
					 /*num threads*/1,
					 /*priority*/PWAIT,
					 /*thread name*/
					 "%s taskq", be_lun->lunname);
	if (retval != 0)
		goto bailout_error;

	mtx_lock(&softc->lock);
	softc->num_luns++;
	STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);
	mtx_unlock(&softc->lock);

	retval = ctl_add_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: ctl_add_lun() returned error %d, see dmesg for "
			 "details", __func__, retval);
		retval = 0;
		goto bailout_error;
	}

	mtx_lock(&softc->lock);

	/*
	 * Tell the config_status routine that we're waiting so it won't
	 * clean up the LUN in the event of an error.
	 */
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;

	while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

	if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN configuration error, see dmesg for details",
			 __func__);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	} else {
		params->req_lun_id = cbe_lun->lun_id;
	}
	mtx_unlock(&softc->lock);

	req->status = CTL_LUN_OK;
	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	if (be_lun != NULL) {
		if (be_lun->io_taskqueue != NULL) {
			taskqueue_free(be_lun->io_taskqueue);
		}
		ctl_free_opts(&cbe_lun->options);
		mtx_destroy(&be_lun->queue_lock);
		free(be_lun, M_RAMDISK);
	}
	return (retval);
}

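/*
 * Modify an existing LUN: update its options and HA role, recompute its
 * size from the (possibly updated) parameters and notify CTL of the new
 * capacity.
 */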
static int
ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
		       struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	struct ctl_lun_modify_params *params;
	char *value;
	uint32_t blocksize;
	int wasprim;

	params = &req->reqdata.modify;

	mtx_lock(&softc->lock);
	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);
	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the ramdisk backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}
	cbe_lun = &be_lun->cbe_lun;

	if (params->lun_size_bytes != 0)
		be_lun->params.lun_size_bytes = params->lun_size_bytes;
	ctl_update_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);

	wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
	value = ctl_get_opt(&cbe_lun->options, "ha_role");
	if (value != NULL) {
		if (strcmp(value, "primary") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
		else
			cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
	else
		cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
	if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
		if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
			ctl_lun_primary(cbe_lun);
		else
			ctl_lun_secondary(cbe_lun);
	}

	blocksize = be_lun->cbe_lun.blocksize;
	if (be_lun->params.lun_size_bytes < blocksize) {
		snprintf(req->error_str, sizeof(req->error_str),
			"%s: LUN size %ju < blocksize %u", __func__,
			be_lun->params.lun_size_bytes, blocksize);
		goto bailout_error;
	}
	be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize;
	be_lun->size_bytes = be_lun->size_blocks * blocksize;
	be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1;
	ctl_lun_capacity_changed(&be_lun->cbe_lun);

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	req->status = CTL_LUN_OK;
	return (0);

bailout_error:
	req->status = CTL_LUN_ERROR;
	return (0);
}

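/*
 * LUN shutdown callback, invoked by CTL once an invalidated LUN is finally
 * released.  Wake up any thread waiting in create/remove, or free the LUN
 * right here if nobody is waiting.
 */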
static void
ctl_backend_ramdisk_lun_shutdown(void *be_lun)
{
	struct ctl_be_ramdisk_lun *lun;
	struct ctl_be_ramdisk_softc *softc;
	int do_free;

	lun = (struct ctl_be_ramdisk_lun *)be_lun;
	softc = lun->softc;
	do_free = 0;

	mtx_lock(&softc->lock);
	lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
		wakeup(lun);
	} else {
		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		do_free = 1;
	}
	mtx_unlock(&softc->lock);

	if (do_free != 0)
		free(be_lun, M_RAMDISK);
}

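/*
 * Called by CTL with the result of LUN addition.  On success the LUN is
 * enabled; on failure it is cleaned up here unless a thread is waiting to
 * handle the error itself.
 */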
static void
ctl_backend_ramdisk_lun_config_status(void *be_lun,
				      ctl_lun_config_status status)
{
	struct ctl_be_ramdisk_lun *lun;
	struct ctl_be_ramdisk_softc *softc;

	lun = (struct ctl_be_ramdisk_lun *)be_lun;
	softc = lun->softc;

	if (status == CTL_LUN_CONFIG_OK) {
		mtx_lock(&softc->lock);
		lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
		if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
			wakeup(lun);
		mtx_unlock(&softc->lock);

		/*
		 * We successfully added the LUN, attempt to enable it.
		 */
		if (ctl_enable_lun(&lun->cbe_lun) != 0) {
			printf("%s: ctl_enable_lun() failed!\n", __func__);
			if (ctl_invalidate_lun(&lun->cbe_lun) != 0) {
				printf("%s: ctl_invalidate_lun() failed!\n",
				       __func__);
			}
		}

		return;
	}

	mtx_lock(&softc->lock);
	lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;

	/*
	 * If we have a user waiting, let him handle the cleanup.  If not,
	 * clean things up here.
	 */
	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
		lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR;
		wakeup(lun);
	} else {
		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		free(lun, M_RAMDISK);
	}
	mtx_unlock(&softc->lock);
}

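/*
 * Non-data (configuration) writes.  SYNCHRONIZE CACHE, WRITE SAME and
 * UNMAP are no-ops for a ramdisk; START STOP UNIT just updates the LUN
 * state in CTL.
 */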
static int
ctl_backend_ramdisk_config_write(union ctl_io *io)
{
	struct ctl_be_ramdisk_softc *softc;
	int retval;

	retval = 0;
	softc = &rd_softc;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/*
		 * The upper level CTL code will filter out any CDBs with
		 * the immediate bit set and return the proper error.  It
		 * will also not allow a sync cache command to go to a LUN
		 * that is powered down.
		 *
		 * We don't really need to worry about what LBA range the
		 * user asked to be synced out.  When they issue a sync
		 * cache command, we'll sync out the whole thing.
		 *
		 * This is obviously just a stubbed out implementation.
		 * The real implementation will be in the RAIDCore/CTL
		 * interface, and can only really happen when RAIDCore
		 * implements a per-array cache sync.
		 */
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	case START_STOP_UNIT: {
		struct scsi_start_stop_unit *cdb;
		struct ctl_be_lun *cbe_lun;
		struct ctl_be_ramdisk_lun *be_lun;

		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;

		cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
			CTL_PRIV_BACKEND_LUN].ptr;
		be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun->be_lun;

		if (cdb->how & SSS_START)
			retval = ctl_start_lun(cbe_lun);
		else {
			retval = ctl_stop_lun(cbe_lun);
#ifdef NEEDTOPORT
			if ((retval == 0)
			 && (cdb->byte2 & SSS_ONOFFLINE))
				retval = ctl_lun_offline(cbe_lun);
#endif
		}

		/*
		 * In general, the above routines should not fail.  They
		 * just set state for the LUN.  So we've got something
		 * pretty wrong here if we can't start or stop the LUN.
		 */
		if (retval != 0) {
			ctl_set_internal_failure(&io->scsiio,
						 /*sks_valid*/ 1,
						 /*retry_count*/ 0xf051);
			retval = CTL_RETVAL_COMPLETE;
		} else {
			ctl_set_success(&io->scsiio);
		}
		ctl_config_write_done(io);
		break;
	}
	case WRITE_SAME_10:
	case WRITE_SAME_16:
	case UNMAP:
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_write_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}

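/*
 * Non-data (configuration) reads.  Only the SGLS (GET LBA STATUS) service
 * action of SERVICE ACTION IN is recognized, and it is completed with the
 * default data filled in by CTL.
 */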
static int
ctl_backend_ramdisk_config_read(union ctl_io *io)
{
	int retval = 0;

	switch (io->scsiio.cdb[0]) {
	case SERVICE_ACTION_IN:
		if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
			/* We have nothing to tell, leave default data. */
			ctl_config_read_done(io);
			retval = CTL_RETVAL_COMPLETE;
			break;
		}
		ctl_set_invalid_field(&io->scsiio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 1,
				      /*bit_valid*/ 1,
				      /*bit*/ 4);
		ctl_config_read_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_read_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}