/* ctl_backend_ramdisk.c revision 288727 */
1/*-
2 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
3 * Copyright (c) 2012 The FreeBSD Foundation
4 * All rights reserved.
5 *
6 * Portions of this software were developed by Edward Tomasz Napierala
7 * under sponsorship from the FreeBSD Foundation.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions, and the following disclaimer,
14 *    without modification.
15 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16 *    substantially similar to the "NO WARRANTY" disclaimer below
17 *    ("Disclaimer") and any redistribution must be conditioned upon
18 *    including a substantially similar Disclaimer requirement for further
19 *    binary redistribution.
20 *
21 * NO WARRANTY
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
30 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
31 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGES.
33 *
34 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
35 */
36/*
37 * CAM Target Layer backend for a "fake" ramdisk.
38 *
39 * Author: Ken Merry <ken@FreeBSD.org>
40 */
41
42#include <sys/cdefs.h>
43__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl_backend_ramdisk.c 288727 2015-10-05 08:51:20Z mav $");
44
45#include <sys/param.h>
46#include <sys/systm.h>
47#include <sys/kernel.h>
48#include <sys/condvar.h>
49#include <sys/types.h>
50#include <sys/lock.h>
51#include <sys/mutex.h>
52#include <sys/malloc.h>
53#include <sys/taskqueue.h>
54#include <sys/time.h>
55#include <sys/queue.h>
56#include <sys/conf.h>
57#include <sys/ioccom.h>
58#include <sys/module.h>
59
60#include <cam/scsi/scsi_all.h>
61#include <cam/ctl/ctl_io.h>
62#include <cam/ctl/ctl.h>
63#include <cam/ctl/ctl_util.h>
64#include <cam/ctl/ctl_backend.h>
65#include <cam/ctl/ctl_debug.h>
66#include <cam/ctl/ctl_ioctl.h>
67#include <cam/ctl/ctl_error.h>
68
/*
 * Per-LUN state flags; protected by the backend softc lock.
 */
typedef enum {
	CTL_BE_RAMDISK_LUN_UNCONFIGURED	= 0x01,	/* Not (or no longer) configured in CTL. */
	CTL_BE_RAMDISK_LUN_CONFIG_ERR	= 0x02,	/* LUN configuration reported an error. */
	CTL_BE_RAMDISK_LUN_WAITING	= 0x04	/* A thread is msleep()ing on this LUN. */
} ctl_be_ramdisk_lun_flags;
74
/*
 * Per-LUN state for the ramdisk backend.  One of these is allocated for
 * every LUN created through the ioctl interface.
 */
struct ctl_be_ramdisk_lun {
	char lunname[32];			/* Backend name, "cram%d". */
	uint64_t size_bytes;			/* LUN size in bytes. */
	uint64_t size_blocks;			/* LUN size in blocks. */
	struct ctl_be_ramdisk_softc *softc;	/* Back pointer to backend softc. */
	ctl_be_ramdisk_lun_flags flags;		/* Protected by softc->lock. */
	STAILQ_ENTRY(ctl_be_ramdisk_lun) links;	/* Linkage on softc->lun_list. */
	struct ctl_be_lun cbe_lun;		/* CTL's view of this LUN. */
	struct taskqueue *io_taskqueue;		/* Runs io_task to continue I/Os. */
	struct task io_task;			/* Drains cont_queue via worker. */
	STAILQ_HEAD(, ctl_io_hdr) cont_queue;	/* I/Os with data left to move. */
	struct mtx_padalign queue_lock;		/* Protects cont_queue. */
};
88
/*
 * Global backend state.  A single scratch buffer (or page array) of
 * rd_size bytes backs all data movement for every LUN; the data is not
 * actually retained per-LUN, hence the "fake" ramdisk.
 */
struct ctl_be_ramdisk_softc {
	struct mtx lock;		/* Protects lun_list, num_luns, LUN flags. */
	int rd_size;			/* Size of the shared backing store, bytes. */
#ifdef CTL_RAMDISK_PAGES
	uint8_t **ramdisk_pages;	/* num_pages pages of PAGE_SIZE each. */
	int num_pages;
#else
	uint8_t *ramdisk_buffer;	/* Single contiguous rd_size buffer. */
#endif
	int num_luns;			/* Number of LUNs on lun_list. */
	STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;
};
101
102static struct ctl_be_ramdisk_softc rd_softc;
103
104int ctl_backend_ramdisk_init(void);
105void ctl_backend_ramdisk_shutdown(void);
106static int ctl_backend_ramdisk_move_done(union ctl_io *io);
107static int ctl_backend_ramdisk_submit(union ctl_io *io);
108static void ctl_backend_ramdisk_continue(union ctl_io *io);
109static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
110				     caddr_t addr, int flag, struct thread *td);
111static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
112				  struct ctl_lun_req *req);
113static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
114				      struct ctl_lun_req *req);
115static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
116				  struct ctl_lun_req *req);
117static void ctl_backend_ramdisk_worker(void *context, int pending);
118static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
119static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
120						  ctl_lun_config_status status);
121static int ctl_backend_ramdisk_config_write(union ctl_io *io);
122static int ctl_backend_ramdisk_config_read(union ctl_io *io);
123
/*
 * Backend driver registration structure handed to CTL via
 * CTL_BACKEND_DECLARE() below.
 */
static struct ctl_backend_driver ctl_be_ramdisk_driver =
{
	.name = "ramdisk",
	.flags = CTL_BE_FLAG_HAS_CONFIG,
	.init = ctl_backend_ramdisk_init,
	.data_submit = ctl_backend_ramdisk_submit,
	.data_move_done = ctl_backend_ramdisk_move_done,
	.config_read = ctl_backend_ramdisk_config_read,
	.config_write = ctl_backend_ramdisk_config_write,
	.ioctl = ctl_backend_ramdisk_ioctl
};
135
136MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
137CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);
138
139int
140ctl_backend_ramdisk_init(void)
141{
142	struct ctl_be_ramdisk_softc *softc;
143#ifdef CTL_RAMDISK_PAGES
144	int i;
145#endif
146
147
148	softc = &rd_softc;
149
150	memset(softc, 0, sizeof(*softc));
151
152	mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);
153
154	STAILQ_INIT(&softc->lun_list);
155	softc->rd_size = 1024 * 1024;
156#ifdef CTL_RAMDISK_PAGES
157	softc->num_pages = softc->rd_size / PAGE_SIZE;
158	softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) *
159						  softc->num_pages, M_RAMDISK,
160						  M_WAITOK);
161	for (i = 0; i < softc->num_pages; i++)
162		softc->ramdisk_pages[i] = malloc(PAGE_SIZE, M_RAMDISK,M_WAITOK);
163#else
164	softc->ramdisk_buffer = (uint8_t *)malloc(softc->rd_size, M_RAMDISK,
165						  M_WAITOK);
166#endif
167
168	return (0);
169}
170
/*
 * Backend teardown: disable and invalidate every LUN, release the
 * shared backing store, then deregister from CTL.
 */
void
ctl_backend_ramdisk_shutdown(void)
{
	struct ctl_be_ramdisk_softc *softc;
	struct ctl_be_ramdisk_lun *lun, *next_lun;
#ifdef CTL_RAMDISK_PAGES
	int i;
#endif

	softc = &rd_softc;

	mtx_lock(&softc->lock);
	for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){
		/*
		 * Grab the next LUN.  The current LUN may get removed by
		 * ctl_invalidate_lun(), which will call our LUN shutdown
		 * routine, if there is no outstanding I/O for this LUN.
		 */
		next_lun = STAILQ_NEXT(lun, links);

		/*
		 * Drop our lock here.  Since ctl_invalidate_lun() can call
		 * back into us, this could potentially lead to a recursive
		 * lock of the same mutex, which would cause a hang.
		 *
		 * NOTE(review): next_lun was captured before the lock is
		 * dropped; if the shutdown callback frees that LUN while
		 * we are unlocked, next_lun could dangle -- confirm CTL's
		 * invalidation ordering makes this safe.
		 */
		mtx_unlock(&softc->lock);
		ctl_disable_lun(&lun->cbe_lun);
		ctl_invalidate_lun(&lun->cbe_lun);
		mtx_lock(&softc->lock);
	}
	mtx_unlock(&softc->lock);

	/* Free the shared backing store allocated in init(). */
#ifdef CTL_RAMDISK_PAGES
	for (i = 0; i < softc->num_pages; i++)
		free(softc->ramdisk_pages[i], M_RAMDISK);

	free(softc->ramdisk_pages, M_RAMDISK);
#else
	free(softc->ramdisk_buffer, M_RAMDISK);
#endif

	if (ctl_backend_deregister(&ctl_be_ramdisk_driver) != 0) {
		printf("ctl_backend_ramdisk_shutdown: "
		       "ctl_backend_deregister() failed!\n");
	}
}
217
/*
 * Data-movement completion callback.  Frees any S/G list we allocated,
 * advances the I/O's offset, and either requeues the I/O for another
 * round of data movement (if bytes remain) or completes it with the
 * appropriate status.
 */
static int
ctl_backend_ramdisk_move_done(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun;
	struct ctl_be_ramdisk_lun *be_lun;
#ifdef CTL_TIME_IO
	struct bintime cur_bt;
#endif

	CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
	cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
		CTL_PRIV_BACKEND_LUN].ptr;
	be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun->be_lun;
#ifdef CTL_TIME_IO
	/* Account the elapsed DMA time for this move. */
	getbintime(&cur_bt);
	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
	io->io_hdr.num_dmas++;
#endif
	/* A multi-entry S/G list was malloc'ed in continue(); release it. */
	if (io->scsiio.kern_sg_entries > 0)
		free(io->scsiio.kern_data_ptr, M_RAMDISK);
	io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
		;	/* Aborted: fall through and just complete it. */
	} else if ((io->io_hdr.port_status == 0) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
		/*
		 * More bytes remain for this I/O: hand it to the task
		 * queue so the worker can continue the transfer, and
		 * tell CTL we are keeping the I/O.
		 */
		if (io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer > 0) {
			mtx_lock(&be_lun->queue_lock);
			STAILQ_INSERT_TAIL(&be_lun->cont_queue,
			    &io->io_hdr, links);
			mtx_unlock(&be_lun->queue_lock);
			taskqueue_enqueue(be_lun->io_taskqueue,
			    &be_lun->io_task);
			return (0);
		}
		ctl_set_success(&io->scsiio);
	} else if ((io->io_hdr.port_status != 0) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
		/*
		 * For hardware error sense keys, the sense key
		 * specific value is defined to be a retry count,
		 * but we use it to pass back an internal FETD
		 * error code.  XXX KDM  Hopefully the FETD is only
		 * using 16 bits for an error code, since that's
		 * all the space we have in the sks field.
		 */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/
					 io->io_hdr.port_status);
	}
	ctl_data_submit_done(io);
	return(0);
}
273
274static int
275ctl_backend_ramdisk_submit(union ctl_io *io)
276{
277	struct ctl_be_lun *cbe_lun;
278	struct ctl_lba_len_flags *lbalen;
279
280	cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
281		CTL_PRIV_BACKEND_LUN].ptr;
282	lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
283	if (lbalen->flags & CTL_LLF_VERIFY) {
284		ctl_set_success(&io->scsiio);
285		ctl_data_submit_done(io);
286		return (CTL_RETVAL_COMPLETE);
287	}
288	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer =
289	    lbalen->len * cbe_lun->blocksize;
290	ctl_backend_ramdisk_continue(io);
291	return (CTL_RETVAL_COMPLETE);
292}
293
/*
 * Set up the next chunk of data movement for an I/O, pointing it at the
 * shared backing store, and kick off ctl_datamove().  Called first from
 * submit() and then repeatedly from the worker until the per-I/O byte
 * counter reaches zero.
 */
static void
ctl_backend_ramdisk_continue(union ctl_io *io)
{
	struct ctl_be_ramdisk_softc *softc;
	int len, len_filled, sg_filled;
#ifdef CTL_RAMDISK_PAGES
	struct ctl_sg_entry *sg_entries;
	int i;
#endif

	softc = &rd_softc;
	len = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer;
#ifdef CTL_RAMDISK_PAGES
	/* Build an S/G list over the shared pages, capped at num_pages. */
	sg_filled = min(btoc(len), softc->num_pages);
	if (sg_filled > 1) {
		io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
						  sg_filled, M_RAMDISK,
						  M_WAITOK);
		sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
		for (i = 0, len_filled = 0; i < sg_filled; i++) {
			sg_entries[i].addr = softc->ramdisk_pages[i];
			sg_entries[i].len = MIN(PAGE_SIZE, len - len_filled);
			len_filled += sg_entries[i].len;
		}
		io->io_hdr.flags |= CTL_FLAG_KDPTR_SGLIST;
	} else {
		/* One page suffices: pass it directly, no S/G list. */
		sg_filled = 0;
		len_filled = len;
		io->scsiio.kern_data_ptr = softc->ramdisk_pages[0];
	}
#else
	/* Single buffer: move at most rd_size bytes per round. */
	sg_filled = 0;
	len_filled = min(len, softc->rd_size);
	io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
#endif /* CTL_RAMDISK_PAGES */

	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
	io->scsiio.kern_data_resid = 0;
	io->scsiio.kern_data_len = len_filled;
	io->scsiio.kern_sg_entries = sg_filled;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	/* move_done() requeues us until this counter hits zero. */
	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer -= len_filled;
#ifdef CTL_TIME_IO
	getbintime(&io->io_hdr.dma_start_bt);
#endif
	ctl_datamove(io);
}
341
342static void
343ctl_backend_ramdisk_worker(void *context, int pending)
344{
345	struct ctl_be_ramdisk_softc *softc;
346	struct ctl_be_ramdisk_lun *be_lun;
347	union ctl_io *io;
348
349	be_lun = (struct ctl_be_ramdisk_lun *)context;
350	softc = be_lun->softc;
351
352	mtx_lock(&be_lun->queue_lock);
353	for (;;) {
354		io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
355		if (io != NULL) {
356			STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
357				      ctl_io_hdr, links);
358
359			mtx_unlock(&be_lun->queue_lock);
360
361			ctl_backend_ramdisk_continue(io);
362
363			mtx_lock(&be_lun->queue_lock);
364			continue;
365		}
366
367		/*
368		 * If we get here, there is no work left in the queues, so
369		 * just break out and let the task queue go to sleep.
370		 */
371		break;
372	}
373	mtx_unlock(&be_lun->queue_lock);
374}
375
376static int
377ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
378			  int flag, struct thread *td)
379{
380	struct ctl_be_ramdisk_softc *softc;
381	int retval;
382
383	retval = 0;
384	softc = &rd_softc;
385
386	switch (cmd) {
387	case CTL_LUN_REQ: {
388		struct ctl_lun_req *lun_req;
389
390		lun_req = (struct ctl_lun_req *)addr;
391
392		switch (lun_req->reqtype) {
393		case CTL_LUNREQ_CREATE:
394			retval = ctl_backend_ramdisk_create(softc, lun_req);
395			break;
396		case CTL_LUNREQ_RM:
397			retval = ctl_backend_ramdisk_rm(softc, lun_req);
398			break;
399		case CTL_LUNREQ_MODIFY:
400			retval = ctl_backend_ramdisk_modify(softc, lun_req);
401			break;
402		default:
403			lun_req->status = CTL_LUN_ERROR;
404			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
405				 "%s: invalid LUN request type %d", __func__,
406				 lun_req->reqtype);
407			break;
408		}
409		break;
410	}
411	default:
412		retval = ENOTTY;
413		break;
414	}
415
416	return (retval);
417}
418
/*
 * Handle a CTL_LUNREQ_RM request: look the LUN up by id, disable and
 * invalidate it, then sleep until the LUN shutdown callback reports it
 * unconfigured and free it.  If the sleep is interrupted, the shutdown
 * callback is left responsible for freeing the LUN.
 */
static int
ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
		       struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_lun_rm_params *params;
	int retval;


	retval = 0;
	params = &req->reqdata.rm;

	be_lun = NULL;

	mtx_lock(&softc->lock);

	/* Find the LUN by its CTL LUN id. */
	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);

	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the ramdisk backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}

	retval = ctl_disable_lun(&be_lun->cbe_lun);

	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_disable_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		goto bailout_error;
	}

	/*
	 * Set the waiting flag before we invalidate the LUN.  Our shutdown
	 * routine can be called any time after we invalidate the LUN,
	 * and can be called from our context.
	 *
	 * This tells the shutdown routine that we're waiting, or we're
	 * going to wait for the shutdown to happen.
	 */
	mtx_lock(&softc->lock);
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
	mtx_unlock(&softc->lock);

	retval = ctl_invalidate_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_invalidate_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		mtx_lock(&softc->lock);
		be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	}

	mtx_lock(&softc->lock);

	/* Wait for the shutdown callback to mark the LUN unconfigured. */
	while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

	/*
	 * We only remove this LUN from the list and free it (below) if
	 * retval == 0.  If the user interrupted the wait, we just bail out
	 * without actually freeing the LUN.  We let the shutdown routine
	 * free the LUN if that happens.
	 */
	if (retval == 0) {
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
	}

	mtx_unlock(&softc->lock);

	if (retval == 0) {
		/* Drain any in-flight continuations before tearing down. */
		taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task);
		taskqueue_free(be_lun->io_taskqueue);
		ctl_free_opts(&be_lun->cbe_lun.options);
		mtx_destroy(&be_lun->queue_lock);
		free(be_lun, M_RAMDISK);
	}

	/* NOTE(review): status is set to OK even if msleep() returned
	 * EINTR above; in that case retval (EINTR) is also returned --
	 * confirm callers expect that combination. */
	req->status = CTL_LUN_OK;

	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;

	return (0);
}
520
/*
 * Handle a CTL_LUNREQ_CREATE request: allocate and initialize the
 * per-LUN structure, create its continuation taskqueue, register the
 * LUN with CTL, and sleep until configuration completes (or fails).
 * On any failure, req->status is set to CTL_LUN_ERROR with a message
 * in req->error_str and partially-built state is torn down.
 */
static int
ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
			   struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	struct ctl_lun_create_params *params;
	char *value;
	char tmpstr[32];
	int retval;

	retval = 0;
	params = &req->reqdata.create;

	be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | M_WAITOK);
	cbe_lun = &be_lun->cbe_lun;
	cbe_lun->be_lun = be_lun;
	be_lun->softc = softc;
	/* "cram%d" fits easily in lunname[32], but this is unbounded;
	 * snprintf would be the safer idiom. */
	sprintf(be_lun->lunname, "cram%d", softc->num_luns);
	ctl_init_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);

	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
		cbe_lun->lun_type = params->device_type;
	else
		cbe_lun->lun_type = T_DIRECT;
	be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
	cbe_lun->flags = CTL_LUN_FLAG_PRIMARY;

	if (cbe_lun->lun_type == T_DIRECT) {
		/* Default to 512-byte blocks if the user didn't say. */
		if (params->blocksize_bytes != 0)
			cbe_lun->blocksize = params->blocksize_bytes;
		else
			cbe_lun->blocksize = 512;
		if (params->lun_size_bytes < cbe_lun->blocksize) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: LUN size %ju < blocksize %u", __func__,
				 params->lun_size_bytes, cbe_lun->blocksize);
			goto bailout_error;
		}
		/* Round the size down to a whole number of blocks. */
		be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize;
		be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize;
		cbe_lun->maxlba = be_lun->size_blocks - 1;
		cbe_lun->atomicblock = UINT32_MAX;
		cbe_lun->opttxferlen = softc->rd_size / cbe_lun->blocksize;
	}

	/* Tell the user the blocksize we ended up using */
	params->blocksize_bytes = cbe_lun->blocksize;
	params->lun_size_bytes = be_lun->size_bytes;

	/* Apply backend options: unmap, readonly, serseq. */
	value = ctl_get_opt(&cbe_lun->options, "unmap");
	if (value != NULL && strcmp(value, "on") == 0)
		cbe_lun->flags |= CTL_LUN_FLAG_UNMAP;
	value = ctl_get_opt(&cbe_lun->options, "readonly");
	if (value != NULL && strcmp(value, "on") == 0)
		cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
	cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
	value = ctl_get_opt(&cbe_lun->options, "serseq");
	if (value != NULL && strcmp(value, "on") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_ON;
	else if (value != NULL && strcmp(value, "read") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
	else if (value != NULL && strcmp(value, "off") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;

	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
		cbe_lun->req_lun_id = params->req_lun_id;
		cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ;
	} else
		cbe_lun->req_lun_id = 0;

	cbe_lun->lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
	cbe_lun->lun_config_status = ctl_backend_ramdisk_lun_config_status;
	cbe_lun->be = &ctl_be_ramdisk_driver;
	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
		/* Synthesize a serial number from the LUN count.
		 * NOTE(review): strncpy may leave the destination
		 * unterminated if tmpstr fills it exactly -- presumably
		 * consumers treat serial_num as fixed-width, not a
		 * C string; confirm. */
		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
			 softc->num_luns);
		strncpy((char *)cbe_lun->serial_num, tmpstr,
			MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr)));

		/* Tell the user what we used for a serial number */
		strncpy((char *)params->serial_num, tmpstr,
			MIN(sizeof(params->serial_num), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->serial_num, params->serial_num,
			MIN(sizeof(cbe_lun->serial_num),
			    sizeof(params->serial_num)));
	}
	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
		/* Synthesize a device ID, same convention as above. */
		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
		strncpy((char *)cbe_lun->device_id, tmpstr,
			MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr)));

		/* Tell the user what we used for a device ID */
		strncpy((char *)params->device_id, tmpstr,
			MIN(sizeof(params->device_id), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->device_id, params->device_id,
			MIN(sizeof(cbe_lun->device_id),
			    sizeof(params->device_id)));
	}

	/* Per-LUN continuation queue and its single-threaded taskqueue. */
	STAILQ_INIT(&be_lun->cont_queue);
	mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF);
	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
	    be_lun);

	be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
	if (be_lun->io_taskqueue == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: Unable to create taskqueue", __func__);
		goto bailout_error;
	}

	retval = taskqueue_start_threads(&be_lun->io_taskqueue,
					 /*num threads*/1,
					 /*priority*/PWAIT,
					 /*thread name*/
					 "%s taskq", be_lun->lunname);
	if (retval != 0)
		goto bailout_error;

	mtx_lock(&softc->lock);
	softc->num_luns++;
	STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);

	mtx_unlock(&softc->lock);

	retval = ctl_add_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		/* Undo the list insertion before tearing the LUN down. */
		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: ctl_add_lun() returned error %d, see dmesg for "
			"details", __func__, retval);
		retval = 0;
		goto bailout_error;
	}

	mtx_lock(&softc->lock);

	/*
	 * Tell the config_status routine that we're waiting so it won't
	 * clean up the LUN in the event of an error.
	 */
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;

	/* Sleep until config_status clears UNCONFIGURED (or we're hit
	 * by a signal, thanks to PCATCH). */
	while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

	if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN configuration error, see dmesg for details",
			 __func__);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	} else {
		/* Report the LUN id CTL actually assigned. */
		params->req_lun_id = cbe_lun->lun_id;
	}
	mtx_unlock(&softc->lock);

	req->status = CTL_LUN_OK;

	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	if (be_lun != NULL) {
		if (be_lun->io_taskqueue != NULL) {
			taskqueue_free(be_lun->io_taskqueue);
		}
		ctl_free_opts(&cbe_lun->options);
		mtx_destroy(&be_lun->queue_lock);
		free(be_lun, M_RAMDISK);
	}

	return (retval);
}
710
711static int
712ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
713		       struct ctl_lun_req *req)
714{
715	struct ctl_be_ramdisk_lun *be_lun;
716	struct ctl_lun_modify_params *params;
717	uint32_t blocksize;
718
719	params = &req->reqdata.modify;
720
721	be_lun = NULL;
722
723	mtx_lock(&softc->lock);
724	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
725		if (be_lun->cbe_lun.lun_id == params->lun_id)
726			break;
727	}
728	mtx_unlock(&softc->lock);
729
730	if (be_lun == NULL) {
731		snprintf(req->error_str, sizeof(req->error_str),
732			 "%s: LUN %u is not managed by the ramdisk backend",
733			 __func__, params->lun_id);
734		goto bailout_error;
735	}
736
737	if (params->lun_size_bytes == 0) {
738		snprintf(req->error_str, sizeof(req->error_str),
739			"%s: LUN size \"auto\" not supported "
740			"by the ramdisk backend", __func__);
741		goto bailout_error;
742	}
743
744	blocksize = be_lun->cbe_lun.blocksize;
745
746	if (params->lun_size_bytes < blocksize) {
747		snprintf(req->error_str, sizeof(req->error_str),
748			"%s: LUN size %ju < blocksize %u", __func__,
749			params->lun_size_bytes, blocksize);
750		goto bailout_error;
751	}
752
753	be_lun->size_blocks = params->lun_size_bytes / blocksize;
754	be_lun->size_bytes = be_lun->size_blocks * blocksize;
755
756	/*
757	 * The maximum LBA is the size - 1.
758	 *
759	 * XXX: Note that this field is being updated without locking,
760	 * 	which might cause problems on 32-bit architectures.
761	 */
762	be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1;
763	ctl_lun_capacity_changed(&be_lun->cbe_lun);
764
765	/* Tell the user the exact size we ended up using */
766	params->lun_size_bytes = be_lun->size_bytes;
767
768	req->status = CTL_LUN_OK;
769
770	return (0);
771
772bailout_error:
773	req->status = CTL_LUN_ERROR;
774
775	return (0);
776}
777
778static void
779ctl_backend_ramdisk_lun_shutdown(void *be_lun)
780{
781	struct ctl_be_ramdisk_lun *lun;
782	struct ctl_be_ramdisk_softc *softc;
783	int do_free;
784
785	lun = (struct ctl_be_ramdisk_lun *)be_lun;
786	softc = lun->softc;
787	do_free = 0;
788
789	mtx_lock(&softc->lock);
790
791	lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
792
793	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
794		wakeup(lun);
795	} else {
796		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
797			      links);
798		softc->num_luns--;
799		do_free = 1;
800	}
801
802	mtx_unlock(&softc->lock);
803
804	if (do_free != 0)
805		free(be_lun, M_RAMDISK);
806}
807
/*
 * CTL LUN configuration-status callback.  On success, clear the
 * UNCONFIGURED flag, wake any waiter in create(), and enable the LUN.
 * On failure, either flag the error for a waiting creator to handle,
 * or unlink and free the LUN ourselves if nobody is waiting.
 */
static void
ctl_backend_ramdisk_lun_config_status(void *be_lun,
				      ctl_lun_config_status status)
{
	struct ctl_be_ramdisk_lun *lun;
	struct ctl_be_ramdisk_softc *softc;

	lun = (struct ctl_be_ramdisk_lun *)be_lun;
	softc = lun->softc;

	if (status == CTL_LUN_CONFIG_OK) {
		mtx_lock(&softc->lock);
		lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
		if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
			wakeup(lun);
		mtx_unlock(&softc->lock);

		/*
		 * We successfully added the LUN, attempt to enable it.
		 */
		if (ctl_enable_lun(&lun->cbe_lun) != 0) {
			printf("%s: ctl_enable_lun() failed!\n", __func__);
			if (ctl_invalidate_lun(&lun->cbe_lun) != 0) {
				printf("%s: ctl_invalidate_lun() failed!\n",
				       __func__);
			}
		}

		return;
	}


	/* Configuration failed. */
	mtx_lock(&softc->lock);
	lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;

	/*
	 * If we have a user waiting, let him handle the cleanup.  If not,
	 * clean things up here.
	 */
	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
		lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR;
		wakeup(lun);
	} else {
		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		free(lun, M_RAMDISK);
	}
	mtx_unlock(&softc->lock);
}
858
/*
 * Handle non-data ("config write") commands.  Since nothing is really
 * stored, cache syncs, WRITE SAME and UNMAP are no-ops that succeed;
 * START STOP UNIT updates LUN power state through CTL.
 */
static int
ctl_backend_ramdisk_config_write(union ctl_io *io)
{
	struct ctl_be_ramdisk_softc *softc;
	int retval;

	retval = 0;
	softc = &rd_softc;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/*
		 * The upper level CTL code will filter out any CDBs with
		 * the immediate bit set and return the proper error.  It
		 * will also not allow a sync cache command to go to a LUN
		 * that is powered down.
		 *
		 * We don't really need to worry about what LBA range the
		 * user asked to be synced out.  When they issue a sync
		 * cache command, we'll sync out the whole thing.
		 *
		 * This is obviously just a stubbed out implementation.
		 * The real implementation will be in the RAIDCore/CTL
		 * interface, and can only really happen when RAIDCore
		 * implements a per-array cache sync.
		 */
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	case START_STOP_UNIT: {
		struct scsi_start_stop_unit *cdb;
		struct ctl_be_lun *cbe_lun;
		struct ctl_be_ramdisk_lun *be_lun;

		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;

		cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
			CTL_PRIV_BACKEND_LUN].ptr;
		be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun->be_lun;

		/* Start or stop the LUN per the START bit in the CDB. */
		if (cdb->how & SSS_START)
			retval = ctl_start_lun(cbe_lun);
		else {
			retval = ctl_stop_lun(cbe_lun);
#ifdef NEEDTOPORT
			if ((retval == 0)
			 && (cdb->byte2 & SSS_ONOFFLINE))
				retval = ctl_lun_offline(cbe_lun);
#endif
		}

		/*
		 * In general, the above routines should not fail.  They
		 * just set state for the LUN.  So we've got something
		 * pretty wrong here if we can't start or stop the LUN.
		 */
		if (retval != 0) {
			ctl_set_internal_failure(&io->scsiio,
						 /*sks_valid*/ 1,
						 /*retry_count*/ 0xf051);
			retval = CTL_RETVAL_COMPLETE;
		} else {
			ctl_set_success(&io->scsiio);
		}
		ctl_config_write_done(io);
		break;
	}
	case WRITE_SAME_10:
	case WRITE_SAME_16:
	case UNMAP:
		/* No backing data to modify; trivially succeed. */
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_write_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}
942
943static int
944ctl_backend_ramdisk_config_read(union ctl_io *io)
945{
946	int retval = 0;
947
948	switch (io->scsiio.cdb[0]) {
949	case SERVICE_ACTION_IN:
950		if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
951			/* We have nothing to tell, leave default data. */
952			ctl_config_read_done(io);
953			retval = CTL_RETVAL_COMPLETE;
954			break;
955		}
956		ctl_set_invalid_field(&io->scsiio,
957				      /*sks_valid*/ 1,
958				      /*command*/ 1,
959				      /*field*/ 1,
960				      /*bit_valid*/ 1,
961				      /*bit*/ 4);
962		ctl_config_read_done(io);
963		retval = CTL_RETVAL_COMPLETE;
964		break;
965	default:
966		ctl_set_invalid_opcode(&io->scsiio);
967		ctl_config_read_done(io);
968		retval = CTL_RETVAL_COMPLETE;
969		break;
970	}
971
972	return (retval);
973}
974