ctl_backend_ramdisk.c revision 265634
/*-
 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
 */
/*
 * CAM Target Layer backend for a "fake" ramdisk.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl_backend_ramdisk.c 265634 2014-05-08 07:00:45Z mav $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/module.h>

#include <cam/scsi/scsi_all.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_error.h>

typedef enum {
	CTL_BE_RAMDISK_LUN_UNCONFIGURED	= 0x01,
	CTL_BE_RAMDISK_LUN_CONFIG_ERR	= 0x02,
	CTL_BE_RAMDISK_LUN_WAITING	= 0x04
} ctl_be_ramdisk_lun_flags;

struct ctl_be_ramdisk_lun {
	uint64_t size_bytes;
	uint64_t size_blocks;
	struct ctl_be_ramdisk_softc *softc;
	ctl_be_ramdisk_lun_flags flags;
	STAILQ_ENTRY(ctl_be_ramdisk_lun) links;
	struct ctl_be_lun ctl_be_lun;
};

struct ctl_be_ramdisk_softc {
	struct mtx lock;
	int rd_size;
#ifdef CTL_RAMDISK_PAGES
	uint8_t **ramdisk_pages;
	int num_pages;
#else
	uint8_t *ramdisk_buffer;
#endif
	int num_luns;
	STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;
};

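/*
 * A single, statically allocated softc is shared by every ramdisk LUN.
 * All I/O is satisfied from the one small backing buffer (or page array)
 * allocated at init time, so written data is not actually retained; this
 * backend is intended for testing and throughput measurement rather than
 * real data storage.
 */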
static struct ctl_be_ramdisk_softc rd_softc;

int ctl_backend_ramdisk_init(void);
void ctl_backend_ramdisk_shutdown(void);
static int ctl_backend_ramdisk_move_done(union ctl_io *io);
static int ctl_backend_ramdisk_submit(union ctl_io *io);
static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
				     caddr_t addr, int flag, struct thread *td);
static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
				  struct ctl_lun_req *req);
static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
				      struct ctl_lun_req *req, int do_wait);
static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
				  struct ctl_lun_req *req);
static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
						  ctl_lun_config_status status);
static int ctl_backend_ramdisk_config_write(union ctl_io *io);
static int ctl_backend_ramdisk_config_read(union ctl_io *io);

static struct ctl_backend_driver ctl_be_ramdisk_driver =
{
	.name = "ramdisk",
	.flags = CTL_BE_FLAG_HAS_CONFIG,
	.init = ctl_backend_ramdisk_init,
	.data_submit = ctl_backend_ramdisk_submit,
	.data_move_done = ctl_backend_ramdisk_move_done,
	.config_read = ctl_backend_ramdisk_config_read,
	.config_write = ctl_backend_ramdisk_config_write,
	.ioctl = ctl_backend_ramdisk_ioctl
};

MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);

int
ctl_backend_ramdisk_init(void)
{
	struct ctl_be_ramdisk_softc *softc;
#ifdef CTL_RAMDISK_PAGES
	int i;
#endif


	softc = &rd_softc;

	memset(softc, 0, sizeof(*softc));

	mtx_init(&softc->lock, "ramdisk", NULL, MTX_DEF);

	STAILQ_INIT(&softc->lun_list);
	softc->rd_size = 4 * 1024 * 1024;
#ifdef CTL_RAMDISK_PAGES
	softc->num_pages = softc->rd_size / PAGE_SIZE;
	softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) *
						  softc->num_pages, M_RAMDISK,
						  M_WAITOK);
	for (i = 0; i < softc->num_pages; i++)
		softc->ramdisk_pages[i] = malloc(PAGE_SIZE, M_RAMDISK,M_WAITOK);
#else
	softc->ramdisk_buffer = (uint8_t *)malloc(softc->rd_size, M_RAMDISK,
						  M_WAITOK);
#endif

	return (0);
}

void
ctl_backend_ramdisk_shutdown(void)
{
	struct ctl_be_ramdisk_softc *softc;
	struct ctl_be_ramdisk_lun *lun, *next_lun;
#ifdef CTL_RAMDISK_PAGES
	int i;
#endif

	softc = &rd_softc;

	mtx_lock(&softc->lock);
	for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){
		/*
		 * Grab the next LUN.  The current LUN may get removed by
		 * ctl_invalidate_lun(), which will call our LUN shutdown
		 * routine, if there is no outstanding I/O for this LUN.
		 */
		next_lun = STAILQ_NEXT(lun, links);

		/*
		 * Drop our lock here.  Since ctl_invalidate_lun() can call
		 * back into us, this could potentially lead to a recursive
		 * lock of the same mutex, which would cause a hang.
		 */
		mtx_unlock(&softc->lock);
		ctl_disable_lun(&lun->ctl_be_lun);
		ctl_invalidate_lun(&lun->ctl_be_lun);
		mtx_lock(&softc->lock);
	}
	mtx_unlock(&softc->lock);

#ifdef CTL_RAMDISK_PAGES
	for (i = 0; i < softc->num_pages; i++)
		free(softc->ramdisk_pages[i], M_RAMDISK);

	free(softc->ramdisk_pages, M_RAMDISK);
#else
	free(softc->ramdisk_buffer, M_RAMDISK);
#endif

	if (ctl_backend_deregister(&ctl_be_ramdisk_driver) != 0) {
		printf("ctl_backend_ramdisk_shutdown: "
		       "ctl_backend_deregister() failed!\n");
	}
}

static int
ctl_backend_ramdisk_move_done(union ctl_io *io)
{
#ifdef CTL_TIME_IO
	struct bintime cur_bt;
#endif

	CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
	if ((io->io_hdr.port_status == 0)
	 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
	 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
		io->io_hdr.status = CTL_SUCCESS;
	else if ((io->io_hdr.port_status != 0)
	      && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
	      && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)){
		/*
		 * For hardware error sense keys, the sense key
		 * specific value is defined to be a retry count,
		 * but we use it to pass back an internal FETD
		 * error code.  XXX KDM  Hopefully the FETD is only
		 * using 16 bits for an error code, since that's
		 * all the space we have in the sks field.
		 */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/
					 io->io_hdr.port_status);
	}
#ifdef CTL_TIME_IO
	getbintime(&cur_bt);
	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
	io->io_hdr.num_dmas++;
#endif

	if (io->scsiio.kern_sg_entries > 0)
		free(io->scsiio.kern_data_ptr, M_RAMDISK);
	ctl_done(io);
	return(0);
}

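/*
 * Data path entry point.  Reads and writes are mapped onto the shared
 * backing pages (or buffer) without regard to the requested LBA, so no
 * per-LUN data is ever stored; the request is only checked against the
 * backing store size and the configured LUN size.
 */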
static int
ctl_backend_ramdisk_submit(union ctl_io *io)
{
	struct ctl_lba_len lbalen;
#ifdef CTL_RAMDISK_PAGES
	struct ctl_sg_entry *sg_entries;
	int len_filled;
	int i;
#endif
	int num_sg_entries, len;
	struct ctl_be_ramdisk_softc *softc;
	struct ctl_be_lun *ctl_be_lun;
	struct ctl_be_ramdisk_lun *be_lun;

	softc = &rd_softc;

	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
		CTL_PRIV_BACKEND_LUN].ptr;
	be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;

	memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
	       sizeof(lbalen));

	len = lbalen.len * ctl_be_lun->blocksize;

	/*
	 * Kick out the request if it's bigger than we can handle.
	 */
	if (len > softc->rd_size) {
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 0,
					 /*retry_count*/ 0);
		ctl_done(io);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Kick out the request if it's larger than the device size that
	 * the user requested.
	 */
	if (((lbalen.lba * ctl_be_lun->blocksize) + len) > be_lun->size_bytes) {
		ctl_set_lba_out_of_range(&io->scsiio);
		ctl_done(io);
		return (CTL_RETVAL_COMPLETE);
	}

#ifdef CTL_RAMDISK_PAGES
	num_sg_entries = len >> PAGE_SHIFT;
	if ((len & (PAGE_SIZE - 1)) != 0)
		num_sg_entries++;

	if (num_sg_entries > 1) {
		io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
						  num_sg_entries, M_RAMDISK,
						  M_WAITOK);
		sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
		for (i = 0, len_filled = 0; i < num_sg_entries;
		     i++, len_filled += PAGE_SIZE) {
			sg_entries[i].addr = softc->ramdisk_pages[i];
			sg_entries[i].len = ctl_min(PAGE_SIZE,
						    len - len_filled);
		}
	} else {
#endif /* CTL_RAMDISK_PAGES */
		/*
		 * If this is less than 1 page, don't bother allocating a
		 * scatter/gather list for it.  This saves time/overhead.
		 */
		num_sg_entries = 0;
#ifdef CTL_RAMDISK_PAGES
		io->scsiio.kern_data_ptr = softc->ramdisk_pages[0];
#else
		io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
#endif
#ifdef CTL_RAMDISK_PAGES
	}
#endif

	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
	io->scsiio.kern_data_len = len;
	io->scsiio.kern_total_len = len;
	io->scsiio.kern_rel_offset = 0;
	io->scsiio.kern_data_resid = 0;
	io->scsiio.kern_sg_entries = num_sg_entries;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
#ifdef CTL_TIME_IO
	getbintime(&io->io_hdr.dma_start_bt);
#endif
	ctl_datamove(io);

	return (CTL_RETVAL_COMPLETE);
}

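/*
 * Character device ioctl handler; dispatches LUN create, remove, and
 * modify requests submitted from userland (e.g. via ctladm(8)).
 */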
static int
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
			  int flag, struct thread *td)
{
	struct ctl_be_ramdisk_softc *softc;
	int retval;

	retval = 0;
	softc = &rd_softc;

	switch (cmd) {
	case CTL_LUN_REQ: {
		struct ctl_lun_req *lun_req;

		lun_req = (struct ctl_lun_req *)addr;

		switch (lun_req->reqtype) {
		case CTL_LUNREQ_CREATE:
			retval = ctl_backend_ramdisk_create(softc, lun_req,
							    /*do_wait*/ 1);
			break;
		case CTL_LUNREQ_RM:
			retval = ctl_backend_ramdisk_rm(softc, lun_req);
			break;
		case CTL_LUNREQ_MODIFY:
			retval = ctl_backend_ramdisk_modify(softc, lun_req);
			break;
		default:
			lun_req->status = CTL_LUN_ERROR;
			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
				 "%s: invalid LUN request type %d", __func__,
				 lun_req->reqtype);
			break;
		}
		break;
	}
	default:
		retval = ENOTTY;
		break;
	}

	return (retval);
}

static int
ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
		       struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_lun_rm_params *params;
	int retval;


	retval = 0;
	params = &req->reqdata.rm;

	be_lun = NULL;

	mtx_lock(&softc->lock);

	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);

	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the ramdisk backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}

	retval = ctl_disable_lun(&be_lun->ctl_be_lun);

	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_disable_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		goto bailout_error;
	}

	/*
	 * Set the waiting flag before we invalidate the LUN.  Our shutdown
	 * routine can be called any time after we invalidate the LUN,
	 * and can be called from our context.
	 *
	 * This tells the shutdown routine that we're waiting, or we're
	 * going to wait for the shutdown to happen.
	 */
	mtx_lock(&softc->lock);
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
	mtx_unlock(&softc->lock);

	retval = ctl_invalidate_lun(&be_lun->ctl_be_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_invalidate_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		mtx_lock(&softc->lock);
		be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	}

	mtx_lock(&softc->lock);

	while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

	/*
	 * We only remove this LUN from the list and free it (below) if
	 * retval == 0.  If the user interrupted the wait, we just bail out
	 * without actually freeing the LUN.  We let the shutdown routine
	 * free the LUN if that happens.
	 */
	if (retval == 0) {
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
	}

	mtx_unlock(&softc->lock);

	if (retval == 0)
		free(be_lun, M_RAMDISK);

	req->status = CTL_LUN_OK;

	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;

	return (0);
}

static int
ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
			   struct ctl_lun_req *req, int do_wait)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_lun_create_params *params;
	uint32_t blocksize;
	char tmpstr[32];
	int i, retval, unmap;

	retval = 0;
	params = &req->reqdata.create;
	if (params->blocksize_bytes != 0)
		blocksize = params->blocksize_bytes;
	else
		blocksize = 512;

	be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | (do_wait ?
			M_WAITOK : M_NOWAIT));

	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error allocating %zd bytes", __func__,
			 sizeof(*be_lun));
		goto bailout_error;
	}
	STAILQ_INIT(&be_lun->ctl_be_lun.options);

	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
		be_lun->ctl_be_lun.lun_type = params->device_type;
	else
		be_lun->ctl_be_lun.lun_type = T_DIRECT;

	if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {

		if (params->lun_size_bytes < blocksize) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: LUN size %ju < blocksize %u", __func__,
				 params->lun_size_bytes, blocksize);
			goto bailout_error;
		}

		be_lun->size_blocks = params->lun_size_bytes / blocksize;
		be_lun->size_bytes = be_lun->size_blocks * blocksize;

		be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
	} else {
		be_lun->ctl_be_lun.maxlba = 0;
		blocksize = 0;
		be_lun->size_bytes = 0;
		be_lun->size_blocks = 0;
	}

	be_lun->ctl_be_lun.blocksize = blocksize;

	/* Tell the user the blocksize we ended up using */
	params->blocksize_bytes = blocksize;

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	be_lun->softc = softc;

	unmap = 0;
	for (i = 0; i < req->num_be_args; i++) {
		if (strcmp(req->kern_be_args[i].kname, "unmap") == 0 &&
		    strcmp(req->kern_be_args[i].kvalue, "on") == 0) {
			unmap = 1;
		} else {
			struct ctl_be_lun_option *opt;

			opt = malloc(sizeof(*opt), M_RAMDISK, M_WAITOK);
			opt->name = malloc(strlen(req->kern_be_args[i].kname) + 1, M_RAMDISK, M_WAITOK);
			strcpy(opt->name, req->kern_be_args[i].kname);
			opt->value = malloc(strlen(req->kern_be_args[i].kvalue) + 1, M_RAMDISK, M_WAITOK);
			strcpy(opt->value, req->kern_be_args[i].kvalue);
			STAILQ_INSERT_TAIL(&be_lun->ctl_be_lun.options, opt, links);
		}
	}

	be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
	be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
	if (unmap)
		be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_UNMAP;
	be_lun->ctl_be_lun.be_lun = be_lun;

	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
		be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
		be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
	} else
		be_lun->ctl_be_lun.req_lun_id = 0;

	be_lun->ctl_be_lun.lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
	be_lun->ctl_be_lun.lun_config_status =
		ctl_backend_ramdisk_lun_config_status;
	be_lun->ctl_be_lun.be = &ctl_be_ramdisk_driver;
	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
			 softc->num_luns);
		strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
			sizeof(tmpstr)));

		/* Tell the user what we used for a serial number */
		strncpy((char *)params->serial_num, tmpstr,
			ctl_min(sizeof(params->serial_num), sizeof(tmpstr)));
	} else {
		strncpy((char *)be_lun->ctl_be_lun.serial_num,
			params->serial_num,
			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
			sizeof(params->serial_num)));
	}
	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
		strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
			sizeof(tmpstr)));

		/* Tell the user what we used for a device ID */
		strncpy((char *)params->device_id, tmpstr,
			ctl_min(sizeof(params->device_id), sizeof(tmpstr)));
	} else {
		strncpy((char *)be_lun->ctl_be_lun.device_id,
			params->device_id,
			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
				sizeof(params->device_id)));
	}

	mtx_lock(&softc->lock);
	softc->num_luns++;
	STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);

	mtx_unlock(&softc->lock);

	retval = ctl_add_lun(&be_lun->ctl_be_lun);
	if (retval != 0) {
		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: ctl_add_lun() returned error %d, see dmesg for "
			"details", __func__, retval);
		retval = 0;
		goto bailout_error;
	}

	if (do_wait == 0)
		return (retval);

	mtx_lock(&softc->lock);

	/*
	 * Tell the config_status routine that we're waiting so it won't
	 * clean up the LUN in the event of an error.
	 */
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;

	while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

	if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN configuration error, see dmesg for details",
			 __func__);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	} else {
		params->req_lun_id = be_lun->ctl_be_lun.lun_id;
	}
	mtx_unlock(&softc->lock);

	req->status = CTL_LUN_OK;

	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	free(be_lun, M_RAMDISK);

	return (retval);
}

static int
ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
		       struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_lun_modify_params *params;
	uint32_t blocksize;

	params = &req->reqdata.modify;

	be_lun = NULL;

	mtx_lock(&softc->lock);
	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);

	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the ramdisk backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}

	if (params->lun_size_bytes == 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			"%s: LUN size \"auto\" not supported "
			"by the ramdisk backend", __func__);
		goto bailout_error;
	}

	blocksize = be_lun->ctl_be_lun.blocksize;

	if (params->lun_size_bytes < blocksize) {
		snprintf(req->error_str, sizeof(req->error_str),
			"%s: LUN size %ju < blocksize %u", __func__,
			params->lun_size_bytes, blocksize);
		goto bailout_error;
	}

	be_lun->size_blocks = params->lun_size_bytes / blocksize;
	be_lun->size_bytes = be_lun->size_blocks * blocksize;

	/*
	 * The maximum LBA is the size - 1.
	 *
	 * XXX: Note that this field is being updated without locking,
	 * 	which might cause problems on 32-bit architectures.
	 */
	be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
	ctl_lun_capacity_changed(&be_lun->ctl_be_lun);

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	req->status = CTL_LUN_OK;

	return (0);

bailout_error:
	req->status = CTL_LUN_ERROR;

	return (0);
}

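/*
 * Called by CTL once an invalidated LUN has no outstanding I/O.  If a
 * create or remove request is sleeping on this LUN, wake it and let it
 * finish the cleanup; otherwise unlink the LUN and free it here.
 */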
static void
ctl_backend_ramdisk_lun_shutdown(void *be_lun)
{
	struct ctl_be_ramdisk_lun *lun;
	struct ctl_be_ramdisk_softc *softc;
	int do_free;

	lun = (struct ctl_be_ramdisk_lun *)be_lun;
	softc = lun->softc;
	do_free = 0;

	mtx_lock(&softc->lock);

	lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;

	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
		wakeup(lun);
	} else {
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		do_free = 1;
	}

	mtx_unlock(&softc->lock);

	if (do_free != 0)
		free(be_lun, M_RAMDISK);
}

static void
ctl_backend_ramdisk_lun_config_status(void *be_lun,
				      ctl_lun_config_status status)
{
	struct ctl_be_ramdisk_lun *lun;
	struct ctl_be_ramdisk_softc *softc;

	lun = (struct ctl_be_ramdisk_lun *)be_lun;
	softc = lun->softc;

	if (status == CTL_LUN_CONFIG_OK) {
		mtx_lock(&softc->lock);
		lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
		if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
			wakeup(lun);
		mtx_unlock(&softc->lock);

		/*
		 * We successfully added the LUN, attempt to enable it.
		 */
		if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
			printf("%s: ctl_enable_lun() failed!\n", __func__);
			if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
				printf("%s: ctl_invalidate_lun() failed!\n",
				       __func__);
			}
		}

		return;
	}


	mtx_lock(&softc->lock);
	lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;

	/*
	 * If we have a user waiting, let him handle the cleanup.  If not,
	 * clean things up here.
	 */
	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
		lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR;
		wakeup(lun);
	} else {
		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		free(lun, M_RAMDISK);
	}
	mtx_unlock(&softc->lock);
}

static int
ctl_backend_ramdisk_config_write(union ctl_io *io)
{
	struct ctl_be_ramdisk_softc *softc;
	int retval;

	retval = 0;
	softc = &rd_softc;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/*
		 * The upper level CTL code will filter out any CDBs with
		 * the immediate bit set and return the proper error.  It
		 * will also not allow a sync cache command to go to a LUN
		 * that is powered down.
		 *
		 * We don't really need to worry about what LBA range the
		 * user asked to be synced out.  When they issue a sync
		 * cache command, we'll sync out the whole thing.
		 *
		 * This is obviously just a stubbed out implementation.
		 * The real implementation will be in the RAIDCore/CTL
		 * interface, and can only really happen when RAIDCore
		 * implements a per-array cache sync.
		 */
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	case START_STOP_UNIT: {
		struct scsi_start_stop_unit *cdb;
		struct ctl_be_lun *ctl_be_lun;
		struct ctl_be_ramdisk_lun *be_lun;

		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;

		ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
			CTL_PRIV_BACKEND_LUN].ptr;
		be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;

		if (cdb->how & SSS_START)
			retval = ctl_start_lun(ctl_be_lun);
		else {
			retval = ctl_stop_lun(ctl_be_lun);
#ifdef NEEDTOPORT
			if ((retval == 0)
			 && (cdb->byte2 & SSS_ONOFFLINE))
				retval = ctl_lun_offline(ctl_be_lun);
#endif
		}

		/*
		 * In general, the above routines should not fail.  They
		 * just set state for the LUN.  So we've got something
		 * pretty wrong here if we can't start or stop the LUN.
		 */
		if (retval != 0) {
			ctl_set_internal_failure(&io->scsiio,
						 /*sks_valid*/ 1,
						 /*retry_count*/ 0xf051);
			retval = CTL_RETVAL_COMPLETE;
		} else {
			ctl_set_success(&io->scsiio);
		}
		ctl_config_write_done(io);
		break;
	}
	case WRITE_SAME_10:
	case WRITE_SAME_16:
	case UNMAP:
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_write_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}

static int
ctl_backend_ramdisk_config_read(union ctl_io *io)
{
	/*
	 * XXX KDM need to implement!!
	 */
	return (0);
}
