1/*
2 *  History:
3 *  Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
4 *           to allow user process control of SCSI devices.
5 *  Development Sponsored by Killy Corp. NY NY
6 *
7 * Original driver (sg.c):
8 *        Copyright (C) 1992 Lawrence Foard
9 * Version 2 and 3 extensions to driver:
10 *        Copyright (C) 1998 - 2005 Douglas Gilbert
11 *
12 *  Modified  19-JAN-1998  Richard Gooch <rgooch@atnf.csiro.au>  Devfs support
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 */
20
21static int sg_version_num = 30534;	/* 2 digits for each component */
22#define SG_VERSION_STR "3.5.34"
23
24/*
25 *  D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
26 *      - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
27 *        the kernel/module needs to be built with CONFIG_SCSI_LOGGING
28 *        (otherwise the macros compile to empty statements).
29 *
30 */
31#include <linux/module.h>
32
33#include <linux/fs.h>
34#include <linux/kernel.h>
35#include <linux/sched.h>
36#include <linux/string.h>
37#include <linux/mm.h>
38#include <linux/errno.h>
39#include <linux/mtio.h>
40#include <linux/ioctl.h>
41#include <linux/fcntl.h>
42#include <linux/init.h>
43#include <linux/poll.h>
44#include <linux/moduleparam.h>
45#include <linux/cdev.h>
46#include <linux/seq_file.h>
47#include <linux/blkdev.h>
48#include <linux/delay.h>
49#include <linux/scatterlist.h>
50
51#include "scsi.h"
52#include <scsi/scsi_dbg.h>
53#include <scsi/scsi_host.h>
54#include <scsi/scsi_driver.h>
55#include <scsi/scsi_ioctl.h>
56#include <scsi/sg.h>
57
58#include "scsi_logging.h"
59
60#ifdef CONFIG_SCSI_PROC_FS
61#include <linux/proc_fs.h>
62static char *sg_version_date = "20061027";
63
64static int sg_proc_init(void);
65static void sg_proc_cleanup(void);
66#endif
67
68#define SG_ALLOW_DIO_DEF 0
69#define SG_ALLOW_DIO_CODE /* compile out by commenting this define */
70
71#define SG_MAX_DEVS 32768
72
73/*
74 * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
75 * Then when using 32 bit integers x * m may overflow during the calculation.
76 * Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m
77 * calculates the same, but prevents the overflow when both m and d
78 * are "small" numbers (like HZ and USER_HZ).
79 * Of course an overflow is inavoidable if the result of muldiv doesn't fit
80 * in 32 bits.
81 */
82#define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))
83
84#define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
85
86int sg_big_buff = SG_DEF_RESERVED_SIZE;
87/* N.B. This variable is readable and writeable via
88   /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
89   of this size (or less if there is not enough memory) will be reserved
90   for use by this file descriptor. [Deprecated usage: this variable is also
91   readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
92   the kernel (i.e. it is not a module).] */
93static int def_reserved_size = -1;	/* picks up init parameter */
94static int sg_allow_dio = SG_ALLOW_DIO_DEF;
95
96static int scatter_elem_sz = SG_SCATTER_SZ;
97static int scatter_elem_sz_prev = SG_SCATTER_SZ;
98
99#define SG_SECTOR_SZ 512
100#define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)
101
102#define SG_DEV_ARR_LUMP 32	/* amount to over allocate sg_dev_arr by */
103
104static int sg_add(struct class_device *, struct class_interface *);
105static void sg_remove(struct class_device *, struct class_interface *);
106
107static DEFINE_RWLOCK(sg_dev_arr_lock);	/* Also used to lock
108							   file descriptor list for device */
109
110static struct class_interface sg_interface = {
111	.add		= sg_add,
112	.remove		= sg_remove,
113};
114
115typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
116	unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
117	unsigned short sglist_len; /* size of malloc'd scatter-gather list ++ */
118	unsigned bufflen;	/* Size of (aggregate) data buffer */
119	unsigned b_malloc_len;	/* actual len malloc'ed in buffer */
120	struct scatterlist *buffer;/* scatter list */
121	char dio_in_use;	/* 0->indirect IO (or mmap), 1->dio */
122	unsigned char cmd_opcode; /* first byte of command */
123} Sg_scatter_hold;
124
125struct sg_device;		/* forward declarations */
126struct sg_fd;
127
128typedef struct sg_request {	/* SG_MAX_QUEUE requests outstanding per file */
129	struct sg_request *nextrp;	/* NULL -> tail request (slist) */
130	struct sg_fd *parentfp;	/* NULL -> not in use */
131	Sg_scatter_hold data;	/* hold buffer, perhaps scatter list */
132	sg_io_hdr_t header;	/* scsi command+info, see <scsi/sg.h> */
133	unsigned char sense_b[SCSI_SENSE_BUFFERSIZE];
134	char res_used;		/* 1 -> using reserve buffer, 0 -> not ... */
135	char orphan;		/* 1 -> drop on sight, 0 -> normal */
136	char sg_io_owned;	/* 1 -> packet belongs to SG_IO */
137	volatile char done;	/* 0->before bh, 1->before read, 2->read */
138} Sg_request;
139
140typedef struct sg_fd {		/* holds the state of a file descriptor */
141	struct sg_fd *nextfp;	/* NULL when last opened fd on this device */
142	struct sg_device *parentdp;	/* owning device */
143	wait_queue_head_t read_wait;	/* queue read until command done */
144	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
145	int timeout;		/* defaults to SG_DEFAULT_TIMEOUT      */
146	int timeout_user;	/* defaults to SG_DEFAULT_TIMEOUT_USER */
147	Sg_scatter_hold reserve;	/* buffer held for this file descriptor */
148	unsigned save_scat_len;	/* original length of trunc. scat. element */
149	Sg_request *headrp;	/* head of request slist, NULL->empty */
150	struct fasync_struct *async_qp;	/* used by asynchronous notification */
151	Sg_request req_arr[SG_MAX_QUEUE];	/* used as singly-linked list */
152	char low_dma;		/* as in parent but possibly overridden to 1 */
153	char force_packid;	/* 1 -> pack_id input to read(), 0 -> ignored */
154	volatile char closed;	/* 1 -> fd closed but request(s) outstanding */
155	char cmd_q;		/* 1 -> allow command queuing, 0 -> don't */
156	char next_cmd_len;	/* 0 -> automatic (def), >0 -> use on next write() */
157	char keep_orphan;	/* 0 -> drop orphan (def), 1 -> keep for read() */
158	char mmap_called;	/* 0 -> mmap() never called on this fd */
159} Sg_fd;
160
161typedef struct sg_device { /* holds the state of each scsi generic device */
162	struct scsi_device *device;
163	wait_queue_head_t o_excl_wait;	/* queue open() when O_EXCL in use */
164	int sg_tablesize;	/* adapter's max scatter-gather table size */
165	Sg_fd *headfp;		/* first open fd belonging to this device */
166	volatile char detached;	/* 0->attached, 1->detached pending removal */
167	volatile char exclude;	/* opened for exclusive access */
168	char sgdebug;		/* 0->off, 1->sense, 9->dump dev, 10-> all devs */
169	struct gendisk *disk;
170	struct cdev * cdev;	/* char_dev [sysfs: /sys/cdev/major/sg<n>] */
171} Sg_device;
172
173static int sg_fasync(int fd, struct file *filp, int mode);
174/* tasklet or soft irq callback */
175static void sg_cmd_done(void *data, char *sense, int result, int resid);
176static int sg_start_req(Sg_request * srp);
177static void sg_finish_rem_req(Sg_request * srp);
178static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
179static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
180			 int tablesize);
181static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
182			   Sg_request * srp);
183static ssize_t sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
184			    int blocking, int read_only, Sg_request ** o_srp);
185static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
186			   unsigned char *cmnd, int timeout, int blocking);
187static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
188		      int wr_xf, int *countp, unsigned char __user **up);
189static int sg_write_xfer(Sg_request * srp);
190static int sg_read_xfer(Sg_request * srp);
191static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
192static void sg_remove_scat(Sg_scatter_hold * schp);
193static void sg_build_reserve(Sg_fd * sfp, int req_size);
194static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
195static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
196static struct page *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
197static void sg_page_free(struct page *page, int size);
198static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
199static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
200static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
201static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
202static Sg_request *sg_add_request(Sg_fd * sfp);
203static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
204static int sg_res_in_use(Sg_fd * sfp);
205static int sg_allow_access(unsigned char opcode, char dev_type);
206static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
207static Sg_device *sg_get_dev(int dev);
208#ifdef CONFIG_SCSI_PROC_FS
209static int sg_last_dev(void);
210#endif
211
212static Sg_device **sg_dev_arr = NULL;
213static int sg_dev_max;
214static int sg_nr_dev;
215
216#define SZ_SG_HEADER sizeof(struct sg_header)
217#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
218#define SZ_SG_IOVEC sizeof(sg_iovec_t)
219#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
220
221static int
222sg_open(struct inode *inode, struct file *filp)
223{
224	int dev = iminor(inode);
225	int flags = filp->f_flags;
226	struct request_queue *q;
227	Sg_device *sdp;
228	Sg_fd *sfp;
229	int res;
230	int retval;
231
232	nonseekable_open(inode, filp);
233	SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
234	sdp = sg_get_dev(dev);
235	if ((!sdp) || (!sdp->device))
236		return -ENXIO;
237	if (sdp->detached)
238		return -ENODEV;
239
240	/* This driver's module count bumped by fops_get in <linux/fs.h> */
241	/* Prevent the device driver from vanishing while we sleep */
242	retval = scsi_device_get(sdp->device);
243	if (retval)
244		return retval;
245
246	if (!((flags & O_NONBLOCK) ||
247	      scsi_block_when_processing_errors(sdp->device))) {
248		retval = -ENXIO;
249		/* we are in error recovery for this device */
250		goto error_out;
251	}
252
253	if (flags & O_EXCL) {
254		if (O_RDONLY == (flags & O_ACCMODE)) {
255			retval = -EPERM; /* Can't lock it with read only access */
256			goto error_out;
257		}
258		if (sdp->headfp && (flags & O_NONBLOCK)) {
259			retval = -EBUSY;
260			goto error_out;
261		}
262		res = 0;
263		__wait_event_interruptible(sdp->o_excl_wait,
264			((sdp->headfp || sdp->exclude) ? 0 : (sdp->exclude = 1)), res);
265		if (res) {
266			retval = res;	/* -ERESTARTSYS because signal hit process */
267			goto error_out;
268		}
269	} else if (sdp->exclude) {	/* some other fd has an exclusive lock on dev */
270		if (flags & O_NONBLOCK) {
271			retval = -EBUSY;
272			goto error_out;
273		}
274		res = 0;
275		__wait_event_interruptible(sdp->o_excl_wait, (!sdp->exclude),
276					   res);
277		if (res) {
278			retval = res;	/* -ERESTARTSYS because signal hit process */
279			goto error_out;
280		}
281	}
282	if (sdp->detached) {
283		retval = -ENODEV;
284		goto error_out;
285	}
286	if (!sdp->headfp) {	/* no existing opens on this device */
287		sdp->sgdebug = 0;
288		q = sdp->device->request_queue;
289		sdp->sg_tablesize = min(q->max_hw_segments,
290					q->max_phys_segments);
291	}
292	if ((sfp = sg_add_sfp(sdp, dev)))
293		filp->private_data = sfp;
294	else {
295		if (flags & O_EXCL)
296			sdp->exclude = 0;	/* undo if error */
297		retval = -ENOMEM;
298		goto error_out;
299	}
300	return 0;
301
302      error_out:
303	scsi_device_put(sdp->device);
304	return retval;
305}
306
307/* Following function was formerly called 'sg_close' */
308static int
309sg_release(struct inode *inode, struct file *filp)
310{
311	Sg_device *sdp;
312	Sg_fd *sfp;
313
314	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
315		return -ENXIO;
316	SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
317	sg_fasync(-1, filp, 0);	/* remove filp from async notification list */
318	if (0 == sg_remove_sfp(sdp, sfp)) {	/* Returns 1 when sdp gone */
319		if (!sdp->detached) {
320			scsi_device_put(sdp->device);
321		}
322		sdp->exclude = 0;
323		wake_up_interruptible(&sdp->o_excl_wait);
324	}
325	return 0;
326}
327
328static ssize_t
329sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
330{
331	Sg_device *sdp;
332	Sg_fd *sfp;
333	Sg_request *srp;
334	int req_pack_id = -1;
335	sg_io_hdr_t *hp;
336	struct sg_header *old_hdr = NULL;
337	int retval = 0;
338
339	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
340		return -ENXIO;
341	SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
342				   sdp->disk->disk_name, (int) count));
343
344	if (!access_ok(VERIFY_WRITE, buf, count))
345		return -EFAULT;
346	if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
347		old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
348		if (!old_hdr)
349			return -ENOMEM;
350		if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
351			retval = -EFAULT;
352			goto free_old_hdr;
353		}
354		if (old_hdr->reply_len < 0) {
355			if (count >= SZ_SG_IO_HDR) {
356				sg_io_hdr_t *new_hdr;
357				new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
358				if (!new_hdr) {
359					retval = -ENOMEM;
360					goto free_old_hdr;
361				}
362				retval =__copy_from_user
363				    (new_hdr, buf, SZ_SG_IO_HDR);
364				req_pack_id = new_hdr->pack_id;
365				kfree(new_hdr);
366				if (retval) {
367					retval = -EFAULT;
368					goto free_old_hdr;
369				}
370			}
371		} else
372			req_pack_id = old_hdr->pack_id;
373	}
374	srp = sg_get_rq_mark(sfp, req_pack_id);
375	if (!srp) {		/* now wait on packet to arrive */
376		if (sdp->detached) {
377			retval = -ENODEV;
378			goto free_old_hdr;
379		}
380		if (filp->f_flags & O_NONBLOCK) {
381			retval = -EAGAIN;
382			goto free_old_hdr;
383		}
384		while (1) {
385			retval = 0; /* following macro beats race condition */
386			__wait_event_interruptible(sfp->read_wait,
387				(sdp->detached ||
388				(srp = sg_get_rq_mark(sfp, req_pack_id))),
389				retval);
390			if (sdp->detached) {
391				retval = -ENODEV;
392				goto free_old_hdr;
393			}
394			if (0 == retval)
395				break;
396
397			/* -ERESTARTSYS as signal hit process */
398			goto free_old_hdr;
399		}
400	}
401	if (srp->header.interface_id != '\0') {
402		retval = sg_new_read(sfp, buf, count, srp);
403		goto free_old_hdr;
404	}
405
406	hp = &srp->header;
407	if (old_hdr == NULL) {
408		old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
409		if (! old_hdr) {
410			retval = -ENOMEM;
411			goto free_old_hdr;
412		}
413	}
414	memset(old_hdr, 0, SZ_SG_HEADER);
415	old_hdr->reply_len = (int) hp->timeout;
416	old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
417	old_hdr->pack_id = hp->pack_id;
418	old_hdr->twelve_byte =
419	    ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
420	old_hdr->target_status = hp->masked_status;
421	old_hdr->host_status = hp->host_status;
422	old_hdr->driver_status = hp->driver_status;
423	if ((CHECK_CONDITION & hp->masked_status) ||
424	    (DRIVER_SENSE & hp->driver_status))
425		memcpy(old_hdr->sense_buffer, srp->sense_b,
426		       sizeof (old_hdr->sense_buffer));
427	switch (hp->host_status) {
428	/* This setup of 'result' is for backward compatibility and is best
429	   ignored by the user who should use target, host + driver status */
430	case DID_OK:
431	case DID_PASSTHROUGH:
432	case DID_SOFT_ERROR:
433		old_hdr->result = 0;
434		break;
435	case DID_NO_CONNECT:
436	case DID_BUS_BUSY:
437	case DID_TIME_OUT:
438		old_hdr->result = EBUSY;
439		break;
440	case DID_BAD_TARGET:
441	case DID_ABORT:
442	case DID_PARITY:
443	case DID_RESET:
444	case DID_BAD_INTR:
445		old_hdr->result = EIO;
446		break;
447	case DID_ERROR:
448		old_hdr->result = (srp->sense_b[0] == 0 &&
449				  hp->masked_status == GOOD) ? 0 : EIO;
450		break;
451	default:
452		old_hdr->result = EIO;
453		break;
454	}
455
456	/* Now copy the result back to the user buffer.  */
457	if (count >= SZ_SG_HEADER) {
458		if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
459			retval = -EFAULT;
460			goto free_old_hdr;
461		}
462		buf += SZ_SG_HEADER;
463		if (count > old_hdr->reply_len)
464			count = old_hdr->reply_len;
465		if (count > SZ_SG_HEADER) {
466			if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
467				retval = -EFAULT;
468				goto free_old_hdr;
469			}
470		}
471	} else
472		count = (old_hdr->result == 0) ? 0 : -EIO;
473	sg_finish_rem_req(srp);
474	retval = count;
475free_old_hdr:
476	kfree(old_hdr);
477	return retval;
478}
479
480static ssize_t
481sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
482{
483	sg_io_hdr_t *hp = &srp->header;
484	int err = 0;
485	int len;
486
487	if (count < SZ_SG_IO_HDR) {
488		err = -EINVAL;
489		goto err_out;
490	}
491	hp->sb_len_wr = 0;
492	if ((hp->mx_sb_len > 0) && hp->sbp) {
493		if ((CHECK_CONDITION & hp->masked_status) ||
494		    (DRIVER_SENSE & hp->driver_status)) {
495			int sb_len = SCSI_SENSE_BUFFERSIZE;
496			sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
497			len = 8 + (int) srp->sense_b[7];	/* Additional sense length field */
498			len = (len > sb_len) ? sb_len : len;
499			if (copy_to_user(hp->sbp, srp->sense_b, len)) {
500				err = -EFAULT;
501				goto err_out;
502			}
503			hp->sb_len_wr = len;
504		}
505	}
506	if (hp->masked_status || hp->host_status || hp->driver_status)
507		hp->info |= SG_INFO_CHECK;
508	if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
509		err = -EFAULT;
510		goto err_out;
511	}
512	err = sg_read_xfer(srp);
513      err_out:
514	sg_finish_rem_req(srp);
515	return (0 == err) ? count : err;
516}
517
518static ssize_t
519sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
520{
521	int mxsize, cmd_size, k;
522	int input_size, blocking;
523	unsigned char opcode;
524	Sg_device *sdp;
525	Sg_fd *sfp;
526	Sg_request *srp;
527	struct sg_header old_hdr;
528	sg_io_hdr_t *hp;
529	unsigned char cmnd[MAX_COMMAND_SIZE];
530
531	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
532		return -ENXIO;
533	SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
534				   sdp->disk->disk_name, (int) count));
535	if (sdp->detached)
536		return -ENODEV;
537	if (!((filp->f_flags & O_NONBLOCK) ||
538	      scsi_block_when_processing_errors(sdp->device)))
539		return -ENXIO;
540
541	if (!access_ok(VERIFY_READ, buf, count))
542		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
543	if (count < SZ_SG_HEADER)
544		return -EIO;
545	if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
546		return -EFAULT;
547	blocking = !(filp->f_flags & O_NONBLOCK);
548	if (old_hdr.reply_len < 0)
549		return sg_new_write(sfp, buf, count, blocking, 0, NULL);
550	if (count < (SZ_SG_HEADER + 6))
551		return -EIO;	/* The minimum scsi command length is 6 bytes. */
552
553	if (!(srp = sg_add_request(sfp))) {
554		SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
555		return -EDOM;
556	}
557	buf += SZ_SG_HEADER;
558	__get_user(opcode, buf);
559	if (sfp->next_cmd_len > 0) {
560		if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
561			SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
562			sfp->next_cmd_len = 0;
563			sg_remove_request(sfp, srp);
564			return -EIO;
565		}
566		cmd_size = sfp->next_cmd_len;
567		sfp->next_cmd_len = 0;	/* reset so only this write() effected */
568	} else {
569		cmd_size = COMMAND_SIZE(opcode);	/* based on SCSI command group */
570		if ((opcode >= 0xc0) && old_hdr.twelve_byte)
571			cmd_size = 12;
572	}
573	SCSI_LOG_TIMEOUT(4, printk(
574		"sg_write:   scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
575/* Determine buffer size.  */
576	input_size = count - cmd_size;
577	mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
578	mxsize -= SZ_SG_HEADER;
579	input_size -= SZ_SG_HEADER;
580	if (input_size < 0) {
581		sg_remove_request(sfp, srp);
582		return -EIO;	/* User did not pass enough bytes for this command. */
583	}
584	hp = &srp->header;
585	hp->interface_id = '\0';	/* indicator of old interface tunnelled */
586	hp->cmd_len = (unsigned char) cmd_size;
587	hp->iovec_count = 0;
588	hp->mx_sb_len = 0;
589	if (input_size > 0)
590		hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
591		    SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
592	else
593		hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
594	hp->dxfer_len = mxsize;
595	hp->dxferp = (char __user *)buf + cmd_size;
596	hp->sbp = NULL;
597	hp->timeout = old_hdr.reply_len;	/* structure abuse ... */
598	hp->flags = input_size;	/* structure abuse ... */
599	hp->pack_id = old_hdr.pack_id;
600	hp->usr_ptr = NULL;
601	if (__copy_from_user(cmnd, buf, cmd_size))
602		return -EFAULT;
603	/*
604	 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
605	 * but is is possible that the app intended SG_DXFER_TO_DEV, because there
606	 * is a non-zero input_size, so emit a warning.
607	 */
608	if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV)
609		if (printk_ratelimit())
610			printk(KERN_WARNING
611			       "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--"
612			       "guessing data in;\n" KERN_WARNING "   "
613			       "program %s not setting count and/or reply_len properly\n",
614			       old_hdr.reply_len - (int)SZ_SG_HEADER,
615			       input_size, (unsigned int) cmnd[0],
616			       current->comm);
617	k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
618	return (k < 0) ? k : count;
619}
620
621static ssize_t
622sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
623	     int blocking, int read_only, Sg_request ** o_srp)
624{
625	int k;
626	Sg_request *srp;
627	sg_io_hdr_t *hp;
628	unsigned char cmnd[MAX_COMMAND_SIZE];
629	int timeout;
630	unsigned long ul_timeout;
631
632	if (count < SZ_SG_IO_HDR)
633		return -EINVAL;
634	if (!access_ok(VERIFY_READ, buf, count))
635		return -EFAULT; /* protects following copy_from_user()s + get_user()s */
636
637	sfp->cmd_q = 1;	/* when sg_io_hdr seen, set command queuing on */
638	if (!(srp = sg_add_request(sfp))) {
639		SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
640		return -EDOM;
641	}
642	hp = &srp->header;
643	if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
644		sg_remove_request(sfp, srp);
645		return -EFAULT;
646	}
647	if (hp->interface_id != 'S') {
648		sg_remove_request(sfp, srp);
649		return -ENOSYS;
650	}
651	if (hp->flags & SG_FLAG_MMAP_IO) {
652		if (hp->dxfer_len > sfp->reserve.bufflen) {
653			sg_remove_request(sfp, srp);
654			return -ENOMEM;	/* MMAP_IO size must fit in reserve buffer */
655		}
656		if (hp->flags & SG_FLAG_DIRECT_IO) {
657			sg_remove_request(sfp, srp);
658			return -EINVAL;	/* either MMAP_IO or DIRECT_IO (not both) */
659		}
660		if (sg_res_in_use(sfp)) {
661			sg_remove_request(sfp, srp);
662			return -EBUSY;	/* reserve buffer already being used */
663		}
664	}
665	ul_timeout = msecs_to_jiffies(srp->header.timeout);
666	timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
667	if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
668		sg_remove_request(sfp, srp);
669		return -EMSGSIZE;
670	}
671	if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
672		sg_remove_request(sfp, srp);
673		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
674	}
675	if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
676		sg_remove_request(sfp, srp);
677		return -EFAULT;
678	}
679	if (read_only &&
680	    (!sg_allow_access(cmnd[0], sfp->parentdp->device->type))) {
681		sg_remove_request(sfp, srp);
682		return -EPERM;
683	}
684	k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
685	if (k < 0)
686		return k;
687	if (o_srp)
688		*o_srp = srp;
689	return count;
690}
691
692static int
693sg_common_write(Sg_fd * sfp, Sg_request * srp,
694		unsigned char *cmnd, int timeout, int blocking)
695{
696	int k, data_dir;
697	Sg_device *sdp = sfp->parentdp;
698	sg_io_hdr_t *hp = &srp->header;
699
700	srp->data.cmd_opcode = cmnd[0];	/* hold opcode of command */
701	hp->status = 0;
702	hp->masked_status = 0;
703	hp->msg_status = 0;
704	hp->info = 0;
705	hp->host_status = 0;
706	hp->driver_status = 0;
707	hp->resid = 0;
708	SCSI_LOG_TIMEOUT(4, printk("sg_common_write:  scsi opcode=0x%02x, cmd_size=%d\n",
709			  (int) cmnd[0], (int) hp->cmd_len));
710
711	if ((k = sg_start_req(srp))) {
712		SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k));
713		sg_finish_rem_req(srp);
714		return k;	/* probably out of space --> ENOMEM */
715	}
716	if ((k = sg_write_xfer(srp))) {
717		SCSI_LOG_TIMEOUT(1, printk("sg_common_write: write_xfer, bad address\n"));
718		sg_finish_rem_req(srp);
719		return k;
720	}
721	if (sdp->detached) {
722		sg_finish_rem_req(srp);
723		return -ENODEV;
724	}
725
726	switch (hp->dxfer_direction) {
727	case SG_DXFER_TO_FROM_DEV:
728	case SG_DXFER_FROM_DEV:
729		data_dir = DMA_FROM_DEVICE;
730		break;
731	case SG_DXFER_TO_DEV:
732		data_dir = DMA_TO_DEVICE;
733		break;
734	case SG_DXFER_UNKNOWN:
735		data_dir = DMA_BIDIRECTIONAL;
736		break;
737	default:
738		data_dir = DMA_NONE;
739		break;
740	}
741	hp->duration = jiffies_to_msecs(jiffies);
742/* Now send everything of to mid-level. The next time we hear about this
743   packet is when sg_cmd_done() is called (i.e. a callback). */
744	if (scsi_execute_async(sdp->device, cmnd, hp->cmd_len, data_dir, srp->data.buffer,
745				hp->dxfer_len, srp->data.k_use_sg, timeout,
746				SG_DEFAULT_RETRIES, srp, sg_cmd_done,
747				GFP_ATOMIC)) {
748		SCSI_LOG_TIMEOUT(1, printk("sg_common_write: scsi_execute_async failed\n"));
749		/*
750		 * most likely out of mem, but could also be a bad map
751		 */
752		sg_finish_rem_req(srp);
753		return -ENOMEM;
754	} else
755		return 0;
756}
757
758static int
759sg_srp_done(Sg_request *srp, Sg_fd *sfp)
760{
761	unsigned long iflags;
762	int done;
763
764	read_lock_irqsave(&sfp->rq_list_lock, iflags);
765	done = srp->done;
766	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
767	return done;
768}
769
770static int
771sg_ioctl(struct inode *inode, struct file *filp,
772	 unsigned int cmd_in, unsigned long arg)
773{
774	void __user *p = (void __user *)arg;
775	int __user *ip = p;
776	int result, val, read_only;
777	Sg_device *sdp;
778	Sg_fd *sfp;
779	Sg_request *srp;
780	unsigned long iflags;
781
782	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
783		return -ENXIO;
784	SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
785				   sdp->disk->disk_name, (int) cmd_in));
786	read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
787
788	switch (cmd_in) {
789	case SG_IO:
790		{
791			int blocking = 1;	/* ignore O_NONBLOCK flag */
792
793			if (sdp->detached)
794				return -ENODEV;
795			if (!scsi_block_when_processing_errors(sdp->device))
796				return -ENXIO;
797			if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
798				return -EFAULT;
799			result =
800			    sg_new_write(sfp, p, SZ_SG_IO_HDR,
801					 blocking, read_only, &srp);
802			if (result < 0)
803				return result;
804			srp->sg_io_owned = 1;
805			while (1) {
806				result = 0;	/* following macro to beat race condition */
807				__wait_event_interruptible(sfp->read_wait,
808					(sdp->detached || sfp->closed || sg_srp_done(srp, sfp)),
809							   result);
810				if (sdp->detached)
811					return -ENODEV;
812				if (sfp->closed)
813					return 0;	/* request packet dropped already */
814				if (0 == result)
815					break;
816				srp->orphan = 1;
817				return result;	/* -ERESTARTSYS because signal hit process */
818			}
819			write_lock_irqsave(&sfp->rq_list_lock, iflags);
820			srp->done = 2;
821			write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
822			result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
823			return (result < 0) ? result : 0;
824		}
825	case SG_SET_TIMEOUT:
826		result = get_user(val, ip);
827		if (result)
828			return result;
829		if (val < 0)
830			return -EIO;
831		if (val >= MULDIV (INT_MAX, USER_HZ, HZ))
832		    val = MULDIV (INT_MAX, USER_HZ, HZ);
833		sfp->timeout_user = val;
834		sfp->timeout = MULDIV (val, HZ, USER_HZ);
835
836		return 0;
837	case SG_GET_TIMEOUT:	/* N.B. User receives timeout as return value */
838				/* strange ..., for backward compatibility */
839		return sfp->timeout_user;
840	case SG_SET_FORCE_LOW_DMA:
841		result = get_user(val, ip);
842		if (result)
843			return result;
844		if (val) {
845			sfp->low_dma = 1;
846			if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
847				val = (int) sfp->reserve.bufflen;
848				sg_remove_scat(&sfp->reserve);
849				sg_build_reserve(sfp, val);
850			}
851		} else {
852			if (sdp->detached)
853				return -ENODEV;
854			sfp->low_dma = sdp->device->host->unchecked_isa_dma;
855		}
856		return 0;
857	case SG_GET_LOW_DMA:
858		return put_user((int) sfp->low_dma, ip);
859	case SG_GET_SCSI_ID:
860		if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
861			return -EFAULT;
862		else {
863			sg_scsi_id_t __user *sg_idp = p;
864
865			if (sdp->detached)
866				return -ENODEV;
867			__put_user((int) sdp->device->host->host_no,
868				   &sg_idp->host_no);
869			__put_user((int) sdp->device->channel,
870				   &sg_idp->channel);
871			__put_user((int) sdp->device->id, &sg_idp->scsi_id);
872			__put_user((int) sdp->device->lun, &sg_idp->lun);
873			__put_user((int) sdp->device->type, &sg_idp->scsi_type);
874			__put_user((short) sdp->device->host->cmd_per_lun,
875				   &sg_idp->h_cmd_per_lun);
876			__put_user((short) sdp->device->queue_depth,
877				   &sg_idp->d_queue_depth);
878			__put_user(0, &sg_idp->unused[0]);
879			__put_user(0, &sg_idp->unused[1]);
880			return 0;
881		}
882	case SG_SET_FORCE_PACK_ID:
883		result = get_user(val, ip);
884		if (result)
885			return result;
886		sfp->force_packid = val ? 1 : 0;
887		return 0;
888	case SG_GET_PACK_ID:
889		if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
890			return -EFAULT;
891		read_lock_irqsave(&sfp->rq_list_lock, iflags);
892		for (srp = sfp->headrp; srp; srp = srp->nextrp) {
893			if ((1 == srp->done) && (!srp->sg_io_owned)) {
894				read_unlock_irqrestore(&sfp->rq_list_lock,
895						       iflags);
896				__put_user(srp->header.pack_id, ip);
897				return 0;
898			}
899		}
900		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
901		__put_user(-1, ip);
902		return 0;
903	case SG_GET_NUM_WAITING:
904		read_lock_irqsave(&sfp->rq_list_lock, iflags);
905		for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
906			if ((1 == srp->done) && (!srp->sg_io_owned))
907				++val;
908		}
909		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
910		return put_user(val, ip);
911	case SG_GET_SG_TABLESIZE:
912		return put_user(sdp->sg_tablesize, ip);
913	case SG_SET_RESERVED_SIZE:
914		result = get_user(val, ip);
915		if (result)
916			return result;
917                if (val < 0)
918                        return -EINVAL;
919		val = min_t(int, val,
920				sdp->device->request_queue->max_sectors * 512);
921		if (val != sfp->reserve.bufflen) {
922			if (sg_res_in_use(sfp) || sfp->mmap_called)
923				return -EBUSY;
924			sg_remove_scat(&sfp->reserve);
925			sg_build_reserve(sfp, val);
926		}
927		return 0;
928	case SG_GET_RESERVED_SIZE:
929		val = min_t(int, sfp->reserve.bufflen,
930				sdp->device->request_queue->max_sectors * 512);
931		return put_user(val, ip);
932	case SG_SET_COMMAND_Q:
933		result = get_user(val, ip);
934		if (result)
935			return result;
936		sfp->cmd_q = val ? 1 : 0;
937		return 0;
938	case SG_GET_COMMAND_Q:
939		return put_user((int) sfp->cmd_q, ip);
940	case SG_SET_KEEP_ORPHAN:
941		result = get_user(val, ip);
942		if (result)
943			return result;
944		sfp->keep_orphan = val;
945		return 0;
946	case SG_GET_KEEP_ORPHAN:
947		return put_user((int) sfp->keep_orphan, ip);
948	case SG_NEXT_CMD_LEN:
949		result = get_user(val, ip);
950		if (result)
951			return result;
952		sfp->next_cmd_len = (val > 0) ? val : 0;
953		return 0;
954	case SG_GET_VERSION_NUM:
955		return put_user(sg_version_num, ip);
956	case SG_GET_ACCESS_COUNT:
957		/* faked - we don't have a real access count anymore */
958		val = (sdp->device ? 1 : 0);
959		return put_user(val, ip);
960	case SG_GET_REQUEST_TABLE:
961		if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
962			return -EFAULT;
963		else {
964			sg_req_info_t *rinfo;
965			unsigned int ms;
966
967			rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
968								GFP_KERNEL);
969			if (!rinfo)
970				return -ENOMEM;
971			read_lock_irqsave(&sfp->rq_list_lock, iflags);
972			for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
973			     ++val, srp = srp ? srp->nextrp : srp) {
974				memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
975				if (srp) {
976					rinfo[val].req_state = srp->done + 1;
977					rinfo[val].problem =
978					    srp->header.masked_status &
979					    srp->header.host_status &
980					    srp->header.driver_status;
981					if (srp->done)
982						rinfo[val].duration =
983							srp->header.duration;
984					else {
985						ms = jiffies_to_msecs(jiffies);
986						rinfo[val].duration =
987						    (ms > srp->header.duration) ?
988						    (ms - srp->header.duration) : 0;
989					}
990					rinfo[val].orphan = srp->orphan;
991					rinfo[val].sg_io_owned =
992							srp->sg_io_owned;
993					rinfo[val].pack_id =
994							srp->header.pack_id;
995					rinfo[val].usr_ptr =
996							srp->header.usr_ptr;
997				}
998			}
999			read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1000			result = __copy_to_user(p, rinfo,
1001						SZ_SG_REQ_INFO * SG_MAX_QUEUE);
1002			result = result ? -EFAULT : 0;
1003			kfree(rinfo);
1004			return result;
1005		}
1006	case SG_EMULATED_HOST:
1007		if (sdp->detached)
1008			return -ENODEV;
1009		return put_user(sdp->device->host->hostt->emulated, ip);
1010	case SG_SCSI_RESET:
1011		if (sdp->detached)
1012			return -ENODEV;
1013		if (filp->f_flags & O_NONBLOCK) {
1014			if (scsi_host_in_recovery(sdp->device->host))
1015				return -EBUSY;
1016		} else if (!scsi_block_when_processing_errors(sdp->device))
1017			return -EBUSY;
1018		result = get_user(val, ip);
1019		if (result)
1020			return result;
1021		if (SG_SCSI_RESET_NOTHING == val)
1022			return 0;
1023		switch (val) {
1024		case SG_SCSI_RESET_DEVICE:
1025			val = SCSI_TRY_RESET_DEVICE;
1026			break;
1027		case SG_SCSI_RESET_BUS:
1028			val = SCSI_TRY_RESET_BUS;
1029			break;
1030		case SG_SCSI_RESET_HOST:
1031			val = SCSI_TRY_RESET_HOST;
1032			break;
1033		default:
1034			return -EINVAL;
1035		}
1036		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
1037			return -EACCES;
1038		return (scsi_reset_provider(sdp->device, val) ==
1039			SUCCESS) ? 0 : -EIO;
1040	case SCSI_IOCTL_SEND_COMMAND:
1041		if (sdp->detached)
1042			return -ENODEV;
1043		if (read_only) {
1044			unsigned char opcode = WRITE_6;
1045			Scsi_Ioctl_Command __user *siocp = p;
1046
1047			if (copy_from_user(&opcode, siocp->data, 1))
1048				return -EFAULT;
1049			if (!sg_allow_access(opcode, sdp->device->type))
1050				return -EPERM;
1051		}
1052		return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p);
1053	case SG_SET_DEBUG:
1054		result = get_user(val, ip);
1055		if (result)
1056			return result;
1057		sdp->sgdebug = (char) val;
1058		return 0;
1059	case SCSI_IOCTL_GET_IDLUN:
1060	case SCSI_IOCTL_GET_BUS_NUMBER:
1061	case SCSI_IOCTL_PROBE_HOST:
1062	case SG_GET_TRANSFORM:
1063		if (sdp->detached)
1064			return -ENODEV;
1065		return scsi_ioctl(sdp->device, cmd_in, p);
1066	case BLKSECTGET:
1067		return put_user(sdp->device->request_queue->max_sectors * 512,
1068				ip);
1069	default:
1070		if (read_only)
1071			return -EPERM;	/* don't know so take safe approach */
1072		return scsi_ioctl(sdp->device, cmd_in, p);
1073	}
1074}
1075
1076#ifdef CONFIG_COMPAT
1077static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1078{
1079	Sg_device *sdp;
1080	Sg_fd *sfp;
1081	struct scsi_device *sdev;
1082
1083	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
1084		return -ENXIO;
1085
1086	sdev = sdp->device;
1087	if (sdev->host->hostt->compat_ioctl) {
1088		int ret;
1089
1090		ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
1091
1092		return ret;
1093	}
1094
1095	return -ENOIOCTLCMD;
1096}
1097#endif
1098
1099static unsigned int
1100sg_poll(struct file *filp, poll_table * wait)
1101{
1102	unsigned int res = 0;
1103	Sg_device *sdp;
1104	Sg_fd *sfp;
1105	Sg_request *srp;
1106	int count = 0;
1107	unsigned long iflags;
1108
1109	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))
1110	    || sfp->closed)
1111		return POLLERR;
1112	poll_wait(filp, &sfp->read_wait, wait);
1113	read_lock_irqsave(&sfp->rq_list_lock, iflags);
1114	for (srp = sfp->headrp; srp; srp = srp->nextrp) {
1115		/* if any read waiting, flag it */
1116		if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
1117			res = POLLIN | POLLRDNORM;
1118		++count;
1119	}
1120	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1121
1122	if (sdp->detached)
1123		res |= POLLHUP;
1124	else if (!sfp->cmd_q) {
1125		if (0 == count)
1126			res |= POLLOUT | POLLWRNORM;
1127	} else if (count < SG_MAX_QUEUE)
1128		res |= POLLOUT | POLLWRNORM;
1129	SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n",
1130				   sdp->disk->disk_name, (int) res));
1131	return res;
1132}
1133
1134static int
1135sg_fasync(int fd, struct file *filp, int mode)
1136{
1137	int retval;
1138	Sg_device *sdp;
1139	Sg_fd *sfp;
1140
1141	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
1142		return -ENXIO;
1143	SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n",
1144				   sdp->disk->disk_name, mode));
1145
1146	retval = fasync_helper(fd, filp, mode, &sfp->async_qp);
1147	return (retval < 0) ? retval : 0;
1148}
1149
1150static struct page *
1151sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
1152{
1153	Sg_fd *sfp;
1154	struct page *page = NOPAGE_SIGBUS;
1155	unsigned long offset, len, sa;
1156	Sg_scatter_hold *rsv_schp;
1157	struct scatterlist *sg;
1158	int k;
1159
1160	if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
1161		return page;
1162	rsv_schp = &sfp->reserve;
1163	offset = addr - vma->vm_start;
1164	if (offset >= rsv_schp->bufflen)
1165		return page;
1166	SCSI_LOG_TIMEOUT(3, printk("sg_vma_nopage: offset=%lu, scatg=%d\n",
1167				   offset, rsv_schp->k_use_sg));
1168	sg = rsv_schp->buffer;
1169	sa = vma->vm_start;
1170	for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
1171	     ++k, ++sg) {
1172		len = vma->vm_end - sa;
1173		len = (len < sg->length) ? len : sg->length;
1174		if (offset < len) {
1175			page = virt_to_page(page_address(sg->page) + offset);
1176			get_page(page);	/* increment page count */
1177			break;
1178		}
1179		sa += len;
1180		offset -= len;
1181	}
1182
1183	if (type)
1184		*type = VM_FAULT_MINOR;
1185	return page;
1186}
1187
1188static struct vm_operations_struct sg_mmap_vm_ops = {
1189	.nopage = sg_vma_nopage,
1190};
1191
1192static int
1193sg_mmap(struct file *filp, struct vm_area_struct *vma)
1194{
1195	Sg_fd *sfp;
1196	unsigned long req_sz, len, sa;
1197	Sg_scatter_hold *rsv_schp;
1198	int k;
1199	struct scatterlist *sg;
1200
1201	if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
1202		return -ENXIO;
1203	req_sz = vma->vm_end - vma->vm_start;
1204	SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
1205				   (void *) vma->vm_start, (int) req_sz));
1206	if (vma->vm_pgoff)
1207		return -EINVAL;	/* want no offset */
1208	rsv_schp = &sfp->reserve;
1209	if (req_sz > rsv_schp->bufflen)
1210		return -ENOMEM;	/* cannot map more than reserved buffer */
1211
1212	sa = vma->vm_start;
1213	sg = rsv_schp->buffer;
1214	for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
1215	     ++k, ++sg) {
1216		len = vma->vm_end - sa;
1217		len = (len < sg->length) ? len : sg->length;
1218		sa += len;
1219	}
1220
1221	sfp->mmap_called = 1;
1222	vma->vm_flags |= VM_RESERVED;
1223	vma->vm_private_data = sfp;
1224	vma->vm_ops = &sg_mmap_vm_ops;
1225	return 0;
1226}
1227
1228/* This function is a "bottom half" handler that is called by the
1229 * mid level when a command is completed (or has failed). */
1230static void
1231sg_cmd_done(void *data, char *sense, int result, int resid)
1232{
1233	Sg_request *srp = data;
1234	Sg_device *sdp = NULL;
1235	Sg_fd *sfp;
1236	unsigned long iflags;
1237	unsigned int ms;
1238
1239	if (NULL == srp) {
1240		printk(KERN_ERR "sg_cmd_done: NULL request\n");
1241		return;
1242	}
1243	sfp = srp->parentfp;
1244	if (sfp)
1245		sdp = sfp->parentdp;
1246	if ((NULL == sdp) || sdp->detached) {
1247		printk(KERN_INFO "sg_cmd_done: device detached\n");
1248		return;
1249	}
1250
1251
1252	SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
1253		sdp->disk->disk_name, srp->header.pack_id, result));
1254	srp->header.resid = resid;
1255	ms = jiffies_to_msecs(jiffies);
1256	srp->header.duration = (ms > srp->header.duration) ?
1257				(ms - srp->header.duration) : 0;
1258	if (0 != result) {
1259		struct scsi_sense_hdr sshdr;
1260
1261		memcpy(srp->sense_b, sense, sizeof (srp->sense_b));
1262		srp->header.status = 0xff & result;
1263		srp->header.masked_status = status_byte(result);
1264		srp->header.msg_status = msg_byte(result);
1265		srp->header.host_status = host_byte(result);
1266		srp->header.driver_status = driver_byte(result);
1267		if ((sdp->sgdebug > 0) &&
1268		    ((CHECK_CONDITION == srp->header.masked_status) ||
1269		     (COMMAND_TERMINATED == srp->header.masked_status)))
1270			__scsi_print_sense("sg_cmd_done", sense,
1271					   SCSI_SENSE_BUFFERSIZE);
1272
1273		/* Following if statement is a patch supplied by Eric Youngdale */
1274		if (driver_byte(result) != 0
1275		    && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)
1276		    && !scsi_sense_is_deferred(&sshdr)
1277		    && sshdr.sense_key == UNIT_ATTENTION
1278		    && sdp->device->removable) {
1279			/* Detected possible disc change. Set the bit - this */
1280			/* may be used if there are filesystems using this device */
1281			sdp->device->changed = 1;
1282		}
1283	}
1284	/* Rely on write phase to clean out srp status values, so no "else" */
1285
1286	if (sfp->closed) {	/* whoops this fd already released, cleanup */
1287		SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, freeing ...\n"));
1288		sg_finish_rem_req(srp);
1289		srp = NULL;
1290		if (NULL == sfp->headrp) {
1291			SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, final cleanup\n"));
1292			if (0 == sg_remove_sfp(sdp, sfp)) {	/* device still present */
1293				scsi_device_put(sdp->device);
1294			}
1295			sfp = NULL;
1296		}
1297	} else if (srp && srp->orphan) {
1298		if (sfp->keep_orphan)
1299			srp->sg_io_owned = 0;
1300		else {
1301			sg_finish_rem_req(srp);
1302			srp = NULL;
1303		}
1304	}
1305	if (sfp && srp) {
1306		/* Now wake up any sg_read() that is waiting for this packet. */
1307		kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
1308		write_lock_irqsave(&sfp->rq_list_lock, iflags);
1309		srp->done = 1;
1310		wake_up_interruptible(&sfp->read_wait);
1311		write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1312	}
1313}
1314
1315static struct file_operations sg_fops = {
1316	.owner = THIS_MODULE,
1317	.read = sg_read,
1318	.write = sg_write,
1319	.poll = sg_poll,
1320	.ioctl = sg_ioctl,
1321#ifdef CONFIG_COMPAT
1322	.compat_ioctl = sg_compat_ioctl,
1323#endif
1324	.open = sg_open,
1325	.mmap = sg_mmap,
1326	.release = sg_release,
1327	.fasync = sg_fasync,
1328};
1329
1330static struct class *sg_sysfs_class;
1331
1332static int sg_sysfs_valid = 0;
1333
1334static int sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1335{
1336	struct request_queue *q = scsidp->request_queue;
1337	Sg_device *sdp;
1338	unsigned long iflags;
1339	void *old_sg_dev_arr = NULL;
1340	int k, error;
1341
1342	sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
1343	if (!sdp) {
1344		printk(KERN_WARNING "kmalloc Sg_device failure\n");
1345		return -ENOMEM;
1346	}
1347
1348	write_lock_irqsave(&sg_dev_arr_lock, iflags);
1349	if (unlikely(sg_nr_dev >= sg_dev_max)) {	/* try to resize */
1350		Sg_device **tmp_da;
1351		int tmp_dev_max = sg_nr_dev + SG_DEV_ARR_LUMP;
1352		write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1353
1354		tmp_da = kzalloc(tmp_dev_max * sizeof(Sg_device *), GFP_KERNEL);
1355		if (unlikely(!tmp_da))
1356			goto expand_failed;
1357
1358		write_lock_irqsave(&sg_dev_arr_lock, iflags);
1359		memcpy(tmp_da, sg_dev_arr, sg_dev_max * sizeof(Sg_device *));
1360		old_sg_dev_arr = sg_dev_arr;
1361		sg_dev_arr = tmp_da;
1362		sg_dev_max = tmp_dev_max;
1363	}
1364
1365	for (k = 0; k < sg_dev_max; k++)
1366		if (!sg_dev_arr[k])
1367			break;
1368	if (unlikely(k >= SG_MAX_DEVS))
1369		goto overflow;
1370
1371	SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
1372	sprintf(disk->disk_name, "sg%d", k);
1373	disk->first_minor = k;
1374	sdp->disk = disk;
1375	sdp->device = scsidp;
1376	init_waitqueue_head(&sdp->o_excl_wait);
1377	sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments);
1378
1379	sg_nr_dev++;
1380	sg_dev_arr[k] = sdp;
1381	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1382	error = k;
1383
1384 out:
1385	if (error < 0)
1386		kfree(sdp);
1387	kfree(old_sg_dev_arr);
1388	return error;
1389
1390 expand_failed:
1391	printk(KERN_WARNING "sg_alloc: device array cannot be resized\n");
1392	error = -ENOMEM;
1393	goto out;
1394
1395 overflow:
1396	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1397	sdev_printk(KERN_WARNING, scsidp,
1398		    "Unable to attach sg device type=%d, minor "
1399		    "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);
1400	error = -ENODEV;
1401	goto out;
1402}
1403
1404static int
1405sg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
1406{
1407	struct scsi_device *scsidp = to_scsi_device(cl_dev->dev);
1408	struct gendisk *disk;
1409	Sg_device *sdp = NULL;
1410	struct cdev * cdev = NULL;
1411	int error, k;
1412	unsigned long iflags;
1413
1414	disk = alloc_disk(1);
1415	if (!disk) {
1416		printk(KERN_WARNING "alloc_disk failed\n");
1417		return -ENOMEM;
1418	}
1419	disk->major = SCSI_GENERIC_MAJOR;
1420
1421	error = -ENOMEM;
1422	cdev = cdev_alloc();
1423	if (!cdev) {
1424		printk(KERN_WARNING "cdev_alloc failed\n");
1425		goto out;
1426	}
1427	cdev->owner = THIS_MODULE;
1428	cdev->ops = &sg_fops;
1429
1430	error = sg_alloc(disk, scsidp);
1431	if (error < 0) {
1432		printk(KERN_WARNING "sg_alloc failed\n");
1433		goto out;
1434	}
1435	k = error;
1436	sdp = sg_dev_arr[k];
1437
1438	error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, k), 1);
1439	if (error)
1440		goto cdev_add_err;
1441
1442	sdp->cdev = cdev;
1443	if (sg_sysfs_valid) {
1444		struct class_device * sg_class_member;
1445
1446		sg_class_member = class_device_create(sg_sysfs_class, NULL,
1447				MKDEV(SCSI_GENERIC_MAJOR, k),
1448				cl_dev->dev, "%s",
1449				disk->disk_name);
1450		if (IS_ERR(sg_class_member))
1451			printk(KERN_WARNING "sg_add: "
1452				"class_device_create failed\n");
1453		class_set_devdata(sg_class_member, sdp);
1454		error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
1455					  &sg_class_member->kobj, "generic");
1456		if (error)
1457			printk(KERN_ERR "sg_add: unable to make symlink "
1458					"'generic' back to sg%d\n", k);
1459	} else
1460		printk(KERN_WARNING "sg_add: sg_sys INvalid\n");
1461
1462	sdev_printk(KERN_NOTICE, scsidp,
1463		    "Attached scsi generic sg%d type %d\n", k,scsidp->type);
1464
1465	return 0;
1466
1467cdev_add_err:
1468	write_lock_irqsave(&sg_dev_arr_lock, iflags);
1469	kfree(sg_dev_arr[k]);
1470	sg_dev_arr[k] = NULL;
1471	sg_nr_dev--;
1472	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1473
1474out:
1475	put_disk(disk);
1476	if (cdev)
1477		cdev_del(cdev);
1478	return error;
1479}
1480
1481static void
1482sg_remove(struct class_device *cl_dev, struct class_interface *cl_intf)
1483{
1484	struct scsi_device *scsidp = to_scsi_device(cl_dev->dev);
1485	Sg_device *sdp = NULL;
1486	unsigned long iflags;
1487	Sg_fd *sfp;
1488	Sg_fd *tsfp;
1489	Sg_request *srp;
1490	Sg_request *tsrp;
1491	int k, delay;
1492
1493	if (NULL == sg_dev_arr)
1494		return;
1495	delay = 0;
1496	write_lock_irqsave(&sg_dev_arr_lock, iflags);
1497	for (k = 0; k < sg_dev_max; k++) {
1498		sdp = sg_dev_arr[k];
1499		if ((NULL == sdp) || (sdp->device != scsidp))
1500			continue;	/* dirty but lowers nesting */
1501		if (sdp->headfp) {
1502			sdp->detached = 1;
1503			for (sfp = sdp->headfp; sfp; sfp = tsfp) {
1504				tsfp = sfp->nextfp;
1505				for (srp = sfp->headrp; srp; srp = tsrp) {
1506					tsrp = srp->nextrp;
1507					if (sfp->closed || (0 == sg_srp_done(srp, sfp)))
1508						sg_finish_rem_req(srp);
1509				}
1510				if (sfp->closed) {
1511					scsi_device_put(sdp->device);
1512					__sg_remove_sfp(sdp, sfp);
1513				} else {
1514					delay = 1;
1515					wake_up_interruptible(&sfp->read_wait);
1516					kill_fasync(&sfp->async_qp, SIGPOLL,
1517						    POLL_HUP);
1518				}
1519			}
1520			SCSI_LOG_TIMEOUT(3, printk("sg_remove: dev=%d, dirty\n", k));
1521			if (NULL == sdp->headfp) {
1522				sg_dev_arr[k] = NULL;
1523			}
1524		} else {	/* nothing active, simple case */
1525			SCSI_LOG_TIMEOUT(3, printk("sg_remove: dev=%d\n", k));
1526			sg_dev_arr[k] = NULL;
1527		}
1528		sg_nr_dev--;
1529		break;
1530	}
1531	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1532
1533	if (sdp) {
1534		sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
1535		class_device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, k));
1536		cdev_del(sdp->cdev);
1537		sdp->cdev = NULL;
1538		put_disk(sdp->disk);
1539		sdp->disk = NULL;
1540		if (NULL == sdp->headfp)
1541			kfree((char *) sdp);
1542	}
1543
1544	if (delay)
1545		msleep(10);	/* dirty detach so delay device destruction */
1546}
1547
1548module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
1549module_param_named(def_reserved_size, def_reserved_size, int,
1550		   S_IRUGO | S_IWUSR);
1551module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);
1552
1553MODULE_AUTHOR("Douglas Gilbert");
1554MODULE_DESCRIPTION("SCSI generic (sg) driver");
1555MODULE_LICENSE("GPL");
1556MODULE_VERSION(SG_VERSION_STR);
1557MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);
1558
1559MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
1560                "size (default: max(SG_SCATTER_SZ, PAGE_SIZE))");
1561MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
1562MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
1563
1564static int __init
1565init_sg(void)
1566{
1567	int rc;
1568
1569	if (scatter_elem_sz < PAGE_SIZE) {
1570		scatter_elem_sz = PAGE_SIZE;
1571		scatter_elem_sz_prev = scatter_elem_sz;
1572	}
1573	if (def_reserved_size >= 0)
1574		sg_big_buff = def_reserved_size;
1575	else
1576		def_reserved_size = sg_big_buff;
1577
1578	rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
1579				    SG_MAX_DEVS, "sg");
1580	if (rc)
1581		return rc;
1582        sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
1583        if ( IS_ERR(sg_sysfs_class) ) {
1584		rc = PTR_ERR(sg_sysfs_class);
1585		goto err_out;
1586        }
1587	sg_sysfs_valid = 1;
1588	rc = scsi_register_interface(&sg_interface);
1589	if (0 == rc) {
1590#ifdef CONFIG_SCSI_PROC_FS
1591		sg_proc_init();
1592#endif				/* CONFIG_SCSI_PROC_FS */
1593		return 0;
1594	}
1595	class_destroy(sg_sysfs_class);
1596err_out:
1597	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
1598	return rc;
1599}
1600
1601static void __exit
1602exit_sg(void)
1603{
1604#ifdef CONFIG_SCSI_PROC_FS
1605	sg_proc_cleanup();
1606#endif				/* CONFIG_SCSI_PROC_FS */
1607	scsi_unregister_interface(&sg_interface);
1608	class_destroy(sg_sysfs_class);
1609	sg_sysfs_valid = 0;
1610	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
1611				 SG_MAX_DEVS);
1612	kfree((char *)sg_dev_arr);
1613	sg_dev_arr = NULL;
1614	sg_dev_max = 0;
1615}
1616
1617static int
1618sg_start_req(Sg_request * srp)
1619{
1620	int res;
1621	Sg_fd *sfp = srp->parentfp;
1622	sg_io_hdr_t *hp = &srp->header;
1623	int dxfer_len = (int) hp->dxfer_len;
1624	int dxfer_dir = hp->dxfer_direction;
1625	Sg_scatter_hold *req_schp = &srp->data;
1626	Sg_scatter_hold *rsv_schp = &sfp->reserve;
1627
1628	SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
1629	if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
1630		return 0;
1631	if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) &&
1632	    (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) &&
1633	    (!sfp->parentdp->device->host->unchecked_isa_dma)) {
1634		res = sg_build_direct(srp, sfp, dxfer_len);
1635		if (res <= 0)	/* -ve -> error, 0 -> done, 1 -> try indirect */
1636			return res;
1637	}
1638	if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen))
1639		sg_link_reserve(sfp, srp, dxfer_len);
1640	else {
1641		res = sg_build_indirect(req_schp, sfp, dxfer_len);
1642		if (res) {
1643			sg_remove_scat(req_schp);
1644			return res;
1645		}
1646	}
1647	return 0;
1648}
1649
1650static void
1651sg_finish_rem_req(Sg_request * srp)
1652{
1653	Sg_fd *sfp = srp->parentfp;
1654	Sg_scatter_hold *req_schp = &srp->data;
1655
1656	SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used));
1657	if (srp->res_used)
1658		sg_unlink_reserve(sfp, srp);
1659	else
1660		sg_remove_scat(req_schp);
1661	sg_remove_request(sfp, srp);
1662}
1663
1664static int
1665sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
1666{
1667	int sg_bufflen = tablesize * sizeof(struct scatterlist);
1668	gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
1669
1670	if (sfp->low_dma)
1671		 gfp_flags |= GFP_DMA;
1672	schp->buffer = kzalloc(sg_bufflen, gfp_flags);
1673	if (!schp->buffer)
1674		return -ENOMEM;
1675	schp->sglist_len = sg_bufflen;
1676	return tablesize;	/* number of scat_gath elements allocated */
1677}
1678
1679#ifdef SG_ALLOW_DIO_CODE
1680/* vvvvvvvv  following code borrowed from st driver's direct IO vvvvvvvvv */
1681	/* TODO: hopefully we can use the generic block layer code */
1682
1683/* Pin down user pages and put them into a scatter gather list. Returns <= 0 if
1684   - mapping of all pages not successful
1685   (i.e., either completely successful or fails)
1686*/
1687static int
1688st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
1689	          unsigned long uaddr, size_t count, int rw)
1690{
1691	unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
1692	unsigned long start = uaddr >> PAGE_SHIFT;
1693	const int nr_pages = end - start;
1694	int res, i, j;
1695	struct page **pages;
1696
1697	/* User attempted Overflow! */
1698	if ((uaddr + count) < uaddr)
1699		return -EINVAL;
1700
1701	/* Too big */
1702        if (nr_pages > max_pages)
1703		return -ENOMEM;
1704
1705	/* Hmm? */
1706	if (count == 0)
1707		return 0;
1708
1709	if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_ATOMIC)) == NULL)
1710		return -ENOMEM;
1711
1712        /* Try to fault in all of the necessary pages */
1713	down_read(&current->mm->mmap_sem);
1714        /* rw==READ means read from drive, write into memory area */
1715	res = get_user_pages(
1716		current,
1717		current->mm,
1718		uaddr,
1719		nr_pages,
1720		rw == READ,
1721		0, /* don't force */
1722		pages,
1723		NULL);
1724	up_read(&current->mm->mmap_sem);
1725
1726	/* Errors and no page mapped should return here */
1727	if (res < nr_pages)
1728		goto out_unmap;
1729
1730        for (i=0; i < nr_pages; i++) {
1731		flush_dcache_page(pages[i]);
1732		/* ?? Is locking needed? I don't think so */
1733		/* if (TestSetPageLocked(pages[i]))
1734		   goto out_unlock; */
1735        }
1736
1737	sgl[0].page = pages[0];
1738	sgl[0].offset = uaddr & ~PAGE_MASK;
1739	if (nr_pages > 1) {
1740		sgl[0].length = PAGE_SIZE - sgl[0].offset;
1741		count -= sgl[0].length;
1742		for (i=1; i < nr_pages ; i++) {
1743			sgl[i].page = pages[i];
1744			sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE;
1745			count -= PAGE_SIZE;
1746		}
1747	}
1748	else {
1749		sgl[0].length = count;
1750	}
1751
1752	kfree(pages);
1753	return nr_pages;
1754
1755 out_unmap:
1756	if (res > 0) {
1757		for (j=0; j < res; j++)
1758			page_cache_release(pages[j]);
1759		res = 0;
1760	}
1761	kfree(pages);
1762	return res;
1763}
1764
1765
1766/* And unmap them... */
1767static int
1768st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
1769		    int dirtied)
1770{
1771	int i;
1772
1773	for (i=0; i < nr_pages; i++) {
1774		struct page *page = sgl[i].page;
1775
1776		if (dirtied)
1777			SetPageDirty(page);
1778		/* unlock_page(page); */
1779		page_cache_release(page);
1780	}
1781
1782	return 0;
1783}
1784
1785/* ^^^^^^^^  above code borrowed from st driver's direct IO ^^^^^^^^^ */
1786#endif
1787
1788
1789/* Returns: -ve -> error, 0 -> done, 1 -> try indirect */
1790static int
1791sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
1792{
1793#ifdef SG_ALLOW_DIO_CODE
1794	sg_io_hdr_t *hp = &srp->header;
1795	Sg_scatter_hold *schp = &srp->data;
1796	int sg_tablesize = sfp->parentdp->sg_tablesize;
1797	int mx_sc_elems, res;
1798	struct scsi_device *sdev = sfp->parentdp->device;
1799
1800	if (((unsigned long)hp->dxferp &
1801			queue_dma_alignment(sdev->request_queue)) != 0)
1802		return 1;
1803
1804	mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1805        if (mx_sc_elems <= 0) {
1806                return 1;
1807        }
1808	res = st_map_user_pages(schp->buffer, mx_sc_elems,
1809				(unsigned long)hp->dxferp, dxfer_len,
1810				(SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0);
1811	if (res <= 0) {
1812		sg_remove_scat(schp);
1813		return 1;
1814	}
1815	schp->k_use_sg = res;
1816	schp->dio_in_use = 1;
1817	hp->info |= SG_INFO_DIRECT_IO;
1818	return 0;
1819#else
1820	return 1;
1821#endif
1822}
1823
1824static int
1825sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1826{
1827	struct scatterlist *sg;
1828	int ret_sz = 0, k, rem_sz, num, mx_sc_elems;
1829	int sg_tablesize = sfp->parentdp->sg_tablesize;
1830	int blk_size = buff_size;
1831	struct page *p = NULL;
1832
1833	if ((blk_size < 0) || (!sfp))
1834		return -EFAULT;
1835	if (0 == blk_size)
1836		++blk_size;	/* don't know why */
1837/* round request up to next highest SG_SECTOR_SZ byte boundary */
1838	blk_size = (blk_size + SG_SECTOR_MSK) & (~SG_SECTOR_MSK);
1839	SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n",
1840				   buff_size, blk_size));
1841
1842	/* N.B. ret_sz carried into this block ... */
1843	mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1844	if (mx_sc_elems < 0)
1845		return mx_sc_elems;	/* most likely -ENOMEM */
1846
1847	num = scatter_elem_sz;
1848	if (unlikely(num != scatter_elem_sz_prev)) {
1849		if (num < PAGE_SIZE) {
1850			scatter_elem_sz = PAGE_SIZE;
1851			scatter_elem_sz_prev = PAGE_SIZE;
1852		} else
1853			scatter_elem_sz_prev = num;
1854	}
1855	for (k = 0, sg = schp->buffer, rem_sz = blk_size;
1856	     (rem_sz > 0) && (k < mx_sc_elems);
1857	     ++k, rem_sz -= ret_sz, ++sg) {
1858
1859		num = (rem_sz > scatter_elem_sz_prev) ?
1860		      scatter_elem_sz_prev : rem_sz;
1861		p = sg_page_malloc(num, sfp->low_dma, &ret_sz);
1862		if (!p)
1863			return -ENOMEM;
1864
1865		if (num == scatter_elem_sz_prev) {
1866			if (unlikely(ret_sz > scatter_elem_sz_prev)) {
1867				scatter_elem_sz = ret_sz;
1868				scatter_elem_sz_prev = ret_sz;
1869			}
1870		}
1871		sg->page = p;
1872		sg->length = (ret_sz > num) ? num : ret_sz;
1873
1874		SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
1875				 "ret_sz=%d\n", k, num, ret_sz));
1876	}		/* end of for loop */
1877
1878	schp->k_use_sg = k;
1879	SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, "
1880			 "rem_sz=%d\n", k, rem_sz));
1881
1882	schp->bufflen = blk_size;
1883	if (rem_sz > 0)	/* must have failed */
1884		return -ENOMEM;
1885
1886	return 0;
1887}
1888
1889static int
1890sg_write_xfer(Sg_request * srp)
1891{
1892	sg_io_hdr_t *hp = &srp->header;
1893	Sg_scatter_hold *schp = &srp->data;
1894	struct scatterlist *sg = schp->buffer;
1895	int num_xfer = 0;
1896	int j, k, onum, usglen, ksglen, res;
1897	int iovec_count = (int) hp->iovec_count;
1898	int dxfer_dir = hp->dxfer_direction;
1899	unsigned char *p;
1900	unsigned char __user *up;
1901	int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
1902
1903	if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) ||
1904	    (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
1905		num_xfer = (int) (new_interface ? hp->dxfer_len : hp->flags);
1906		if (schp->bufflen < num_xfer)
1907			num_xfer = schp->bufflen;
1908	}
1909	if ((num_xfer <= 0) || (schp->dio_in_use) ||
1910	    (new_interface
1911	     && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
1912		return 0;
1913
1914	SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
1915			  num_xfer, iovec_count, schp->k_use_sg));
1916	if (iovec_count) {
1917		onum = iovec_count;
1918		if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
1919			return -EFAULT;
1920	} else
1921		onum = 1;
1922
1923	ksglen = sg->length;
1924	p = page_address(sg->page);
1925	for (j = 0, k = 0; j < onum; ++j) {
1926		res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
1927		if (res)
1928			return res;
1929
1930		for (; p; ++sg, ksglen = sg->length,
1931		     p = page_address(sg->page)) {
1932			if (usglen <= 0)
1933				break;
1934			if (ksglen > usglen) {
1935				if (usglen >= num_xfer) {
1936					if (__copy_from_user(p, up, num_xfer))
1937						return -EFAULT;
1938					return 0;
1939				}
1940				if (__copy_from_user(p, up, usglen))
1941					return -EFAULT;
1942				p += usglen;
1943				ksglen -= usglen;
1944				break;
1945			} else {
1946				if (ksglen >= num_xfer) {
1947					if (__copy_from_user(p, up, num_xfer))
1948						return -EFAULT;
1949					return 0;
1950				}
1951				if (__copy_from_user(p, up, ksglen))
1952					return -EFAULT;
1953				up += ksglen;
1954				usglen -= ksglen;
1955			}
1956			++k;
1957			if (k >= schp->k_use_sg)
1958				return 0;
1959		}
1960	}
1961
1962	return 0;
1963}
1964
1965static int
1966sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
1967	   int wr_xf, int *countp, unsigned char __user **up)
1968{
1969	int num_xfer = (int) hp->dxfer_len;
1970	unsigned char __user *p = hp->dxferp;
1971	int count;
1972
1973	if (0 == sg_num) {
1974		if (wr_xf && ('\0' == hp->interface_id))
1975			count = (int) hp->flags;	/* holds "old" input_size */
1976		else
1977			count = num_xfer;
1978	} else {
1979		sg_iovec_t iovec;
1980		if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC))
1981			return -EFAULT;
1982		p = iovec.iov_base;
1983		count = (int) iovec.iov_len;
1984	}
1985	if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count))
1986		return -EFAULT;
1987	if (up)
1988		*up = p;
1989	if (countp)
1990		*countp = count;
1991	return 0;
1992}
1993
1994static void
1995sg_remove_scat(Sg_scatter_hold * schp)
1996{
1997	SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
1998	if (schp->buffer && (schp->sglist_len > 0)) {
1999		struct scatterlist *sg = schp->buffer;
2000
2001		if (schp->dio_in_use) {
2002#ifdef SG_ALLOW_DIO_CODE
2003			st_unmap_user_pages(sg, schp->k_use_sg, TRUE);
2004#endif
2005		} else {
2006			int k;
2007
2008			for (k = 0; (k < schp->k_use_sg) && sg->page;
2009			     ++k, ++sg) {
2010				SCSI_LOG_TIMEOUT(5, printk(
2011				    "sg_remove_scat: k=%d, pg=0x%p, len=%d\n",
2012				    k, sg->page, sg->length));
2013				sg_page_free(sg->page, sg->length);
2014			}
2015		}
2016		kfree(schp->buffer);
2017	}
2018	memset(schp, 0, sizeof (*schp));
2019}
2020
2021static int
2022sg_read_xfer(Sg_request * srp)
2023{
2024	sg_io_hdr_t *hp = &srp->header;
2025	Sg_scatter_hold *schp = &srp->data;
2026	struct scatterlist *sg = schp->buffer;
2027	int num_xfer = 0;
2028	int j, k, onum, usglen, ksglen, res;
2029	int iovec_count = (int) hp->iovec_count;
2030	int dxfer_dir = hp->dxfer_direction;
2031	unsigned char *p;
2032	unsigned char __user *up;
2033	int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
2034
2035	if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir)
2036	    || (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
2037		num_xfer = hp->dxfer_len;
2038		if (schp->bufflen < num_xfer)
2039			num_xfer = schp->bufflen;
2040	}
2041	if ((num_xfer <= 0) || (schp->dio_in_use) ||
2042	    (new_interface
2043	     && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
2044		return 0;
2045
2046	SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
2047			  num_xfer, iovec_count, schp->k_use_sg));
2048	if (iovec_count) {
2049		onum = iovec_count;
2050		if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
2051			return -EFAULT;
2052	} else
2053		onum = 1;
2054
2055	p = page_address(sg->page);
2056	ksglen = sg->length;
2057	for (j = 0, k = 0; j < onum; ++j) {
2058		res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
2059		if (res)
2060			return res;
2061
2062		for (; p; ++sg, ksglen = sg->length,
2063		     p = page_address(sg->page)) {
2064			if (usglen <= 0)
2065				break;
2066			if (ksglen > usglen) {
2067				if (usglen >= num_xfer) {
2068					if (__copy_to_user(up, p, num_xfer))
2069						return -EFAULT;
2070					return 0;
2071				}
2072				if (__copy_to_user(up, p, usglen))
2073					return -EFAULT;
2074				p += usglen;
2075				ksglen -= usglen;
2076				break;
2077			} else {
2078				if (ksglen >= num_xfer) {
2079					if (__copy_to_user(up, p, num_xfer))
2080						return -EFAULT;
2081					return 0;
2082				}
2083				if (__copy_to_user(up, p, ksglen))
2084					return -EFAULT;
2085				up += ksglen;
2086				usglen -= ksglen;
2087			}
2088			++k;
2089			if (k >= schp->k_use_sg)
2090				return 0;
2091		}
2092	}
2093
2094	return 0;
2095}
2096
2097static int
2098sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
2099{
2100	Sg_scatter_hold *schp = &srp->data;
2101	struct scatterlist *sg = schp->buffer;
2102	int k, num;
2103
2104	SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
2105				   num_read_xfer));
2106	if ((!outp) || (num_read_xfer <= 0))
2107		return 0;
2108
2109	for (k = 0; (k < schp->k_use_sg) && sg->page; ++k, ++sg) {
2110		num = sg->length;
2111		if (num > num_read_xfer) {
2112			if (__copy_to_user(outp, page_address(sg->page),
2113					   num_read_xfer))
2114				return -EFAULT;
2115			break;
2116		} else {
2117			if (__copy_to_user(outp, page_address(sg->page),
2118					   num))
2119				return -EFAULT;
2120			num_read_xfer -= num;
2121			if (num_read_xfer <= 0)
2122				break;
2123			outp += num;
2124		}
2125	}
2126
2127	return 0;
2128}
2129
2130static void
2131sg_build_reserve(Sg_fd * sfp, int req_size)
2132{
2133	Sg_scatter_hold *schp = &sfp->reserve;
2134
2135	SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size));
2136	do {
2137		if (req_size < PAGE_SIZE)
2138			req_size = PAGE_SIZE;
2139		if (0 == sg_build_indirect(schp, sfp, req_size))
2140			return;
2141		else
2142			sg_remove_scat(schp);
2143		req_size >>= 1;	/* divide by 2 */
2144	} while (req_size > (PAGE_SIZE / 2));
2145}
2146
2147static void
2148sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
2149{
2150	Sg_scatter_hold *req_schp = &srp->data;
2151	Sg_scatter_hold *rsv_schp = &sfp->reserve;
2152	struct scatterlist *sg = rsv_schp->buffer;
2153	int k, num, rem;
2154
2155	srp->res_used = 1;
2156	SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
2157	rem = size;
2158
2159	for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sg) {
2160		num = sg->length;
2161		if (rem <= num) {
2162			sfp->save_scat_len = num;
2163			sg->length = rem;
2164			req_schp->k_use_sg = k + 1;
2165			req_schp->sglist_len = rsv_schp->sglist_len;
2166			req_schp->buffer = rsv_schp->buffer;
2167
2168			req_schp->bufflen = size;
2169			req_schp->b_malloc_len = rsv_schp->b_malloc_len;
2170			break;
2171		} else
2172			rem -= num;
2173	}
2174
2175	if (k >= rsv_schp->k_use_sg)
2176		SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
2177}
2178
2179static void
2180sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
2181{
2182	Sg_scatter_hold *req_schp = &srp->data;
2183	Sg_scatter_hold *rsv_schp = &sfp->reserve;
2184
2185	SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
2186				   (int) req_schp->k_use_sg));
2187	if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) {
2188		struct scatterlist *sg = rsv_schp->buffer;
2189
2190		if (sfp->save_scat_len > 0)
2191			(sg + (req_schp->k_use_sg - 1))->length =
2192			    (unsigned) sfp->save_scat_len;
2193		else
2194			SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n"));
2195	}
2196	req_schp->k_use_sg = 0;
2197	req_schp->bufflen = 0;
2198	req_schp->buffer = NULL;
2199	req_schp->sglist_len = 0;
2200	sfp->save_scat_len = 0;
2201	srp->res_used = 0;
2202}
2203
2204static Sg_request *
2205sg_get_rq_mark(Sg_fd * sfp, int pack_id)
2206{
2207	Sg_request *resp;
2208	unsigned long iflags;
2209
2210	write_lock_irqsave(&sfp->rq_list_lock, iflags);
2211	for (resp = sfp->headrp; resp; resp = resp->nextrp) {
2212		/* look for requests that are ready + not SG_IO owned */
2213		if ((1 == resp->done) && (!resp->sg_io_owned) &&
2214		    ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
2215			resp->done = 2;	/* guard against other readers */
2216			break;
2217		}
2218	}
2219	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2220	return resp;
2221}
2222
2223#ifdef CONFIG_SCSI_PROC_FS
2224static Sg_request *
2225sg_get_nth_request(Sg_fd * sfp, int nth)
2226{
2227	Sg_request *resp;
2228	unsigned long iflags;
2229	int k;
2230
2231	read_lock_irqsave(&sfp->rq_list_lock, iflags);
2232	for (k = 0, resp = sfp->headrp; resp && (k < nth);
2233	     ++k, resp = resp->nextrp) ;
2234	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2235	return resp;
2236}
2237#endif
2238
2239/* always adds to end of list */
2240static Sg_request *
2241sg_add_request(Sg_fd * sfp)
2242{
2243	int k;
2244	unsigned long iflags;
2245	Sg_request *resp;
2246	Sg_request *rp = sfp->req_arr;
2247
2248	write_lock_irqsave(&sfp->rq_list_lock, iflags);
2249	resp = sfp->headrp;
2250	if (!resp) {
2251		memset(rp, 0, sizeof (Sg_request));
2252		rp->parentfp = sfp;
2253		resp = rp;
2254		sfp->headrp = resp;
2255	} else {
2256		if (0 == sfp->cmd_q)
2257			resp = NULL;	/* command queuing disallowed */
2258		else {
2259			for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
2260				if (!rp->parentfp)
2261					break;
2262			}
2263			if (k < SG_MAX_QUEUE) {
2264				memset(rp, 0, sizeof (Sg_request));
2265				rp->parentfp = sfp;
2266				while (resp->nextrp)
2267					resp = resp->nextrp;
2268				resp->nextrp = rp;
2269				resp = rp;
2270			} else
2271				resp = NULL;
2272		}
2273	}
2274	if (resp) {
2275		resp->nextrp = NULL;
2276		resp->header.duration = jiffies_to_msecs(jiffies);
2277	}
2278	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2279	return resp;
2280}
2281
2282/* Return of 1 for found; 0 for not found */
2283static int
2284sg_remove_request(Sg_fd * sfp, Sg_request * srp)
2285{
2286	Sg_request *prev_rp;
2287	Sg_request *rp;
2288	unsigned long iflags;
2289	int res = 0;
2290
2291	if ((!sfp) || (!srp) || (!sfp->headrp))
2292		return res;
2293	write_lock_irqsave(&sfp->rq_list_lock, iflags);
2294	prev_rp = sfp->headrp;
2295	if (srp == prev_rp) {
2296		sfp->headrp = prev_rp->nextrp;
2297		prev_rp->parentfp = NULL;
2298		res = 1;
2299	} else {
2300		while ((rp = prev_rp->nextrp)) {
2301			if (srp == rp) {
2302				prev_rp->nextrp = rp->nextrp;
2303				rp->parentfp = NULL;
2304				res = 1;
2305				break;
2306			}
2307			prev_rp = rp;
2308		}
2309	}
2310	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2311	return res;
2312}
2313
2314#ifdef CONFIG_SCSI_PROC_FS
2315static Sg_fd *
2316sg_get_nth_sfp(Sg_device * sdp, int nth)
2317{
2318	Sg_fd *resp;
2319	unsigned long iflags;
2320	int k;
2321
2322	read_lock_irqsave(&sg_dev_arr_lock, iflags);
2323	for (k = 0, resp = sdp->headfp; resp && (k < nth);
2324	     ++k, resp = resp->nextfp) ;
2325	read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2326	return resp;
2327}
2328#endif
2329
2330static Sg_fd *
2331sg_add_sfp(Sg_device * sdp, int dev)
2332{
2333	Sg_fd *sfp;
2334	unsigned long iflags;
2335	int bufflen;
2336
2337	sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
2338	if (!sfp)
2339		return NULL;
2340
2341	init_waitqueue_head(&sfp->read_wait);
2342	rwlock_init(&sfp->rq_list_lock);
2343
2344	sfp->timeout = SG_DEFAULT_TIMEOUT;
2345	sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
2346	sfp->force_packid = SG_DEF_FORCE_PACK_ID;
2347	sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
2348	    sdp->device->host->unchecked_isa_dma : 1;
2349	sfp->cmd_q = SG_DEF_COMMAND_Q;
2350	sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
2351	sfp->parentdp = sdp;
2352	write_lock_irqsave(&sg_dev_arr_lock, iflags);
2353	if (!sdp->headfp)
2354		sdp->headfp = sfp;
2355	else {			/* add to tail of existing list */
2356		Sg_fd *pfp = sdp->headfp;
2357		while (pfp->nextfp)
2358			pfp = pfp->nextfp;
2359		pfp->nextfp = sfp;
2360	}
2361	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2362	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
2363	if (unlikely(sg_big_buff != def_reserved_size))
2364		sg_big_buff = def_reserved_size;
2365
2366	bufflen = min_t(int, sg_big_buff,
2367			sdp->device->request_queue->max_sectors * 512);
2368	sg_build_reserve(sfp, bufflen);
2369	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp:   bufflen=%d, k_use_sg=%d\n",
2370			   sfp->reserve.bufflen, sfp->reserve.k_use_sg));
2371	return sfp;
2372}
2373
2374static void
2375__sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
2376{
2377	Sg_fd *fp;
2378	Sg_fd *prev_fp;
2379
2380	prev_fp = sdp->headfp;
2381	if (sfp == prev_fp)
2382		sdp->headfp = prev_fp->nextfp;
2383	else {
2384		while ((fp = prev_fp->nextfp)) {
2385			if (sfp == fp) {
2386				prev_fp->nextfp = fp->nextfp;
2387				break;
2388			}
2389			prev_fp = fp;
2390		}
2391	}
2392	if (sfp->reserve.bufflen > 0) {
2393		SCSI_LOG_TIMEOUT(6,
2394			printk("__sg_remove_sfp:    bufflen=%d, k_use_sg=%d\n",
2395			(int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg));
2396		sg_remove_scat(&sfp->reserve);
2397	}
2398	sfp->parentdp = NULL;
2399	SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp:    sfp=0x%p\n", sfp));
2400	kfree(sfp);
2401}
2402
2403/* Returns 0 in normal case, 1 when detached and sdp object removed */
2404static int
2405sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
2406{
2407	Sg_request *srp;
2408	Sg_request *tsrp;
2409	int dirty = 0;
2410	int res = 0;
2411
2412	for (srp = sfp->headrp; srp; srp = tsrp) {
2413		tsrp = srp->nextrp;
2414		if (sg_srp_done(srp, sfp))
2415			sg_finish_rem_req(srp);
2416		else
2417			++dirty;
2418	}
2419	if (0 == dirty) {
2420		unsigned long iflags;
2421
2422		write_lock_irqsave(&sg_dev_arr_lock, iflags);
2423		__sg_remove_sfp(sdp, sfp);
2424		if (sdp->detached && (NULL == sdp->headfp)) {
2425			int k, maxd;
2426
2427			maxd = sg_dev_max;
2428			for (k = 0; k < maxd; ++k) {
2429				if (sdp == sg_dev_arr[k])
2430					break;
2431			}
2432			if (k < maxd)
2433				sg_dev_arr[k] = NULL;
2434			kfree((char *) sdp);
2435			res = 1;
2436		}
2437		write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2438	} else {
2439		/* MOD_INC's to inhibit unloading sg and associated adapter driver */
2440		/* only bump the access_count if we actually succeeded in
2441		 * throwing another counter on the host module */
2442		scsi_device_get(sdp->device);
2443		sfp->closed = 1;	/* flag dirty state on this fd */
2444		SCSI_LOG_TIMEOUT(1, printk("sg_remove_sfp: worrisome, %d writes pending\n",
2445				  dirty));
2446	}
2447	return res;
2448}
2449
2450static int
2451sg_res_in_use(Sg_fd * sfp)
2452{
2453	const Sg_request *srp;
2454	unsigned long iflags;
2455
2456	read_lock_irqsave(&sfp->rq_list_lock, iflags);
2457	for (srp = sfp->headrp; srp; srp = srp->nextrp)
2458		if (srp->res_used)
2459			break;
2460	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2461	return srp ? 1 : 0;
2462}
2463
2464/* The size fetched (value output via retSzp) set when non-NULL return */
2465static struct page *
2466sg_page_malloc(int rqSz, int lowDma, int *retSzp)
2467{
2468	struct page *resp = NULL;
2469	gfp_t page_mask;
2470	int order, a_size;
2471	int resSz;
2472
2473	if ((rqSz <= 0) || (NULL == retSzp))
2474		return resp;
2475
2476	if (lowDma)
2477		page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN;
2478	else
2479		page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
2480
2481	for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
2482	     order++, a_size <<= 1) ;
2483	resSz = a_size;		/* rounded up if necessary */
2484	resp = alloc_pages(page_mask, order);
2485	while ((!resp) && order) {
2486		--order;
2487		a_size >>= 1;	/* divide by 2, until PAGE_SIZE */
2488		resp =  alloc_pages(page_mask, order);	/* try half */
2489		resSz = a_size;
2490	}
2491	if (resp) {
2492		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2493			memset(page_address(resp), 0, resSz);
2494		*retSzp = resSz;
2495	}
2496	return resp;
2497}
2498
2499static void
2500sg_page_free(struct page *page, int size)
2501{
2502	int order, a_size;
2503
2504	if (!page)
2505		return;
2506	for (order = 0, a_size = PAGE_SIZE; a_size < size;
2507	     order++, a_size <<= 1) ;
2508	__free_pages(page, order);
2509}
2510
2511#ifndef MAINTENANCE_IN_CMD
2512#define MAINTENANCE_IN_CMD 0xa3
2513#endif
2514
2515static unsigned char allow_ops[] = { TEST_UNIT_READY, REQUEST_SENSE,
2516	INQUIRY, READ_CAPACITY, READ_BUFFER, READ_6, READ_10, READ_12,
2517	READ_16, MODE_SENSE, MODE_SENSE_10, LOG_SENSE, REPORT_LUNS,
2518	SERVICE_ACTION_IN, RECEIVE_DIAGNOSTIC, READ_LONG, MAINTENANCE_IN_CMD
2519};
2520
2521static int
2522sg_allow_access(unsigned char opcode, char dev_type)
2523{
2524	int k;
2525
2526	if (TYPE_SCANNER == dev_type)	/* TYPE_ROM maybe burner */
2527		return 1;
2528	for (k = 0; k < sizeof (allow_ops); ++k) {
2529		if (opcode == allow_ops[k])
2530			return 1;
2531	}
2532	return 0;
2533}
2534
2535#ifdef CONFIG_SCSI_PROC_FS
2536static int
2537sg_last_dev(void)
2538{
2539	int k;
2540	unsigned long iflags;
2541
2542	read_lock_irqsave(&sg_dev_arr_lock, iflags);
2543	for (k = sg_dev_max - 1; k >= 0; --k)
2544		if (sg_dev_arr[k] && sg_dev_arr[k]->device)
2545			break;
2546	read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2547	return k + 1;		/* origin 1 */
2548}
2549#endif
2550
2551static Sg_device *
2552sg_get_dev(int dev)
2553{
2554	Sg_device *sdp = NULL;
2555	unsigned long iflags;
2556
2557	if (sg_dev_arr && (dev >= 0)) {
2558		read_lock_irqsave(&sg_dev_arr_lock, iflags);
2559		if (dev < sg_dev_max)
2560			sdp = sg_dev_arr[dev];
2561		read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2562	}
2563	return sdp;
2564}
2565
2566#ifdef CONFIG_SCSI_PROC_FS
2567
2568static struct proc_dir_entry *sg_proc_sgp = NULL;
2569
2570static char sg_proc_sg_dirname[] = "scsi/sg";
2571
2572static int sg_proc_seq_show_int(struct seq_file *s, void *v);
2573
2574static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
2575static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
2576			          size_t count, loff_t *off);
2577static struct file_operations adio_fops = {
2578	/* .owner, .read and .llseek added in sg_proc_init() */
2579	.open = sg_proc_single_open_adio,
2580	.write = sg_proc_write_adio,
2581	.release = single_release,
2582};
2583
2584static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
2585static ssize_t sg_proc_write_dressz(struct file *filp,
2586		const char __user *buffer, size_t count, loff_t *off);
2587static struct file_operations dressz_fops = {
2588	.open = sg_proc_single_open_dressz,
2589	.write = sg_proc_write_dressz,
2590	.release = single_release,
2591};
2592
2593static int sg_proc_seq_show_version(struct seq_file *s, void *v);
2594static int sg_proc_single_open_version(struct inode *inode, struct file *file);
2595static struct file_operations version_fops = {
2596	.open = sg_proc_single_open_version,
2597	.release = single_release,
2598};
2599
2600static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
2601static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
2602static struct file_operations devhdr_fops = {
2603	.open = sg_proc_single_open_devhdr,
2604	.release = single_release,
2605};
2606
2607static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
2608static int sg_proc_open_dev(struct inode *inode, struct file *file);
2609static void * dev_seq_start(struct seq_file *s, loff_t *pos);
2610static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
2611static void dev_seq_stop(struct seq_file *s, void *v);
2612static struct file_operations dev_fops = {
2613	.open = sg_proc_open_dev,
2614	.release = seq_release,
2615};
2616static struct seq_operations dev_seq_ops = {
2617	.start = dev_seq_start,
2618	.next  = dev_seq_next,
2619	.stop  = dev_seq_stop,
2620	.show  = sg_proc_seq_show_dev,
2621};
2622
2623static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
2624static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
2625static struct file_operations devstrs_fops = {
2626	.open = sg_proc_open_devstrs,
2627	.release = seq_release,
2628};
2629static struct seq_operations devstrs_seq_ops = {
2630	.start = dev_seq_start,
2631	.next  = dev_seq_next,
2632	.stop  = dev_seq_stop,
2633	.show  = sg_proc_seq_show_devstrs,
2634};
2635
2636static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
2637static int sg_proc_open_debug(struct inode *inode, struct file *file);
2638static struct file_operations debug_fops = {
2639	.open = sg_proc_open_debug,
2640	.release = seq_release,
2641};
2642static struct seq_operations debug_seq_ops = {
2643	.start = dev_seq_start,
2644	.next  = dev_seq_next,
2645	.stop  = dev_seq_stop,
2646	.show  = sg_proc_seq_show_debug,
2647};
2648
2649
2650struct sg_proc_leaf {
2651	const char * name;
2652	struct file_operations * fops;
2653};
2654
2655static struct sg_proc_leaf sg_proc_leaf_arr[] = {
2656	{"allow_dio", &adio_fops},
2657	{"debug", &debug_fops},
2658	{"def_reserved_size", &dressz_fops},
2659	{"device_hdr", &devhdr_fops},
2660	{"devices", &dev_fops},
2661	{"device_strs", &devstrs_fops},
2662	{"version", &version_fops}
2663};
2664
2665static int
2666sg_proc_init(void)
2667{
2668	int k, mask;
2669	int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
2670	struct proc_dir_entry *pdep;
2671	struct sg_proc_leaf * leaf;
2672
2673	sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
2674	if (!sg_proc_sgp)
2675		return 1;
2676	for (k = 0; k < num_leaves; ++k) {
2677		leaf = &sg_proc_leaf_arr[k];
2678		mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
2679		pdep = create_proc_entry(leaf->name, mask, sg_proc_sgp);
2680		if (pdep) {
2681			leaf->fops->owner = THIS_MODULE,
2682			leaf->fops->read = seq_read,
2683			leaf->fops->llseek = seq_lseek,
2684			pdep->proc_fops = leaf->fops;
2685		}
2686	}
2687	return 0;
2688}
2689
2690static void
2691sg_proc_cleanup(void)
2692{
2693	int k;
2694	int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
2695
2696	if (!sg_proc_sgp)
2697		return;
2698	for (k = 0; k < num_leaves; ++k)
2699		remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
2700	remove_proc_entry(sg_proc_sg_dirname, NULL);
2701}
2702
2703
2704static int sg_proc_seq_show_int(struct seq_file *s, void *v)
2705{
2706	seq_printf(s, "%d\n", *((int *)s->private));
2707	return 0;
2708}
2709
2710static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
2711{
2712	return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
2713}
2714
2715static ssize_t
2716sg_proc_write_adio(struct file *filp, const char __user *buffer,
2717		   size_t count, loff_t *off)
2718{
2719	int num;
2720	char buff[11];
2721
2722	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2723		return -EACCES;
2724	num = (count < 10) ? count : 10;
2725	if (copy_from_user(buff, buffer, num))
2726		return -EFAULT;
2727	buff[num] = '\0';
2728	sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
2729	return count;
2730}
2731
2732static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
2733{
2734	return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
2735}
2736
2737static ssize_t
2738sg_proc_write_dressz(struct file *filp, const char __user *buffer,
2739		     size_t count, loff_t *off)
2740{
2741	int num;
2742	unsigned long k = ULONG_MAX;
2743	char buff[11];
2744
2745	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2746		return -EACCES;
2747	num = (count < 10) ? count : 10;
2748	if (copy_from_user(buff, buffer, num))
2749		return -EFAULT;
2750	buff[num] = '\0';
2751	k = simple_strtoul(buff, NULL, 10);
2752	if (k <= 1048576) {	/* limit "big buff" to 1 MB */
2753		sg_big_buff = k;
2754		return count;
2755	}
2756	return -ERANGE;
2757}
2758
2759static int sg_proc_seq_show_version(struct seq_file *s, void *v)
2760{
2761	seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
2762		   sg_version_date);
2763	return 0;
2764}
2765
2766static int sg_proc_single_open_version(struct inode *inode, struct file *file)
2767{
2768	return single_open(file, sg_proc_seq_show_version, NULL);
2769}
2770
2771static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
2772{
2773	seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t"
2774		   "online\n");
2775	return 0;
2776}
2777
2778static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
2779{
2780	return single_open(file, sg_proc_seq_show_devhdr, NULL);
2781}
2782
2783struct sg_proc_deviter {
2784	loff_t	index;
2785	size_t	max;
2786};
2787
2788static void * dev_seq_start(struct seq_file *s, loff_t *pos)
2789{
2790	struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
2791
2792	s->private = it;
2793	if (! it)
2794		return NULL;
2795
2796	if (NULL == sg_dev_arr)
2797		return NULL;
2798	it->index = *pos;
2799	it->max = sg_last_dev();
2800	if (it->index >= it->max)
2801		return NULL;
2802	return it;
2803}
2804
2805static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
2806{
2807	struct sg_proc_deviter * it = s->private;
2808
2809	*pos = ++it->index;
2810	return (it->index < it->max) ? it : NULL;
2811}
2812
2813static void dev_seq_stop(struct seq_file *s, void *v)
2814{
2815	kfree(s->private);
2816}
2817
2818static int sg_proc_open_dev(struct inode *inode, struct file *file)
2819{
2820        return seq_open(file, &dev_seq_ops);
2821}
2822
2823static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
2824{
2825	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2826	Sg_device *sdp;
2827	struct scsi_device *scsidp;
2828
2829	sdp = it ? sg_get_dev(it->index) : NULL;
2830	if (sdp && (scsidp = sdp->device) && (!sdp->detached))
2831		seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
2832			      scsidp->host->host_no, scsidp->channel,
2833			      scsidp->id, scsidp->lun, (int) scsidp->type,
2834			      1,
2835			      (int) scsidp->queue_depth,
2836			      (int) scsidp->device_busy,
2837			      (int) scsi_device_online(scsidp));
2838	else
2839		seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
2840	return 0;
2841}
2842
2843static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
2844{
2845        return seq_open(file, &devstrs_seq_ops);
2846}
2847
2848static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
2849{
2850	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2851	Sg_device *sdp;
2852	struct scsi_device *scsidp;
2853
2854	sdp = it ? sg_get_dev(it->index) : NULL;
2855	if (sdp && (scsidp = sdp->device) && (!sdp->detached))
2856		seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
2857			   scsidp->vendor, scsidp->model, scsidp->rev);
2858	else
2859		seq_printf(s, "<no active device>\n");
2860	return 0;
2861}
2862
2863static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2864{
2865	int k, m, new_interface, blen, usg;
2866	Sg_request *srp;
2867	Sg_fd *fp;
2868	const sg_io_hdr_t *hp;
2869	const char * cp;
2870	unsigned int ms;
2871
2872	for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) {
2873		seq_printf(s, "   FD(%d): timeout=%dms bufflen=%d "
2874			   "(res)sgat=%d low_dma=%d\n", k + 1,
2875			   jiffies_to_msecs(fp->timeout),
2876			   fp->reserve.bufflen,
2877			   (int) fp->reserve.k_use_sg,
2878			   (int) fp->low_dma);
2879		seq_printf(s, "   cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
2880			   (int) fp->cmd_q, (int) fp->force_packid,
2881			   (int) fp->keep_orphan, (int) fp->closed);
2882		for (m = 0; (srp = sg_get_nth_request(fp, m)); ++m) {
2883			hp = &srp->header;
2884			new_interface = (hp->interface_id == '\0') ? 0 : 1;
2885			if (srp->res_used) {
2886				if (new_interface &&
2887				    (SG_FLAG_MMAP_IO & hp->flags))
2888					cp = "     mmap>> ";
2889				else
2890					cp = "     rb>> ";
2891			} else {
2892				if (SG_INFO_DIRECT_IO_MASK & hp->info)
2893					cp = "     dio>> ";
2894				else
2895					cp = "     ";
2896			}
2897			seq_printf(s, cp);
2898			blen = srp->data.bufflen;
2899			usg = srp->data.k_use_sg;
2900			seq_printf(s, srp->done ?
2901				   ((1 == srp->done) ?  "rcv:" : "fin:")
2902				   : "act:");
2903			seq_printf(s, " id=%d blen=%d",
2904				   srp->header.pack_id, blen);
2905			if (srp->done)
2906				seq_printf(s, " dur=%d", hp->duration);
2907			else {
2908				ms = jiffies_to_msecs(jiffies);
2909				seq_printf(s, " t_o/elap=%d/%d",
2910					(new_interface ? hp->timeout :
2911						  jiffies_to_msecs(fp->timeout)),
2912					(ms > hp->duration ? ms - hp->duration : 0));
2913			}
2914			seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
2915				   (int) srp->data.cmd_opcode);
2916		}
2917		if (0 == m)
2918			seq_printf(s, "     No requests active\n");
2919	}
2920}
2921
2922static int sg_proc_open_debug(struct inode *inode, struct file *file)
2923{
2924        return seq_open(file, &debug_seq_ops);
2925}
2926
2927static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
2928{
2929	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2930	Sg_device *sdp;
2931
2932	if (it && (0 == it->index)) {
2933		seq_printf(s, "dev_max(currently)=%d max_active_device=%d "
2934			   "(origin 1)\n", sg_dev_max, (int)it->max);
2935		seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
2936	}
2937	sdp = it ? sg_get_dev(it->index) : NULL;
2938	if (sdp) {
2939		struct scsi_device *scsidp = sdp->device;
2940
2941		if (NULL == scsidp) {
2942			seq_printf(s, "device %d detached ??\n",
2943				   (int)it->index);
2944			return 0;
2945		}
2946
2947		if (sg_get_nth_sfp(sdp, 0)) {
2948			seq_printf(s, " >>> device=%s ",
2949				sdp->disk->disk_name);
2950			if (sdp->detached)
2951				seq_printf(s, "detached pending close ");
2952			else
2953				seq_printf
2954				    (s, "scsi%d chan=%d id=%d lun=%d   em=%d",
2955				     scsidp->host->host_no,
2956				     scsidp->channel, scsidp->id,
2957				     scsidp->lun,
2958				     scsidp->host->hostt->emulated);
2959			seq_printf(s, " sg_tablesize=%d excl=%d\n",
2960				   sdp->sg_tablesize, sdp->exclude);
2961		}
2962		sg_proc_debug_helper(s, sdp);
2963	}
2964	return 0;
2965}
2966
2967#endif				/* CONFIG_SCSI_PROC_FS */
2968
2969module_init(init_sg);
2970module_exit(exit_sg);
2971