/*-
 * Copyright (c) 2008 Yahoo!, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * LSI MPT-Fusion Host Adapter FreeBSD userland interface
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/ioccom.h>
#include <sys/mpt_ioctl.h>

#include <dev/mpt/mpt.h>

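/*
 * Result of a RAID action, filled in by mpt_user_reply_handler() from the
 * RAID action reply and stored in the request buffer just past the request
 * frame (see mpt_user_raid_action() below).
 */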
struct mpt_user_raid_action_result {
	uint32_t	volume_status;
	uint32_t	action_data[4];
	uint16_t	action_status;
};

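/*
 * Bus DMA state for the kernel bounce buffer that shadows a user-supplied
 * buffer for the duration of an ioctl; managed by mpt_alloc_buffer() and
 * mpt_free_buffer().
 */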
struct mpt_page_memory {
	bus_dma_tag_t	tag;
	bus_dmamap_t	map;
	bus_addr_t	paddr;
	void		*vaddr;
};

static mpt_probe_handler_t	mpt_user_probe;
static mpt_attach_handler_t	mpt_user_attach;
static mpt_enable_handler_t	mpt_user_enable;
static mpt_ready_handler_t	mpt_user_ready;
static mpt_event_handler_t	mpt_user_event;
static mpt_reset_handler_t	mpt_user_reset;
static mpt_detach_handler_t	mpt_user_detach;

static struct mpt_personality mpt_user_personality = {
	.name		= "mpt_user",
	.probe		= mpt_user_probe,
	.attach		= mpt_user_attach,
	.enable		= mpt_user_enable,
	.ready		= mpt_user_ready,
	.event		= mpt_user_event,
	.reset		= mpt_user_reset,
	.detach		= mpt_user_detach,
};

DECLARE_MPT_PERSONALITY(mpt_user, SI_ORDER_SECOND);

static mpt_reply_handler_t	mpt_user_reply_handler;

static d_open_t		mpt_open;
static d_close_t	mpt_close;
static d_ioctl_t	mpt_ioctl;

static struct cdevsw mpt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mpt_open,
	.d_close =	mpt_close,
	.d_ioctl =	mpt_ioctl,
	.d_name =	"mpt",
};

static MALLOC_DEFINE(M_MPTUSER, "mpt_user", "Buffers for mpt(4) ioctls");

static uint32_t user_handler_id = MPT_HANDLER_ID_NONE;

static int
mpt_user_probe(struct mpt_softc *mpt)
{

	/* Attach to every controller. */
	return (0);
}

static int
mpt_user_attach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;
	int error, unit;

	MPT_LOCK(mpt);
	handler.reply_handler = mpt_user_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &user_handler_id);
	MPT_UNLOCK(mpt);
	if (error != 0) {
		mpt_prt(mpt, "Unable to register user handler!\n");
		return (error);
	}
	unit = device_get_unit(mpt->dev);
	mpt->cdev = make_dev(&mpt_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640,
	    "mpt%d", unit);
	if (mpt->cdev == NULL) {
		MPT_LOCK(mpt);
		mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
		    user_handler_id);
		MPT_UNLOCK(mpt);
		return (ENOMEM);
	}
	mpt->cdev->si_drv1 = mpt;
	return (0);
}

static int
mpt_user_enable(struct mpt_softc *mpt)
{

	return (0);
}

static void
mpt_user_ready(struct mpt_softc *mpt)
{

}

static int
mpt_user_event(struct mpt_softc *mpt, request_t *req,
    MSG_EVENT_NOTIFY_REPLY *msg)
{

	/* Someday we may want to let a user daemon listen for events? */
	return (0);
}

static void
mpt_user_reset(struct mpt_softc *mpt, int type)
{

}

static void
mpt_user_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	/* XXX: do a purge of pending requests? */
	destroy_dev(mpt->cdev);

	MPT_LOCK(mpt);
	handler.reply_handler = mpt_user_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    user_handler_id);
	MPT_UNLOCK(mpt);
}

static int
mpt_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	return (0);
}

static int
mpt_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	return (0);
}

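/*
 * Allocate a single-segment, 32-bit addressable DMA buffer used to stage
 * data between userland and the IOC.  Requests larger than 16MB are
 * refused with ENOSPC.
 */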
static int
mpt_alloc_buffer(struct mpt_softc *mpt, struct mpt_page_memory *page_mem,
    size_t len)
{
	struct mpt_map_info mi;
	int error;

	page_mem->vaddr = NULL;

	/* Limit requests to 16M. */
	if (len > 16 * 1024 * 1024)
		return (ENOSPC);
	error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, &page_mem->tag);
	if (error)
		return (error);
	error = bus_dmamem_alloc(page_mem->tag, &page_mem->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &page_mem->map);
	if (error) {
		bus_dma_tag_destroy(page_mem->tag);
		return (error);
	}
	mi.mpt = mpt;
	error = bus_dmamap_load(page_mem->tag, page_mem->map, page_mem->vaddr,
	    len, mpt_map_rquest, &mi, BUS_DMA_NOWAIT);
	if (error == 0)
		error = mi.error;
	if (error) {
		bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
		bus_dma_tag_destroy(page_mem->tag);
		page_mem->vaddr = NULL;
		return (error);
	}
	page_mem->paddr = mi.phys;
	return (0);
}

static void
mpt_free_buffer(struct mpt_page_memory *page_mem)
{

	if (page_mem->vaddr == NULL)
		return;
	bus_dmamap_unload(page_mem->tag, page_mem->map);
	bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
	bus_dma_tag_destroy(page_mem->tag);
	page_mem->vaddr = NULL;
}

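/*
 * Fetch the header of a configuration page (MPI_CONFIG_ACTION_PAGE_HEADER)
 * so that userland can learn the page's version and length before reading
 * or writing the page itself.
 */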
static int
mpt_user_read_cfg_header(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req)
{
	request_t  *req;
	cfgparms_t params;
	MSG_CONFIG *cfgp;
	int	    error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = 0;
	params.PageLength = 0;
	params.PageNumber = page_req->header.PageNumber;
	params.PageType = page_req->header.PageType;
	params.PageAddress = le32toh(page_req->page_address);
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
				  TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "read_cfg_header timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		cfgp = req->req_vbuf;
		bcopy(&cfgp->Header, &page_req->header,
		    sizeof(page_req->header));
	}
	mpt_free_request(mpt, req);
	return (0);
}

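/*
 * Read the current values of a configuration page into the DMA buffer.
 * The caller has already copied the page header supplied by userland into
 * the start of the buffer.
 */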
static int
mpt_user_read_cfg_page(struct mpt_softc *mpt, struct mpt_cfg_page_req *page_req,
    struct mpt_page_memory *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	int	      error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	hdr = mpt_page->vaddr;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
	params.PageAddress = le32toh(page_req->page_address);
	bus_dmamap_sync(mpt_page->tag, mpt_page->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(page_req->len), TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_user_read_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}

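/*
 * Same as mpt_user_read_cfg_header(), but for extended configuration pages
 * (MPI_CONFIG_PAGETYPE_EXTENDED), which carry their length and type in
 * ExtPageLength/ExtPageType.
 */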
static int
mpt_user_read_extcfg_header(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req)
{
	request_t  *req;
	cfgparms_t params;
	MSG_CONFIG_REPLY *cfgp;
	int	    error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = ext_page_req->header.PageVersion;
	params.PageLength = 0;
	params.PageNumber = ext_page_req->header.PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = le32toh(ext_page_req->page_address);
	params.ExtPageType = ext_page_req->header.ExtPageType;
	params.ExtPageLength = 0;
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
				  TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "mpt_user_read_extcfg_header timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		cfgp = req->req_vbuf;
		ext_page_req->header.PageVersion = cfgp->Header.PageVersion;
		ext_page_req->header.PageNumber = cfgp->Header.PageNumber;
		ext_page_req->header.PageType = cfgp->Header.PageType;
		ext_page_req->header.ExtPageLength = cfgp->ExtPageLength;
		ext_page_req->header.ExtPageType = cfgp->ExtPageType;
	}
	mpt_free_request(mpt, req);
	return (0);
}

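/*
 * Read the current values of an extended configuration page into the DMA
 * buffer, using the extended page header supplied by userland at the start
 * of the buffer.
 */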
static int
mpt_user_read_extcfg_page(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req, struct mpt_page_memory *mpt_page)
{
	CONFIG_EXTENDED_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	int	      error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	hdr = mpt_page->vaddr;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = 0;
	params.PageNumber = hdr->PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = le32toh(ext_page_req->page_address);
	params.ExtPageType = hdr->ExtPageType;
	params.ExtPageLength = hdr->ExtPageLength;
	bus_dmamap_sync(mpt_page->tag, mpt_page->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(ext_page_req->len), TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_user_read_extcfg_page timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}

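/*
 * Write a configuration page from the DMA buffer back to the IOC.  Only
 * pages whose attribute marks them as changeable or persistent may be
 * written; anything else fails with EINVAL.
 */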
static int
mpt_user_write_cfg_page(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req, struct mpt_page_memory *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	u_int	      hdr_attr;
	int	      error;

	hdr = mpt_page->vaddr;
	hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
	if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
	    hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
		mpt_prt(mpt, "page type 0x%x not changeable\n",
			hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
		return (EINVAL);
	}

#if	0
	/*
	 * We shouldn't mask off other bits here.
	 */
	hdr->PageType &= ~MPI_CONFIG_PAGETYPE_MASK;
#endif

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL)
		return (ENOMEM);

	bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	/*
	 * There isn't any point in restoring stripped out attributes
	 * if you then mask them going down to issue the request.
	 */

	params.Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageAddress = le32toh(page_req->page_address);
#if	0
	/* Restore stripped out attributes */
	hdr->PageType |= hdr_attr;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
#else
	params.PageType = hdr->PageType;
#endif
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(page_req->len), TRUE, 5000);
	if (error != 0) {
		mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}

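/*
 * Reply handler for requests issued on behalf of userland.  For RAID
 * action replies the interesting fields are copied into the
 * mpt_user_raid_action_result area of the request buffer before the
 * waiting thread is woken up (or, for a request that already timed out,
 * before the request is freed).
 */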
static int
mpt_user_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_RAID_ACTION_REPLY *reply;
	struct mpt_user_raid_action_result *res;

	if (req == NULL)
		return (TRUE);

	if (reply_frame != NULL) {
		reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
		req->IOCStatus = le16toh(reply->IOCStatus);
		res = (struct mpt_user_raid_action_result *)
		    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
		res->action_status = reply->ActionStatus;
		res->volume_status = reply->VolumeStatus;
		bcopy(&reply->ActionData, res->action_data,
		    sizeof(res->action_data));
	}

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
		/*
		 * Whew- we can free this request (late completion)
		 */
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}

/*
 * We use the first part of the request buffer after the request frame
 * to hold the action data and action status from the RAID reply.  The
 * rest of the request buffer is used to hold the buffer for the
 * action SGE.
 */
static int
mpt_user_raid_action(struct mpt_softc *mpt, struct mpt_raid_action *raid_act,
	struct mpt_page_memory *mpt_page)
{
	request_t *req;
	struct mpt_user_raid_action_result *res;
	MSG_RAID_ACTION_REQUEST *rap;
	SGE_SIMPLE32 *se;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL)
		return (ENOMEM);
	rap = req->req_vbuf;
	memset(rap, 0, sizeof *rap);
	rap->Action = raid_act->action;
	rap->ActionDataWord = raid_act->action_data_word;
	rap->Function = MPI_FUNCTION_RAID_ACTION;
	rap->VolumeID = raid_act->volume_id;
	rap->VolumeBus = raid_act->volume_bus;
	rap->PhysDiskNum = raid_act->phys_disk_num;
	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
	if (mpt_page->vaddr != NULL && raid_act->len != 0) {
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		se->Address = htole32(mpt_page->paddr);
		MPI_pSGE_SET_LENGTH(se, le32toh(raid_act->len));
		MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_END_OF_LIST |
		    (raid_act->write ? MPI_SGE_FLAGS_HOST_TO_IOC :
		    MPI_SGE_FLAGS_IOC_TO_HOST)));
	}
	se->FlagsLength = htole32(se->FlagsLength);
	rap->MsgContext = htole32(req->index | user_handler_id);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);

	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, TRUE,
	    2000);
	if (error != 0) {
		/*
		 * Leave request so it can be cleaned up later.
		 */
		mpt_prt(mpt, "mpt_user_raid_action timed out\n");
		return (error);
	}

	raid_act->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		return (0);
	}

	res = (struct mpt_user_raid_action_result *)
	    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
	raid_act->volume_status = res->volume_status;
	raid_act->action_status = res->action_status;
	bcopy(res->action_data, raid_act->action_data,
	    sizeof(res->action_data));
	if (mpt_page->vaddr != NULL)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}

#ifdef __amd64__
#define	PTRIN(p)		((void *)(uintptr_t)(p))
#define PTROUT(v)		((u_int32_t)(uintptr_t)(v))
#endif

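/*
 * ioctl entry point for /dev/mpt<unit>.  On amd64 the *32 ioctl variants
 * are translated to and from the native structures on entry and exit so
 * that 32-bit consumers of the interface keep working.
 *
 * Rough userland sketch (illustrative only; error handling omitted, and
 * the unit number and page selection are assumptions):
 *
 *	struct mpt_cfg_page_req req;
 *	int fd = open("/dev/mpt0", O_RDWR);
 *
 *	memset(&req, 0, sizeof(req));
 *	req.header.PageType = MPI_CONFIG_PAGETYPE_IOC;
 *	req.header.PageNumber = 0;
 *	ioctl(fd, MPTIO_READ_CFG_HEADER, &req);
 */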
static int
mpt_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
{
	struct mpt_softc *mpt;
	struct mpt_cfg_page_req *page_req;
	struct mpt_ext_cfg_page_req *ext_page_req;
	struct mpt_raid_action *raid_act;
	struct mpt_page_memory mpt_page;
#ifdef __amd64__
	struct mpt_cfg_page_req32 *page_req32;
	struct mpt_cfg_page_req page_req_swab;
	struct mpt_ext_cfg_page_req32 *ext_page_req32;
	struct mpt_ext_cfg_page_req ext_page_req_swab;
	struct mpt_raid_action32 *raid_act32;
	struct mpt_raid_action raid_act_swab;
#endif
	int error;

	mpt = dev->si_drv1;
	page_req = (void *)arg;
	ext_page_req = (void *)arg;
	raid_act = (void *)arg;
	mpt_page.vaddr = NULL;

#ifdef __amd64__
	/* Convert 32-bit structs to native ones. */
	page_req32 = (void *)arg;
	ext_page_req32 = (void *)arg;
	raid_act32 = (void *)arg;
	switch (cmd) {
	case MPTIO_READ_CFG_HEADER32:
	case MPTIO_READ_CFG_PAGE32:
	case MPTIO_WRITE_CFG_PAGE32:
		page_req = &page_req_swab;
		page_req->header = page_req32->header;
		page_req->page_address = page_req32->page_address;
		page_req->buf = PTRIN(page_req32->buf);
		page_req->len = page_req32->len;
		page_req->ioc_status = page_req32->ioc_status;
		break;
	case MPTIO_READ_EXT_CFG_HEADER32:
	case MPTIO_READ_EXT_CFG_PAGE32:
		ext_page_req = &ext_page_req_swab;
		ext_page_req->header = ext_page_req32->header;
		ext_page_req->page_address = ext_page_req32->page_address;
		ext_page_req->buf = PTRIN(ext_page_req32->buf);
		ext_page_req->len = ext_page_req32->len;
		ext_page_req->ioc_status = ext_page_req32->ioc_status;
		break;
	case MPTIO_RAID_ACTION32:
		raid_act = &raid_act_swab;
		raid_act->action = raid_act32->action;
		raid_act->volume_bus = raid_act32->volume_bus;
		raid_act->volume_id = raid_act32->volume_id;
		raid_act->phys_disk_num = raid_act32->phys_disk_num;
		raid_act->action_data_word = raid_act32->action_data_word;
		raid_act->buf = PTRIN(raid_act32->buf);
		raid_act->len = raid_act32->len;
		raid_act->volume_status = raid_act32->volume_status;
		bcopy(raid_act32->action_data, raid_act->action_data,
		    sizeof(raid_act->action_data));
		raid_act->action_status = raid_act32->action_status;
		raid_act->ioc_status = raid_act32->ioc_status;
		raid_act->write = raid_act32->write;
		break;
	}
#endif

	switch (cmd) {
#ifdef __amd64__
	case MPTIO_READ_CFG_HEADER32:
#endif
	case MPTIO_READ_CFG_HEADER:
		MPT_LOCK(mpt);
		error = mpt_user_read_cfg_header(mpt, page_req);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_READ_CFG_PAGE32:
#endif
	case MPTIO_READ_CFG_PAGE:
		error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
		if (error)
			break;
		error = copyin(page_req->buf, mpt_page.vaddr,
		    sizeof(CONFIG_PAGE_HEADER));
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_read_cfg_page(mpt, page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		error = copyout(mpt_page.vaddr, page_req->buf, page_req->len);
		break;
#ifdef __amd64__
	case MPTIO_READ_EXT_CFG_HEADER32:
#endif
	case MPTIO_READ_EXT_CFG_HEADER:
		MPT_LOCK(mpt);
		error = mpt_user_read_extcfg_header(mpt, ext_page_req);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_READ_EXT_CFG_PAGE32:
#endif
	case MPTIO_READ_EXT_CFG_PAGE:
		error = mpt_alloc_buffer(mpt, &mpt_page, ext_page_req->len);
		if (error)
			break;
		error = copyin(ext_page_req->buf, mpt_page.vaddr,
		    sizeof(CONFIG_EXTENDED_PAGE_HEADER));
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_read_extcfg_page(mpt, ext_page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		error = copyout(mpt_page.vaddr, ext_page_req->buf,
		    ext_page_req->len);
		break;
#ifdef __amd64__
	case MPTIO_WRITE_CFG_PAGE32:
#endif
	case MPTIO_WRITE_CFG_PAGE:
		error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
		if (error)
			break;
		error = copyin(page_req->buf, mpt_page.vaddr, page_req->len);
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_write_cfg_page(mpt, page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_RAID_ACTION32:
#endif
	case MPTIO_RAID_ACTION:
		if (raid_act->buf != NULL) {
			error = mpt_alloc_buffer(mpt, &mpt_page, raid_act->len);
			if (error)
				break;
			error = copyin(raid_act->buf, mpt_page.vaddr,
			    raid_act->len);
			if (error)
				break;
		}
		MPT_LOCK(mpt);
		error = mpt_user_raid_action(mpt, raid_act, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		if (raid_act->buf != NULL)
			error = copyout(mpt_page.vaddr, raid_act->buf,
			    raid_act->len);
		break;
	default:
		error = ENOIOCTL;
		break;
	}

	mpt_free_buffer(&mpt_page);

	if (error)
		return (error);

#ifdef __amd64__
	/* Convert native structs to 32-bit ones. */
	switch (cmd) {
	case MPTIO_READ_CFG_HEADER32:
	case MPTIO_READ_CFG_PAGE32:
	case MPTIO_WRITE_CFG_PAGE32:
		page_req32->header = page_req->header;
		page_req32->page_address = page_req->page_address;
		page_req32->buf = PTROUT(page_req->buf);
		page_req32->len = page_req->len;
		page_req32->ioc_status = page_req->ioc_status;
		break;
	case MPTIO_READ_EXT_CFG_HEADER32:
	case MPTIO_READ_EXT_CFG_PAGE32:
		ext_page_req32->header = ext_page_req->header;
		ext_page_req32->page_address = ext_page_req->page_address;
		ext_page_req32->buf = PTROUT(ext_page_req->buf);
		ext_page_req32->len = ext_page_req->len;
		ext_page_req32->ioc_status = ext_page_req->ioc_status;
		break;
	case MPTIO_RAID_ACTION32:
		raid_act32->action = raid_act->action;
		raid_act32->volume_bus = raid_act->volume_bus;
		raid_act32->volume_id = raid_act->volume_id;
		raid_act32->phys_disk_num = raid_act->phys_disk_num;
		raid_act32->action_data_word = raid_act->action_data_word;
		raid_act32->buf = PTROUT(raid_act->buf);
		raid_act32->len = raid_act->len;
		raid_act32->volume_status = raid_act->volume_status;
		bcopy(raid_act->action_data, raid_act32->action_data,
		    sizeof(raid_act->action_data));
		raid_act32->action_status = raid_act->action_status;
		raid_act32->ioc_status = raid_act->ioc_status;
		raid_act32->write = raid_act->write;
		break;
	}
#endif

	return (0);
}