1/*-
2 * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3 *
4 * Copyright (c) 2005, WHEEL Sp. z o.o.
5 * Copyright (c) 2005 Justin T. Gibbs.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are
10 * met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 *    substantially similar to the "NO WARRANTY" disclaimer below
15 *    ("Disclaimer") and any redistribution must be conditioned upon including
16 *    a substantially similar Disclaimer requirement for further binary
17 *    redistribution.
18 * 3. Neither the names of the above listed copyright holders nor the names
19 *    of any contributors may be used to endorse or promote products derived
20 *    from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34/*-
35 * Some Breakage and Bug Fixing added later.
36 * Copyright (c) 2006, by Matthew Jacob
37 * All Rights Reserved
38 *
39 * Support from LSI-Logic has also gone a great deal toward making this a
40 * workable subsystem and is gratefully acknowledged.
41 */
42
43#include <sys/cdefs.h>
44__FBSDID("$FreeBSD$");
45
46#include <dev/mpt/mpt.h>
47#include <dev/mpt/mpt_raid.h>
48
49#include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
50#include "dev/mpt/mpilib/mpi_raid.h"
51
52#include <cam/cam.h>
53#include <cam/cam_ccb.h>
54#include <cam/cam_periph.h>
55#include <cam/cam_sim.h>
56#include <cam/cam_xpt_sim.h>
57
58#include <sys/callout.h>
59#include <sys/kthread.h>
60#include <sys/sysctl.h>
61
62#include <machine/stdarg.h>
63
/*
 * Scratch area holding the decoded result of a RAID action command.
 * It lives in the request buffer directly after the RAID action request
 * message itself (see REQ_TO_RAID_ACTION_RESULT below).
 */
struct mpt_raid_action_result
{
	union {
		MPI_RAID_VOL_INDICATOR	indicator_struct;
		uint32_t		new_settings;
		uint8_t			phys_disk_num;
	} action_data;			/* action-specific reply payload */
	uint16_t			action_status;	/* MPI_RAID_ACTION_ASTATUS_* */
};

/* Locate the action result stored immediately after the request message. */
#define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))

/* Extract the IOC status code proper, masking off the flag bits. */
#define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
78
/* Personality entry points registered with the core mpt(4) driver. */
static mpt_probe_handler_t	mpt_raid_probe;
static mpt_attach_handler_t	mpt_raid_attach;
static mpt_enable_handler_t	mpt_raid_enable;
static mpt_event_handler_t	mpt_raid_event;
static mpt_shutdown_handler_t	mpt_raid_shutdown;
static mpt_reset_handler_t	mpt_raid_ioc_reset;
static mpt_detach_handler_t	mpt_raid_detach;

static struct mpt_personality mpt_raid_personality =
{
	.name		= "mpt_raid",
	.probe		= mpt_raid_probe,
	.attach		= mpt_raid_attach,
	.enable		= mpt_raid_enable,
	.event		= mpt_raid_event,
	.reset		= mpt_raid_ioc_reset,
	.shutdown	= mpt_raid_shutdown,
	.detach		= mpt_raid_detach,
};

DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
/* RAID support is layered on top of the CAM personality. */
MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);

/* Forward declarations for local helpers. */
static mpt_reply_handler_t mpt_raid_reply_handler;
static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
					MSG_DEFAULT_REPLY *reply_frame);
static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
static void mpt_raid_thread(void *arg);
static timeout_t mpt_raid_timer;
#if 0
static void mpt_enable_vol(struct mpt_softc *mpt,
			   struct mpt_raid_volume *mpt_vol, int enable);
#endif
static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
    struct cam_path *);
static void mpt_raid_sysctl_attach(struct mpt_softc *);

static const char *mpt_vol_type(struct mpt_raid_volume *vol);
static const char *mpt_vol_state(struct mpt_raid_volume *vol);
static const char *mpt_disk_state(struct mpt_raid_disk *disk);
static void mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
    const char *fmt, ...);
static void mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
    const char *fmt, ...);

static int mpt_issue_raid_req(struct mpt_softc *mpt,
    struct mpt_raid_volume *vol, struct mpt_raid_disk *disk, request_t *req,
    u_int Action, uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
    int write, int wait);

static int mpt_refresh_raid_data(struct mpt_softc *mpt);
static void mpt_schedule_raid_refresh(struct mpt_softc *mpt);

/* Reply handler id assigned by mpt_register_handler() at attach time. */
static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
135
136static const char *
137mpt_vol_type(struct mpt_raid_volume *vol)
138{
139	switch (vol->config_page->VolumeType) {
140	case MPI_RAID_VOL_TYPE_IS:
141		return ("RAID-0");
142	case MPI_RAID_VOL_TYPE_IME:
143		return ("RAID-1E");
144	case MPI_RAID_VOL_TYPE_IM:
145		return ("RAID-1");
146	default:
147		return ("Unknown");
148	}
149}
150
151static const char *
152mpt_vol_state(struct mpt_raid_volume *vol)
153{
154	switch (vol->config_page->VolumeStatus.State) {
155	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
156		return ("Optimal");
157	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
158		return ("Degraded");
159	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
160		return ("Failed");
161	default:
162		return ("Unknown");
163	}
164}
165
166static const char *
167mpt_disk_state(struct mpt_raid_disk *disk)
168{
169	switch (disk->config_page.PhysDiskStatus.State) {
170	case MPI_PHYSDISK0_STATUS_ONLINE:
171		return ("Online");
172	case MPI_PHYSDISK0_STATUS_MISSING:
173		return ("Missing");
174	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
175		return ("Incompatible");
176	case MPI_PHYSDISK0_STATUS_FAILED:
177		return ("Failed");
178	case MPI_PHYSDISK0_STATUS_INITIALIZING:
179		return ("Initializing");
180	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
181		return ("Offline Requested");
182	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
183		return ("Failed per Host Request");
184	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
185		return ("Offline");
186	default:
187		return ("Unknown");
188	}
189}
190
/*
 * printf-style message helper that prefixes output with the controller
 * unit and the volume's index, bus, and ID, e.g. "mpt0:vol0(mpt0:0:1): ".
 */
static void
mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
	    const char *fmt, ...)
{
	va_list ap;

	/* Volume index is derived from position in the raid_volumes array. */
	printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
	       (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
	       vol->config_page->VolumeBus, vol->config_page->VolumeID);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
204
/*
 * printf-style message helper that prefixes output with the disk's
 * identity: volume/member position when the disk belongs to a volume,
 * otherwise its physical bus and target ID.
 */
static void
mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
	     const char *fmt, ...)
{
	va_list ap;

	if (disk->volume != NULL) {
		/* Member of a volume: report volume ID and member number. */
		printf("(%s:vol%d:%d): ",
		       device_get_nameunit(mpt->dev),
		       disk->volume->config_page->VolumeID,
		       disk->member_number);
	} else {
		/* Stand-alone physical disk: report bus and target ID. */
		printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
		       disk->config_page.PhysDiskBus,
		       disk->config_page.PhysDiskID);
	}
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
225
226static void
227mpt_raid_async(void *callback_arg, u_int32_t code,
228	       struct cam_path *path, void *arg)
229{
230	struct mpt_softc *mpt;
231
232	mpt = (struct mpt_softc*)callback_arg;
233	switch (code) {
234	case AC_FOUND_DEVICE:
235	{
236		struct ccb_getdev *cgd;
237		struct mpt_raid_volume *mpt_vol;
238
239		cgd = (struct ccb_getdev *)arg;
240		if (cgd == NULL) {
241			break;
242		}
243
244		mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
245			 cgd->ccb_h.target_id);
246
247		RAID_VOL_FOREACH(mpt, mpt_vol) {
248			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
249				continue;
250
251			if (mpt_vol->config_page->VolumeID
252			 == cgd->ccb_h.target_id) {
253				mpt_adjust_queue_depth(mpt, mpt_vol, path);
254				break;
255			}
256		}
257	}
258	default:
259		break;
260	}
261}
262
263static int
264mpt_raid_probe(struct mpt_softc *mpt)
265{
266
267	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
268		return (ENODEV);
269	}
270	return (0);
271}
272
273static int
274mpt_raid_attach(struct mpt_softc *mpt)
275{
276	struct ccb_setasync csa;
277	mpt_handler_t	 handler;
278	int		 error;
279
280	mpt_callout_init(mpt, &mpt->raid_timer);
281
282	error = mpt_spawn_raid_thread(mpt);
283	if (error != 0) {
284		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
285		goto cleanup;
286	}
287
288	MPT_LOCK(mpt);
289	handler.reply_handler = mpt_raid_reply_handler;
290	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
291				     &raid_handler_id);
292	if (error != 0) {
293		mpt_prt(mpt, "Unable to register RAID haandler!\n");
294		goto cleanup;
295	}
296
297	xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
298	csa.ccb_h.func_code = XPT_SASYNC_CB;
299	csa.event_enable = AC_FOUND_DEVICE;
300	csa.callback = mpt_raid_async;
301	csa.callback_arg = mpt;
302	xpt_action((union ccb *)&csa);
303	if (csa.ccb_h.status != CAM_REQ_CMP) {
304		mpt_prt(mpt, "mpt_raid_attach: Unable to register "
305			"CAM async handler.\n");
306	}
307	MPT_UNLOCK(mpt);
308
309	mpt_raid_sysctl_attach(mpt);
310	return (0);
311cleanup:
312	MPT_UNLOCK(mpt);
313	mpt_raid_detach(mpt);
314	return (error);
315}
316
/*
 * Enable hook for the RAID personality.  No additional work is required
 * beyond what attach already did, so simply report success.
 */
static int
mpt_raid_enable(struct mpt_softc *mpt)
{

	return (0);
}
323
/*
 * Detach the RAID personality: stop the refresh timer, terminate the
 * RAID thread, deregister our reply handler, and unhook the CAM async
 * callback.  Teardown order mirrors attach in reverse.
 */
static void
mpt_raid_detach(struct mpt_softc *mpt)
{
	struct ccb_setasync csa;
	mpt_handler_t handler;

	/* Must drain the callout before killing the thread it wakes. */
	mpt_callout_drain(mpt, &mpt->raid_timer);

	MPT_LOCK(mpt);
	mpt_terminate_raid_thread(mpt);
	handler.reply_handler = mpt_raid_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       raid_handler_id);
	/* event_enable == 0 deregisters the async callback. */
	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = mpt_raid_async;
	csa.callback_arg = mpt;
	xpt_action((union ccb *)&csa);
	MPT_UNLOCK(mpt);
}
345
/*
 * IOC reset hook for the RAID personality.  Currently a no-op; RAID
 * state is re-read by the RAID thread after the reset completes.
 */
static void
mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
{

	/* Nothing to do yet. */
}
352
/*
 * Text for RAID event reason codes, indexed by the
 * MPI_EVENT_RAID_RC_* value reported in the event data.
 */
static const char *raid_event_txt[] =
{
	"Volume Created",
	"Volume Deleted",
	"Volume Settings Changed",
	"Volume Status Changed",
	"Volume Physical Disk Membership Changed",
	"Physical Disk Created",
	"Physical Disk Deleted",
	"Physical Disk Settings Changed",
	"Physical Disk Status Changed",
	"Domain Validation Required",
	"SMART Data Received",
	"Replace Action Started",
};
368
/*
 * Handle an MPI_EVENT_INTEGRATED_RAID event notification.  Locates the
 * affected volume and/or physical disk, marks cached configuration data
 * stale and/or requests a bus rescan depending on the reason code, logs
 * the event, and wakes the RAID thread to refresh its state.
 *
 * Returns 0 if the event was not an integrated RAID event (so other
 * handlers may claim it), 1 once handled.
 */
static int
mpt_raid_event(struct mpt_softc *mpt, request_t *req,
	       MSG_EVENT_NOTIFY_REPLY *msg)
{
	EVENT_DATA_RAID *raid_event;
	struct mpt_raid_volume *mpt_vol;
	struct mpt_raid_disk *mpt_disk;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int i;
	int print_event;

	if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
		return (0);
	}

	raid_event = (EVENT_DATA_RAID *)&msg->Data;

	/* Find the active volume matching the event's bus/ID, if any. */
	mpt_vol = NULL;
	vol_pg = NULL;
	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
			mpt_vol = &mpt->raid_volumes[i];
			vol_pg = mpt_vol->config_page;

			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
				continue;

			if (vol_pg->VolumeID == raid_event->VolumeID
			 && vol_pg->VolumeBus == raid_event->VolumeBus)
				break;
		}
		if (i >= mpt->ioc_page2->MaxVolumes) {
			mpt_vol = NULL;
			vol_pg = NULL;
		}
	}

	/* A PhysDiskNum of 0xFF means no physical disk is referenced. */
	mpt_disk = NULL;
	if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
		mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
			mpt_disk = NULL;
		}
	}

	print_event = 1;
	switch(raid_event->ReasonCode) {
	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
		break;
	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
		if (mpt_vol != NULL) {
			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
			} else {
				/*
				 * Coalesce status messages into one
				 * per background run of our RAID thread.
				 * This removes "spurious" status messages
				 * from our output.
				 */
				print_event = 0;
			}
		}
		break;
	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
		/* Volume config changed; stale the cache and rescan. */
		mpt->raid_rescan++;
		if (mpt_vol != NULL) {
			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
		}
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
		mpt->raid_rescan++;
		if (mpt_disk != NULL) {
			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
		}
		break;
	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
		mpt->raid_rescan++;
		break;
	case MPI_EVENT_RAID_RC_SMART_DATA:
	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
		break;
	}

	if (print_event) {
		/* Prefix the message with the most specific identity known. */
		if (mpt_disk != NULL) {
			mpt_disk_prt(mpt, mpt_disk, "");
		} else if (mpt_vol != NULL) {
			mpt_vol_prt(mpt, mpt_vol, "");
		} else {
			mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
				raid_event->VolumeID);

			if (raid_event->PhysDiskNum != 0xFF)
				mpt_prtc(mpt, ":%d): ",
					 raid_event->PhysDiskNum);
			else
				mpt_prtc(mpt, "): ");
		}

		if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
			mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
				 raid_event->ReasonCode);
		else
			mpt_prtc(mpt, "%s\n",
				 raid_event_txt[raid_event->ReasonCode]);
	}

	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
		/* XXX Use CAM's print sense for this... */
		/*
		 * NOTE(review): the format below opens a paren after the
		 * second colon ("Volume(%d:%d:%d: ") that is only closed
		 * by the trailing ")" in the ASC/ASCQ line; looks like a
		 * misplaced parenthesis — confirm intended output.
		 */
		if (mpt_disk != NULL)
			mpt_disk_prt(mpt, mpt_disk, "");
		else
			mpt_prt(mpt, "Volume(%d:%d:%d: ",
			    raid_event->VolumeBus, raid_event->VolumeID,
			    raid_event->PhysDiskNum);
		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
			 raid_event->ASC, raid_event->ASCQ);
	}

	/* Let the RAID thread refresh its data. */
	mpt_raid_wakeup(mpt);
	return (1);
}
499
500static void
501mpt_raid_shutdown(struct mpt_softc *mpt)
502{
503	struct mpt_raid_volume *mpt_vol;
504
505	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
506		return;
507	}
508
509	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
510	RAID_VOL_FOREACH(mpt, mpt_vol) {
511		mpt_verify_mwce(mpt, mpt_vol);
512	}
513}
514
/*
 * Reply handler for RAID action requests.  Parses the reply frame (if
 * any), marks the request done, and either wakes a sleeping waiter or
 * frees the request.  Always returns TRUE (reply consumed).
 */
static int
mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int free_req;

	if (req == NULL)
		return (TRUE);

	free_req = TRUE;
	if (reply_frame != NULL)
		/* Frame handler decides whether the request may be freed. */
		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
#ifdef NOTYET
	else if (req->ccb != NULL) {
		/* Complete Quiesce CCB with error... */
	}
#endif

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		/* A sleeper owns the request; it frees it after waking. */
		wakeup(req);
	} else if (free_req) {
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}
545
546/*
547 * Parse additional completion information in the reply
548 * frame for RAID I/O requests.
549 */
550static int
551mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
552    MSG_DEFAULT_REPLY *reply_frame)
553{
554	MSG_RAID_ACTION_REPLY *reply;
555	struct mpt_raid_action_result *action_result;
556	MSG_RAID_ACTION_REQUEST *rap;
557
558	reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
559	req->IOCStatus = le16toh(reply->IOCStatus);
560	rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
561
562	switch (rap->Action) {
563	case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
564		mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
565		break;
566	case MPI_RAID_ACTION_ENABLE_PHYS_IO:
567		mpt_prt(mpt, "ENABLY PHYSIO DONE\n");
568		break;
569	default:
570		break;
571	}
572	action_result = REQ_TO_RAID_ACTION_RESULT(req);
573	memcpy(&action_result->action_data, &reply->ActionData,
574	    sizeof(action_result->action_data));
575	action_result->action_status = le16toh(reply->ActionStatus);
576	return (TRUE);
577}
578
579/*
580 * Utiltity routine to perform a RAID action command;
581 */
582static int
583mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
584		   struct mpt_raid_disk *disk, request_t *req, u_int Action,
585		   uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
586		   int write, int wait)
587{
588	MSG_RAID_ACTION_REQUEST *rap;
589	SGE_SIMPLE32 *se;
590
591	rap = req->req_vbuf;
592	memset(rap, 0, sizeof *rap);
593	rap->Action = Action;
594	rap->ActionDataWord = htole32(ActionDataWord);
595	rap->Function = MPI_FUNCTION_RAID_ACTION;
596	rap->VolumeID = vol->config_page->VolumeID;
597	rap->VolumeBus = vol->config_page->VolumeBus;
598	if (disk != 0)
599		rap->PhysDiskNum = disk->config_page.PhysDiskNum;
600	else
601		rap->PhysDiskNum = 0xFF;
602	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
603	se->Address = htole32(addr);
604	MPI_pSGE_SET_LENGTH(se, len);
605	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
606	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
607	    MPI_SGE_FLAGS_END_OF_LIST |
608	    (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
609	se->FlagsLength = htole32(se->FlagsLength);
610	rap->MsgContext = htole32(req->index | raid_handler_id);
611
612	mpt_check_doorbell(mpt);
613	mpt_send_cmd(mpt, req);
614
615	if (wait) {
616		return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
617				     /*sleep_ok*/FALSE, /*time_ms*/2000));
618	} else {
619		return (0);
620	}
621}
622
623/*************************** RAID Status Monitoring ***************************/
624static int
625mpt_spawn_raid_thread(struct mpt_softc *mpt)
626{
627	int error;
628
629	/*
630	 * Freeze out any CAM transactions until our thread
631	 * is able to run at least once.  We need to update
632	 * our RAID pages before acception I/O or we may
633	 * reject I/O to an ID we later determine is for a
634	 * hidden physdisk.
635	 */
636	MPT_LOCK(mpt);
637	xpt_freeze_simq(mpt->phydisk_sim, 1);
638	MPT_UNLOCK(mpt);
639	error = mpt_kthread_create(mpt_raid_thread, mpt,
640	    &mpt->raid_thread, /*flags*/0, /*altstack*/0,
641	    "mpt_raid%d", mpt->unit);
642	if (error != 0) {
643		MPT_LOCK(mpt);
644		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
645		MPT_UNLOCK(mpt);
646	}
647	return (error);
648}
649
/*
 * Ask the RAID thread to exit and wait for it to do so.  Called with
 * the mpt lock held (see mpt_raid_detach).
 */
static void
mpt_terminate_raid_thread(struct mpt_softc *mpt)
{

	if (mpt->raid_thread == NULL) {
		return;
	}
	mpt->shutdwn_raid = 1;
	/* The thread idles on &mpt->raid_volumes; kick it awake. */
	wakeup(&mpt->raid_volumes);
	/*
	 * Sleep on a slightly different location
	 * for this interlock just for added safety.
	 */
	mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
}
665
/*
 * Main loop of the RAID monitoring thread.  Sleeps until woken (event
 * handler or refresh timer), refreshes cached RAID data, releases the
 * physical-disk SIM queue after the first successful refresh, and
 * performs bus rescans when requested.  Exits when shutdwn_raid is set
 * and signals mpt_terminate_raid_thread via &mpt->raid_thread.
 */
static void
mpt_raid_thread(void *arg)
{
	struct mpt_softc *mpt;
	int firstrun;

	mpt = (struct mpt_softc *)arg;
	firstrun = 1;
	MPT_LOCK(mpt);
	while (mpt->shutdwn_raid == 0) {

		if (mpt->raid_wakeup == 0) {
			mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
			continue;
		}

		mpt->raid_wakeup = 0;

		if (mpt_refresh_raid_data(mpt)) {
			/* Refresh failed; retry later via the timer. */
			mpt_schedule_raid_refresh(mpt);	/* XX NOT QUITE RIGHT */
			continue;
		}

		/*
		 * Now that we have our first snapshot of RAID data,
		 * allow CAM to access our physical disk bus.
		 */
		if (firstrun) {
			firstrun = 0;
			xpt_release_simq(mpt->phydisk_sim, TRUE);
		}

		if (mpt->raid_rescan != 0) {
			union ccb *ccb;
			int error;

			mpt->raid_rescan = 0;
			/* Drop the lock: xpt_alloc_ccb() may sleep. */
			MPT_UNLOCK(mpt);

			ccb = xpt_alloc_ccb();

			MPT_LOCK(mpt);
			error = xpt_create_path(&ccb->ccb_h.path, NULL,
			    cam_sim_path(mpt->phydisk_sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
			if (error != CAM_REQ_CMP) {
				xpt_free_ccb(ccb);
				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
			} else {
				/* xpt_rescan() takes ownership of the ccb. */
				xpt_rescan(ccb);
			}
		}
	}
	/* Hand-shake with mpt_terminate_raid_thread(). */
	mpt->raid_thread = NULL;
	wakeup(&mpt->raid_thread);
	MPT_UNLOCK(mpt);
	mpt_kthread_exit(0);
}
724
#if 0
/*
 * NOTE(review): this entire quiesce path is compiled out (#if 0) and
 * appears unfinished — the timeout handler is a stub and the post-wait
 * result checking is itself disabled below.
 */
static void
mpt_raid_quiesce_timeout(void *arg)
{

	/* Complete the CCB with error */
	/* COWWWW */
}

static timeout_t mpt_raid_quiesce_timeout;
/*
 * Begin quiescing physical I/O to a member disk.  Freezes the device
 * queue, issues a non-blocking QUIESCE_PHYS_IO action, and arms a
 * 5 second timeout.  Returns CAM_REQ_CMP if already quiesced,
 * CAM_REQ_INPROG once the quiesce is started, CAM_REQUEUE_REQ if a
 * quiesce is already in progress, or CAM_REQ_CMP_ERR on failure.
 */
cam_status
mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
		      request_t *req)
{
	union ccb *ccb;

	ccb = req->ccb;
	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
		return (CAM_REQ_CMP);

	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
		int rv;

		mpt_disk->flags |= MPT_RDF_QUIESCING;
		xpt_freeze_devq(ccb->ccb_h.path, 1);

		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
					/*ActionData*/0, /*addr*/0,
					/*len*/0, /*write*/FALSE,
					/*wait*/FALSE);
		if (rv != 0)
			return (CAM_REQ_CMP_ERR);

		mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
#if 0
		if (rv == ETIMEDOUT) {
			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
				     "Quiece Timed-out\n");
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		}
#endif
		return (CAM_REQ_INPROG);
	}
	return (CAM_REQUEUE_REQ);
}
#endif
784
785/* XXX Ignores that there may be multiple busses/IOCs involved. */
786cam_status
787mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, target_id_t *tgt)
788{
789	struct mpt_raid_disk *mpt_disk;
790
791	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
792	if (ccb->ccb_h.target_id < mpt->raid_max_disks
793	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
794		*tgt = mpt_disk->config_page.PhysDiskID;
795		return (0);
796	}
797	mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
798		 ccb->ccb_h.target_id);
799	return (-1);
800}
801
802/* XXX Ignores that there may be multiple busses/IOCs involved. */
803int
804mpt_is_raid_member(struct mpt_softc *mpt, target_id_t tgt)
805{
806	struct mpt_raid_disk *mpt_disk;
807	int i;
808
809	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0)
810		return (0);
811	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
812		mpt_disk = &mpt->raid_disks[i];
813		if ((mpt_disk->flags & MPT_RDF_ACTIVE) != 0 &&
814		    mpt_disk->config_page.PhysDiskID == tgt)
815			return (1);
816	}
817	return (0);
818
819}
820
821/* XXX Ignores that there may be multiple busses/IOCs involved. */
822int
823mpt_is_raid_volume(struct mpt_softc *mpt, target_id_t tgt)
824{
825	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
826	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
827
828	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
829		return (0);
830	}
831	ioc_vol = mpt->ioc_page2->RaidVolume;
832	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
833	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
834		if (ioc_vol->VolumeID == tgt) {
835			return (1);
836		}
837	}
838	return (0);
839}
840
#if 0
/*
 * Enable or disable a RAID volume via a RAID action command.
 * Currently compiled out (#if 0).
 *
 * NOTE(review): on the ETIMEDOUT path this returns without calling
 * mpt_free_request(), leaking the request (though freeing a request
 * that may still be in flight would be unsafe) — revisit before
 * enabling this code.
 */
static void
mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
	       int enable)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int enabled;
	int rv;

	vol_pg = mpt_vol->config_page;
	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	if ((enabled && enable)
	 || (!enabled && !enable))
		return;

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_enable_vol: Get request failed!\n");
		return;
	}

	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
				       : MPI_RAID_ACTION_DISABLE_VOLUME,
				/*data*/0, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
			    "%s Volume Timed-out\n",
			    enable ? "Enable" : "Disable");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
			    enable ? "Enable" : "Disable",
			    rv, req->IOCStatus, ar->action_status);
	}

	mpt_free_request(mpt, req);
}
#endif
893
/*
 * Reconcile a volume's member-disk write-cache-enable (MWCE) setting
 * with the driver's configured policy, issuing a CHANGE_VOLUME_SETTINGS
 * RAID action when they disagree.  The cached config page is updated
 * only after the IOC reports success.
 */
static void
mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	uint32_t data;
	int rv;
	int resyncing;
	int mwce;

	vol_pg = mpt_vol->config_page;
	resyncing = vol_pg->VolumeStatus.Flags
		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
	mwce = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	switch (mpt->raid_mwce_setting) {
	case MPT_RAID_MWCE_REBUILD_ONLY:
		if ((resyncing && mwce) || (!resyncing && !mwce)) {
			return;
		}
		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
			/*
			 * Wait one more status update to see if
			 * resyncing gets enabled.  It gets disabled
			 * temporarilly when WCE is changed.
			 */
			return;
		}
		break;
	case MPT_RAID_MWCE_ON:
		if (mwce)
			return;
		break;
	case MPT_RAID_MWCE_OFF:
		if (!mwce)
			return;
		break;
	case MPT_RAID_MWCE_NC:
		/* "No change" policy: never touch the setting. */
		return;
	}

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_verify_mwce: Get request failed!\n");
		return;
	}

	/*
	 * Toggle the bit only long enough to snapshot the desired
	 * settings word; the cached page is restored immediately and
	 * only committed after the IOC confirms the change below.
	 */
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
				data, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		/*
		 * NOTE(review): req is not freed on this path; since the
		 * request may still be in flight, freeing here would be
		 * unsafe, but the request is effectively leaked — confirm.
		 */
		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
			    "Write Cache Enable Timed-out\n");
		return;
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
			    "%d:%x:%x\n", rv, req->IOCStatus,
			    ar->action_status);
	} else {
		/* Commit the new setting to our cached page. */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	}
	mpt_free_request(mpt, req);
}
976
/*
 * Reconcile a volume's resync rate and resync priority with the
 * driver's configured rate.  Rates >= 128 imply high-priority resync.
 * Issues SET_RESYNC_RATE or CHANGE_VOLUME_SETTINGS RAID actions as
 * needed; the cached config page is updated only on success.
 */
static void
mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
	u_int prio;
	int rv;

	vol_pg = mpt_vol->config_page;

	/* "No change" policy: leave the firmware's rate alone. */
	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
		return;

	/*
	 * If the current RAID resync rate does not
	 * match our configured rate, update it.
	 */
	prio = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
	if (vol_pg->ResyncRate != 0
	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_SET_RESYNC_RATE,
					mpt->raid_resync_rate, /*addr*/0,
					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			/*
			 * NOTE(review): req is not freed on this path
			 * (same pattern as mpt_verify_mwce) — confirm
			 * whether this leak is intentional.
			 */
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}

		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else
			vol_pg->ResyncRate = mpt->raid_resync_rate;
		mpt_free_request(mpt, req);
	} else if ((prio && mpt->raid_resync_rate < 128)
		|| (!prio && mpt->raid_resync_rate >= 128)) {
		/* Priority flag disagrees with the configured rate. */
		uint32_t data;

		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		if (req == NULL) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
			return;
		}

		/*
		 * Toggle the priority bit only long enough to snapshot
		 * the desired settings word; committed after success.
		 */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
					data, /*addr*/0, /*len*/0,
					/*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			/* NOTE(review): req not freed here either. */
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
			return;
		}
		ar = REQ_TO_RAID_ACTION_RESULT(req);
		if (rv != 0
		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
				    ar->action_status);
		} else {
			/* Commit the new priority bit to our cached page. */
			vol_pg->VolumeSettings.Settings ^=
			    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		}

		mpt_free_request(mpt, req);
	}
}
1067
1068static void
1069mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1070		       struct cam_path *path)
1071{
1072	struct ccb_relsim crs;
1073
1074	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1075	crs.ccb_h.func_code = XPT_REL_SIMQ;
1076	crs.ccb_h.flags = CAM_DEV_QFREEZE;
1077	crs.release_flags = RELSIM_ADJUST_OPENINGS;
1078	crs.openings = mpt->raid_queue_depth;
1079	xpt_action((union ccb *)&crs);
1080	if (crs.ccb_h.status != CAM_REQ_CMP)
1081		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1082			    "with CAM status %#x\n", crs.ccb_h.status);
1083}
1084
/*
 * Pretty-print the configuration of a single RAID volume to the console:
 * the volume's settings bits, its hot spare pool membership, and the
 * identity and status of every member physical disk.
 */
static void
mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	u_int i;

	vol_pg = mpt_vol->config_page;
	mpt_vol_prt(mpt, mpt_vol, "Settings (");
	/* Walk each bit of the 16-bit Settings word and name the known ones. */
	for (i = 1; i <= 0x8000; i <<= 1) {
		switch (vol_pg->VolumeSettings.Settings & i) {
		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
			mpt_prtc(mpt, " Member-WCE");
			break;
		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
			mpt_prtc(mpt, " Offline-On-SMART-Err");
			break;
		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
			mpt_prtc(mpt, " Hot-Plug-Spares");
			break;
		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
			mpt_prtc(mpt, " High-Priority-ReSync");
			break;
		default:
			break;
		}
	}
	mpt_prtc(mpt, " )\n");
	if (vol_pg->VolumeSettings.HotSparePool != 0) {
		/* Pluralize "Pool" when more than one pool bit is set. */
		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
			    powerof2(vol_pg->VolumeSettings.HotSparePool)
			  ? ":" : "s:");
		/* HotSparePool is a bitmask: one bit per pool, 8 pools. */
		for (i = 0; i < 8; i++) {
			u_int mask;

			mask = 0x1 << i;
			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
				continue;
			mpt_prtc(mpt, " %d", i);
		}
		mpt_prtc(mpt, "\n");
	}
	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
	for (i = 0; i < vol_pg->NumPhysDisks; i++){
		struct mpt_raid_disk *mpt_disk;
		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
		int pt_bus = cam_sim_bus(mpt->phydisk_sim);
		U8 f, s;

		/* PhysDiskNum indexes the driver's raid_disks array. */
		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		disk_pg = &mpt_disk->config_page;
		mpt_prtc(mpt, "      ");
		mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
			 pt_bus, disk_pg->PhysDiskID);
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			/* Mirrors: member 0 is primary, member 1 secondary. */
			mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
			    "Primary" : "Secondary");
		} else {
			mpt_prtc(mpt, "Stripe Position %d",
				 mpt_disk->member_number);
		}
		f = disk_pg->PhysDiskStatus.Flags;
		s = disk_pg->PhysDiskStatus.State;
		/* Report each status flag bit that is set. */
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
			mpt_prtc(mpt, " Out of Sync");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
			mpt_prtc(mpt, " Quiesced");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
			mpt_prtc(mpt, " Inactive");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Optimal");
		}
		if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Non-Optimal");
		}
		/* State is an enumeration, not a bitmask. */
		switch (s) {
		case MPI_PHYSDISK0_STATUS_ONLINE:
			mpt_prtc(mpt, " Online");
			break;
		case MPI_PHYSDISK0_STATUS_MISSING:
			mpt_prtc(mpt, " Missing");
			break;
		case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
			mpt_prtc(mpt, " Incompatible");
			break;
		case MPI_PHYSDISK0_STATUS_FAILED:
			mpt_prtc(mpt, " Failed");
			break;
		case MPI_PHYSDISK0_STATUS_INITIALIZING:
			mpt_prtc(mpt, " Initializing");
			break;
		case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
			mpt_prtc(mpt, " Requested Offline");
			break;
		case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
			mpt_prtc(mpt, " Requested Failed");
			break;
		case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
		default:
			mpt_prtc(mpt, " Offline Other (%x)", s);
			break;
		}
		mpt_prtc(mpt, "\n");
	}
}
1192
1193static void
1194mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1195{
1196	CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1197	int rd_bus = cam_sim_bus(mpt->sim);
1198	int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1199	u_int i;
1200
1201	disk_pg = &mpt_disk->config_page;
1202	mpt_disk_prt(mpt, mpt_disk,
1203		     "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1204		     device_get_nameunit(mpt->dev), rd_bus,
1205		     disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1206		     pt_bus, mpt_disk - mpt->raid_disks);
1207	if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1208		return;
1209	mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1210		     powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1211		   ? ":" : "s:");
1212	for (i = 0; i < 8; i++) {
1213		u_int mask;
1214
1215		mask = 0x1 << i;
1216		if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1217			continue;
1218		mpt_prtc(mpt, " %d", i);
1219	}
1220	mpt_prtc(mpt, "\n");
1221}
1222
/*
 * Re-read RAID physical disk page 0 for the disk identified by
 * ioc_disk->PhysDiskNum into mpt_disk->config_page, then convert the
 * page to host byte order.  Failures are reported to the console but
 * otherwise ignored.
 */
static void
mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
		      IOC_3_PHYS_DISK *ioc_disk)
{
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
				 /*PageNumber*/0, ioc_disk->PhysDiskNum,
				 &mpt_disk->config_page.Header,
				 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
	if (rv != 0) {
		mpt_prt(mpt, "mpt_refresh_raid_disk: "
			"Failed to read RAID Disk Hdr(%d)\n",
		 	ioc_disk->PhysDiskNum);
		return;
	}
	rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
				   &mpt_disk->config_page.Header,
				   sizeof(mpt_disk->config_page),
				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
	if (rv != 0)
		mpt_prt(mpt, "mpt_refresh_raid_disk: "
			"Failed to read RAID Disk Page(%d)\n",
		 	ioc_disk->PhysDiskNum);
	/*
	 * NOTE(review): this byte-order conversion runs even when the page
	 * read above failed, i.e. on whatever data was previously in
	 * config_page — confirm that is intentional.
	 */
	mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
}
1249
/*
 * Refresh the cached RAID volume page 0 for "mpt_vol" from the
 * controller, update the member-disk back-pointers, and — when a resync
 * is in progress — fetch the progress indicator into
 * mpt_vol->sync_progress via a RAID action request.
 */
static void
mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
    CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
{
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	struct mpt_raid_action_result *ar;
	request_t *req;
	int rv;
	int i;

	vol_pg = mpt_vol->config_page;
	/* The cached page is about to be re-read; mark it stale. */
	mpt_vol->flags &= ~MPT_RVF_UP2DATE;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
	    ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
		    ioc_vol->VolumePageNumber);
		return;
	}

	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
	    &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
	if (rv != 0) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
		    ioc_vol->VolumePageNumber);
		return;
	}
	/* Convert the freshly-read page to host byte order. */
	mpt2host_config_page_raid_vol_0(vol_pg);

	mpt_vol->flags |= MPT_RVF_ACTIVE;

	/* Update disk entry array data. */
	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
		struct mpt_raid_disk *mpt_disk;
		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		mpt_disk->volume = mpt_vol;
		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
		/* IM (mirror): PhysDiskMap appears 1-based; make 0-based. */
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			mpt_disk->member_number--;
		}
	}

	/* Nothing more to do unless a resync is underway. */
	if ((vol_pg->VolumeStatus.Flags
	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
		return;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Get request failed!\n");
		return;
	}
	/* Ask the firmware for the resync progress indicator structure. */
	rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
	    MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
		mpt_free_request(mpt, req);
		return;
	}

	ar = REQ_TO_RAID_ACTION_RESULT(req);
	/* Only trust the indicator when every status check succeeded. */
	if (rv == 0
	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
		memcpy(&mpt_vol->sync_progress,
		       &ar->action_data.indicator_struct,
		       sizeof(mpt_vol->sync_progress));
		mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
	} else {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
	}
	mpt_free_request(mpt, req);
}
1328
1329/*
1330 * Update in-core information about RAID support.  We update any entries
1331 * that didn't previously exists or have been marked as needing to
1332 * be updated by our event handler.  Interesting changes are displayed
1333 * to the console.
1334 */
1335static int
1336mpt_refresh_raid_data(struct mpt_softc *mpt)
1337{
1338	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1339	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1340	IOC_3_PHYS_DISK *ioc_disk;
1341	IOC_3_PHYS_DISK *ioc_last_disk;
1342	CONFIG_PAGE_RAID_VOL_0	*vol_pg;
1343	size_t len;
1344	int rv;
1345	int i;
1346	u_int nonopt_volumes;
1347
1348	if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1349		return (0);
1350	}
1351
1352	/*
1353	 * Mark all items as unreferenced by the configuration.
1354	 * This allows us to find, report, and discard stale
1355	 * entries.
1356	 */
1357	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1358		mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1359	}
1360	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1361		mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1362	}
1363
1364	/*
1365	 * Get Physical Disk information.
1366	 */
1367	len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1368	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1369				   &mpt->ioc_page3->Header, len,
1370				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1371	if (rv) {
1372		mpt_prt(mpt,
1373		    "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1374		return (-1);
1375	}
1376	mpt2host_config_page_ioc3(mpt->ioc_page3);
1377
1378	ioc_disk = mpt->ioc_page3->PhysDisk;
1379	ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1380	for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1381		struct mpt_raid_disk *mpt_disk;
1382
1383		mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1384		mpt_disk->flags |= MPT_RDF_REFERENCED;
1385		if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1386		 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1387
1388			mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1389
1390		}
1391		mpt_disk->flags |= MPT_RDF_ACTIVE;
1392		mpt->raid_rescan++;
1393	}
1394
1395	/*
1396	 * Refresh volume data.
1397	 */
1398	len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1399	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1400				   &mpt->ioc_page2->Header, len,
1401				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1402	if (rv) {
1403		mpt_prt(mpt, "mpt_refresh_raid_data: "
1404			"Failed to read IOC Page 2\n");
1405		return (-1);
1406	}
1407	mpt2host_config_page_ioc2(mpt->ioc_page2);
1408
1409	ioc_vol = mpt->ioc_page2->RaidVolume;
1410	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1411	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1412		struct mpt_raid_volume *mpt_vol;
1413
1414		mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1415		mpt_vol->flags |= MPT_RVF_REFERENCED;
1416		vol_pg = mpt_vol->config_page;
1417		if (vol_pg == NULL)
1418			continue;
1419		if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1420		  != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1421		 || (vol_pg->VolumeStatus.Flags
1422		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1423
1424			mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1425		}
1426		mpt_vol->flags |= MPT_RVF_ACTIVE;
1427	}
1428
1429	nonopt_volumes = 0;
1430	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1431		struct mpt_raid_volume *mpt_vol;
1432		uint64_t total;
1433		uint64_t left;
1434		int m;
1435		u_int prio;
1436
1437		mpt_vol = &mpt->raid_volumes[i];
1438
1439		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1440			continue;
1441		}
1442
1443		vol_pg = mpt_vol->config_page;
1444		if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1445		 == MPT_RVF_ANNOUNCED) {
1446			mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1447			mpt_vol->flags = 0;
1448			continue;
1449		}
1450
1451		if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1452			mpt_announce_vol(mpt, mpt_vol);
1453			mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1454		}
1455
1456		if (vol_pg->VolumeStatus.State !=
1457		    MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1458			nonopt_volumes++;
1459
1460		if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1461			continue;
1462
1463		mpt_vol->flags |= MPT_RVF_UP2DATE;
1464		mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1465		    mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1466		mpt_verify_mwce(mpt, mpt_vol);
1467
1468		if (vol_pg->VolumeStatus.Flags == 0) {
1469			continue;
1470		}
1471
1472		mpt_vol_prt(mpt, mpt_vol, "Status (");
1473		for (m = 1; m <= 0x80; m <<= 1) {
1474			switch (vol_pg->VolumeStatus.Flags & m) {
1475			case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1476				mpt_prtc(mpt, " Enabled");
1477				break;
1478			case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1479				mpt_prtc(mpt, " Quiesced");
1480				break;
1481			case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1482				mpt_prtc(mpt, " Re-Syncing");
1483				break;
1484			case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1485				mpt_prtc(mpt, " Inactive");
1486				break;
1487			default:
1488				break;
1489			}
1490		}
1491		mpt_prtc(mpt, " )\n");
1492
1493		if ((vol_pg->VolumeStatus.Flags
1494		   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1495			continue;
1496
1497		mpt_verify_resync_rate(mpt, mpt_vol);
1498
1499		left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1500		total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1501		if (vol_pg->ResyncRate != 0) {
1502
1503			prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1504			mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1505			    prio / 1000, prio % 1000);
1506		} else {
1507			prio = vol_pg->VolumeSettings.Settings
1508			     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1509			mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1510			    prio ? "High" : "Low");
1511		}
1512		mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1513			    "blocks remaining\n", (uintmax_t)left,
1514			    (uintmax_t)total);
1515
1516		/* Periodically report on sync progress. */
1517		mpt_schedule_raid_refresh(mpt);
1518	}
1519
1520	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1521		struct mpt_raid_disk *mpt_disk;
1522		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1523		int m;
1524
1525		mpt_disk = &mpt->raid_disks[i];
1526		disk_pg = &mpt_disk->config_page;
1527
1528		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1529			continue;
1530
1531		if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1532		 == MPT_RDF_ANNOUNCED) {
1533			mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1534			mpt_disk->flags = 0;
1535			mpt->raid_rescan++;
1536			continue;
1537		}
1538
1539		if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1540
1541			mpt_announce_disk(mpt, mpt_disk);
1542			mpt_disk->flags |= MPT_RVF_ANNOUNCED;
1543		}
1544
1545		if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1546			continue;
1547
1548		mpt_disk->flags |= MPT_RDF_UP2DATE;
1549		mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1550		if (disk_pg->PhysDiskStatus.Flags == 0)
1551			continue;
1552
1553		mpt_disk_prt(mpt, mpt_disk, "Status (");
1554		for (m = 1; m <= 0x80; m <<= 1) {
1555			switch (disk_pg->PhysDiskStatus.Flags & m) {
1556			case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1557				mpt_prtc(mpt, " Out-Of-Sync");
1558				break;
1559			case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1560				mpt_prtc(mpt, " Quiesced");
1561				break;
1562			default:
1563				break;
1564			}
1565		}
1566		mpt_prtc(mpt, " )\n");
1567	}
1568
1569	mpt->raid_nonopt_volumes = nonopt_volumes;
1570	return (0);
1571}
1572
/*
 * Callout handler for the RAID refresh timer; runs with the MPT lock
 * held and simply wakes the RAID monitoring thread.
 */
static void
mpt_raid_timer(void *arg)
{
	struct mpt_softc *mpt = (struct mpt_softc *)arg;

	MPT_LOCK_ASSERT(mpt);
	mpt_raid_wakeup(mpt);
}
1582
/*
 * (Re)arm the RAID refresh timer so mpt_raid_timer() fires after
 * MPT_RAID_SYNC_REPORT_INTERVAL and wakes the RAID thread for another
 * status refresh.
 */
static void
mpt_schedule_raid_refresh(struct mpt_softc *mpt)
{

	callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
		      mpt_raid_timer, mpt);
}
1590
1591void
1592mpt_raid_free_mem(struct mpt_softc *mpt)
1593{
1594
1595	if (mpt->raid_volumes) {
1596		struct mpt_raid_volume *mpt_raid;
1597		int i;
1598		for (i = 0; i < mpt->raid_max_volumes; i++) {
1599			mpt_raid = &mpt->raid_volumes[i];
1600			if (mpt_raid->config_page) {
1601				free(mpt_raid->config_page, M_DEVBUF);
1602				mpt_raid->config_page = NULL;
1603			}
1604		}
1605		free(mpt->raid_volumes, M_DEVBUF);
1606		mpt->raid_volumes = NULL;
1607	}
1608	if (mpt->raid_disks) {
1609		free(mpt->raid_disks, M_DEVBUF);
1610		mpt->raid_disks = NULL;
1611	}
1612	if (mpt->ioc_page2) {
1613		free(mpt->ioc_page2, M_DEVBUF);
1614		mpt->ioc_page2 = NULL;
1615	}
1616	if (mpt->ioc_page3) {
1617		free(mpt->ioc_page3, M_DEVBUF);
1618		mpt->ioc_page3 = NULL;
1619	}
1620	mpt->raid_max_volumes =  0;
1621	mpt->raid_max_disks =  0;
1622}
1623
1624static int
1625mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1626{
1627	struct mpt_raid_volume *mpt_vol;
1628
1629	if ((rate > MPT_RAID_RESYNC_RATE_MAX
1630	  || rate < MPT_RAID_RESYNC_RATE_MIN)
1631	 && rate != MPT_RAID_RESYNC_RATE_NC)
1632		return (EINVAL);
1633
1634	MPT_LOCK(mpt);
1635	mpt->raid_resync_rate = rate;
1636	RAID_VOL_FOREACH(mpt, mpt_vol) {
1637		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1638			continue;
1639		}
1640		mpt_verify_resync_rate(mpt, mpt_vol);
1641	}
1642	MPT_UNLOCK(mpt);
1643	return (0);
1644}
1645
1646static int
1647mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1648{
1649	struct mpt_raid_volume *mpt_vol;
1650
1651	if (vol_queue_depth > 255 || vol_queue_depth < 1)
1652		return (EINVAL);
1653
1654	MPT_LOCK(mpt);
1655	mpt->raid_queue_depth = vol_queue_depth;
1656	RAID_VOL_FOREACH(mpt, mpt_vol) {
1657		struct cam_path *path;
1658		int error;
1659
1660		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1661			continue;
1662
1663		mpt->raid_rescan = 0;
1664
1665		error = xpt_create_path(&path, NULL,
1666					cam_sim_path(mpt->sim),
1667					mpt_vol->config_page->VolumeID,
1668					/*lun*/0);
1669		if (error != CAM_REQ_CMP) {
1670			mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1671			continue;
1672		}
1673		mpt_adjust_queue_depth(mpt, mpt_vol, path);
1674		xpt_free_path(path);
1675	}
1676	MPT_UNLOCK(mpt);
1677	return (0);
1678}
1679
1680static int
1681mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1682{
1683	struct mpt_raid_volume *mpt_vol;
1684	int force_full_resync;
1685
1686	MPT_LOCK(mpt);
1687	if (mwce == mpt->raid_mwce_setting) {
1688		MPT_UNLOCK(mpt);
1689		return (0);
1690	}
1691
1692	/*
1693	 * Catch MWCE being left on due to a failed shutdown.  Since
1694	 * sysctls cannot be set by the loader, we treat the first
1695	 * setting of this varible specially and force a full volume
1696	 * resync if MWCE is enabled and a resync is in progress.
1697	 */
1698	force_full_resync = 0;
1699	if (mpt->raid_mwce_set == 0
1700	 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1701	 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1702		force_full_resync = 1;
1703
1704	mpt->raid_mwce_setting = mwce;
1705	RAID_VOL_FOREACH(mpt, mpt_vol) {
1706		CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1707		int resyncing;
1708		int mwce;
1709
1710		if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1711			continue;
1712
1713		vol_pg = mpt_vol->config_page;
1714		resyncing = vol_pg->VolumeStatus.Flags
1715			  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1716		mwce = vol_pg->VolumeSettings.Settings
1717		     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1718		if (force_full_resync && resyncing && mwce) {
1719
1720			/*
1721			 * XXX disable/enable volume should force a resync,
1722			 *     but we'll need to queice, drain, and restart
1723			 *     I/O to do that.
1724			 */
1725			mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1726				    "detected.  Suggest full resync.\n");
1727		}
1728		mpt_verify_mwce(mpt, mpt_vol);
1729	}
1730	mpt->raid_mwce_set = 1;
1731	MPT_UNLOCK(mpt);
1732	return (0);
1733}
1734
/*
 * Textual values for the vol_member_wce sysctl.  The array is indexed by
 * mpt->raid_mwce_setting, and mpt_raid_sysctl_vol_member_wce() converts
 * a matching string's index directly into the new setting, so the order
 * of these entries is significant.
 */
static const char *mpt_vol_mwce_strs[] =
{
	"On",
	"Off",
	"On-During-Rebuild",
	"NC"
};
1742
1743static int
1744mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1745{
1746	char inbuf[20];
1747	struct mpt_softc *mpt;
1748	const char *str;
1749	int error;
1750	u_int size;
1751	u_int i;
1752
1753	GIANT_REQUIRED;
1754
1755	mpt = (struct mpt_softc *)arg1;
1756	str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1757	error = SYSCTL_OUT(req, str, strlen(str) + 1);
1758	if (error || !req->newptr) {
1759		return (error);
1760	}
1761
1762	size = req->newlen - req->newidx;
1763	if (size >= sizeof(inbuf)) {
1764		return (EINVAL);
1765	}
1766
1767	error = SYSCTL_IN(req, inbuf, size);
1768	if (error) {
1769		return (error);
1770	}
1771	inbuf[size] = '\0';
1772	for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1773		if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1774			return (mpt_raid_set_vol_mwce(mpt, i));
1775		}
1776	}
1777	return (EINVAL);
1778}
1779
1780static int
1781mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1782{
1783	struct mpt_softc *mpt;
1784	u_int raid_resync_rate;
1785	int error;
1786
1787	GIANT_REQUIRED;
1788
1789	mpt = (struct mpt_softc *)arg1;
1790	raid_resync_rate = mpt->raid_resync_rate;
1791
1792	error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1793	if (error || !req->newptr) {
1794		return error;
1795	}
1796
1797	return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1798}
1799
1800static int
1801mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1802{
1803	struct mpt_softc *mpt;
1804	u_int raid_queue_depth;
1805	int error;
1806
1807	GIANT_REQUIRED;
1808
1809	mpt = (struct mpt_softc *)arg1;
1810	raid_queue_depth = mpt->raid_queue_depth;
1811
1812	error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1813	if (error || !req->newptr) {
1814		return error;
1815	}
1816
1817	return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1818}
1819
/*
 * Register the per-device RAID sysctl nodes under the device's sysctl
 * tree: vol_member_wce, vol_queue_depth, vol_resync_rate (read/write
 * handlers, which require Giant) and the read-only nonoptimal_volumes
 * counter.
 */
static void
mpt_raid_sysctl_attach(struct mpt_softc *mpt)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_member_wce, "A",
			"volume member WCE(On,Off,On-During-Rebuild,NC)");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_queue_depth, "I",
			"default volume queue depth");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
			mpt_raid_sysctl_vol_resync_rate, "I",
			"volume resync priority (0 == NC, 1 - 255)");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"nonoptimal_volumes", CTLFLAG_RD,
			&mpt->raid_nonopt_volumes, 0,
			"number of nonoptimal volumes");
}
1845