fm.c revision 270998
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Fault Management Architecture (FMA) Resource and Protocol Support
 *
 * The routines contained herein provide services to support kernel subsystems
 * in publishing fault management telemetry (see PSARC 2002/412 and 2003/089).
 *
 * Name-Value Pair Lists
 *
 * The embodiment of an FMA protocol element (event, fmri or authority) is a
 * name-value pair list (nvlist_t).  FMA-specific nvlist constructor and
 * destructor functions, fm_nvlist_create() and fm_nvlist_destroy(), are used
 * to create an nvpair list using custom allocators.  Callers may choose to
 * allocate either from the kernel memory allocator, or from a preallocated
 * buffer, useful in constrained contexts like high-level interrupt routines.
 *
 * Protocol Event and FMRI Construction
 *
 * Convenience routines are provided to construct nvlist events according to
 * the FMA Event Protocol and Naming Schema specification for ereports and
 * FMRIs for the dev, cpu, hc, mem, legacy hc and de schemes.
 *
 * ENA Manipulation
 *
 * Routines to generate ENA formats 0, 1 and 2 are available as well as
 * routines to increment formats 1 and 2.  Individual fields within the
 * ENA are extractable via fm_ena_time_get(), fm_ena_id_get(),
 * fm_ena_format_get() and fm_ena_gen_get().
 */
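
/*
 * Example (illustrative sketch only, not part of the original source): a
 * typical caller in passive kernel context builds an ereport with the
 * default allocator, attaches a detector FMRI and payload, posts it, and
 * frees both lists.  The device path, class string and payload member name
 * below are hypothetical, and EVCH_SLEEP is assumed to be an acceptable
 * publication flag for the caller's context.
 *
 *	nvlist_t *ereport = fm_nvlist_create(NULL);
 *	nvlist_t *detector = fm_nvlist_create(NULL);
 *	uint64_t ena = fm_ena_generate(0, FM_ENA_FMT1);
 *
 *	fm_fmri_dev_set(detector, DEV_SCHEME_VERSION0, NULL,
 *	    "/pci@0,0/pci8086,2829@d", NULL, NULL);
 *	fm_ereport_set(ereport, FM_EREPORT_VERS0, "io.example.timeout",
 *	    ena, detector, "retries", DATA_TYPE_UINT32, 3, NULL);
 *	fm_ereport_post(ereport, EVCH_SLEEP);
 *
 *	fm_nvlist_destroy(detector, FM_NVA_FREE);
 *	fm_nvlist_destroy(ereport, FM_NVA_FREE);
 */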

#include <sys/types.h>
#include <sys/time.h>
#include <sys/sysevent.h>
#include <sys/nvpair.h>
#include <sys/cmn_err.h>
#include <sys/cpuvar.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/compress.h>
#include <sys/cpuvar.h>
#include <sys/kobj.h>
#include <sys/kstat.h>
#include <sys/processor.h>
#include <sys/pcpu.h>
#include <sys/sunddi.h>
#include <sys/systeminfo.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>

/*
 * URL and SUNW-MSG-ID value to display for fm_panic(), defined below.  These
 * values must be kept in sync with the FMA source code in usr/src/cmd/fm.
 */
static const char *fm_url = "http://www.sun.com/msg";
static const char *fm_msgid = "SUNOS-8000-0G";
static char *volatile fm_panicstr = NULL;

#ifdef sun
errorq_t *ereport_errorq;
#endif
void *ereport_dumpbuf;
size_t ereport_dumplen;

static uint_t ereport_chanlen = ERPT_EVCH_MAX;
static evchan_t *ereport_chan = NULL;
static ulong_t ereport_qlen = 0;
static size_t ereport_size = 0;
static int ereport_cols = 80;

extern void fastreboot_disable_highpil(void);

/*
 * Common fault management kstats to record ereport generation
 * failures
 */

struct erpt_kstat {
	kstat_named_t	erpt_dropped;		/* num erpts dropped on post */
	kstat_named_t	erpt_set_failed;	/* num erpt set failures */
	kstat_named_t	fmri_set_failed;	/* num fmri set failures */
	kstat_named_t	payload_set_failed;	/* num payload set failures */
};

static struct erpt_kstat erpt_kstat_data = {
	{ "erpt-dropped", KSTAT_DATA_UINT64 },
	{ "erpt-set-failed", KSTAT_DATA_UINT64 },
	{ "fmri-set-failed", KSTAT_DATA_UINT64 },
	{ "payload-set-failed", KSTAT_DATA_UINT64 }
};

#ifdef sun
/*ARGSUSED*/
static void
fm_drain(void *private, void *data, errorq_elem_t *eep)
{
	nvlist_t *nvl = errorq_elem_nvl(ereport_errorq, eep);

	if (!panicstr)
		(void) fm_ereport_post(nvl, EVCH_TRYHARD);
	else
		fm_nvprint(nvl);
}
#endif

void
fm_init(void)
{
	kstat_t *ksp;

#ifdef sun
	(void) sysevent_evc_bind(FM_ERROR_CHAN,
	    &ereport_chan, EVCH_CREAT | EVCH_HOLD_PEND);

	(void) sysevent_evc_control(ereport_chan,
	    EVCH_SET_CHAN_LEN, &ereport_chanlen);
#endif

	if (ereport_qlen == 0)
		ereport_qlen = ERPT_MAX_ERRS * MAX(max_ncpus, 4);

	if (ereport_size == 0)
		ereport_size = ERPT_DATA_SZ;

#ifdef sun
	ereport_errorq = errorq_nvcreate("fm_ereport_queue",
	    (errorq_func_t)fm_drain, NULL, ereport_qlen, ereport_size,
	    FM_ERR_PIL, ERRORQ_VITAL);
	if (ereport_errorq == NULL)
		panic("failed to create required ereport error queue");
#endif

	ereport_dumpbuf = kmem_alloc(ereport_size, KM_SLEEP);
	ereport_dumplen = ereport_size;

	/* Initialize ereport allocation and generation kstats */
	ksp = kstat_create("unix", 0, "fm", "misc", KSTAT_TYPE_NAMED,
	    sizeof (struct erpt_kstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (ksp != NULL) {
		ksp->ks_data = &erpt_kstat_data;
		kstat_install(ksp);
	} else {
		cmn_err(CE_NOTE, "failed to create fm/misc kstat\n");
	}
}

#ifdef sun
/*
 * Formatting utility function for fm_nvprintr.  We attempt to wrap chunks of
 * output so they aren't split across console lines, and return the end column.
 */
/*PRINTFLIKE4*/
static int
fm_printf(int depth, int c, int cols, const char *format, ...)
{
	va_list ap;
	int width;
	char c1;

	va_start(ap, format);
	width = vsnprintf(&c1, sizeof (c1), format, ap);
	va_end(ap);

	if (c + width >= cols) {
		console_printf("\n\r");
		c = 0;
		if (format[0] != ' ' && depth > 0) {
			console_printf(" ");
			c++;
		}
	}

	va_start(ap, format);
	console_vprintf(format, ap);
	va_end(ap);

	return ((c + width) % cols);
}

/*
 * Recursively print a nvlist in the specified column width and return the
 * column we end up in.  This function is called recursively by fm_nvprint(),
 * below.  We generically format the entire nvpair using hexadecimal
 * integers and strings, and elide any integer arrays.  Arrays are basically
 * used for cache dumps right now, so we suppress them so as not to overwhelm
 * the amount of console output we produce at panic time.  This can be further
 * enhanced as FMA technology grows based upon the needs of consumers.  All
 * FMA telemetry is logged using the dump device transport, so the console
 * output serves only as a fallback in case this procedure is unsuccessful.
 */
static int
fm_nvprintr(nvlist_t *nvl, int d, int c, int cols)
{
	nvpair_t *nvp;

	for (nvp = nvlist_next_nvpair(nvl, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {

		data_type_t type = nvpair_type(nvp);
		const char *name = nvpair_name(nvp);

		boolean_t b;
		uint8_t i8;
		uint16_t i16;
		uint32_t i32;
		uint64_t i64;
		char *str;
		nvlist_t *cnv;

		if (strcmp(name, FM_CLASS) == 0)
			continue; /* already printed by caller */

		c = fm_printf(d, c, cols, " %s=", name);

		switch (type) {
		case DATA_TYPE_BOOLEAN:
			c = fm_printf(d + 1, c, cols, " 1");
			break;

		case DATA_TYPE_BOOLEAN_VALUE:
			(void) nvpair_value_boolean_value(nvp, &b);
			c = fm_printf(d + 1, c, cols, b ? "1" : "0");
			break;

		case DATA_TYPE_BYTE:
			(void) nvpair_value_byte(nvp, &i8);
			c = fm_printf(d + 1, c, cols, "%x", i8);
			break;

		case DATA_TYPE_INT8:
			(void) nvpair_value_int8(nvp, (void *)&i8);
			c = fm_printf(d + 1, c, cols, "%x", i8);
			break;

		case DATA_TYPE_UINT8:
			(void) nvpair_value_uint8(nvp, &i8);
			c = fm_printf(d + 1, c, cols, "%x", i8);
			break;

		case DATA_TYPE_INT16:
			(void) nvpair_value_int16(nvp, (void *)&i16);
			c = fm_printf(d + 1, c, cols, "%x", i16);
			break;

		case DATA_TYPE_UINT16:
			(void) nvpair_value_uint16(nvp, &i16);
			c = fm_printf(d + 1, c, cols, "%x", i16);
			break;

		case DATA_TYPE_INT32:
			(void) nvpair_value_int32(nvp, (void *)&i32);
			c = fm_printf(d + 1, c, cols, "%x", i32);
			break;

		case DATA_TYPE_UINT32:
			(void) nvpair_value_uint32(nvp, &i32);
			c = fm_printf(d + 1, c, cols, "%x", i32);
			break;

		case DATA_TYPE_INT64:
			(void) nvpair_value_int64(nvp, (void *)&i64);
			c = fm_printf(d + 1, c, cols, "%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_UINT64:
			(void) nvpair_value_uint64(nvp, &i64);
			c = fm_printf(d + 1, c, cols, "%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_HRTIME:
			(void) nvpair_value_hrtime(nvp, (void *)&i64);
			c = fm_printf(d + 1, c, cols, "%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_STRING:
			(void) nvpair_value_string(nvp, &str);
			c = fm_printf(d + 1, c, cols, "\"%s\"",
			    str ? str : "<NULL>");
			break;

		case DATA_TYPE_NVLIST:
			c = fm_printf(d + 1, c, cols, "[");
			(void) nvpair_value_nvlist(nvp, &cnv);
			c = fm_nvprintr(cnv, d + 1, c, cols);
			c = fm_printf(d + 1, c, cols, " ]");
			break;

		case DATA_TYPE_NVLIST_ARRAY: {
			nvlist_t **val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[");
			(void) nvpair_value_nvlist_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++) {
				c = fm_nvprintr(val[i], d + 1, c, cols);
			}
			c = fm_printf(d + 1, c, cols, " ]");
			}
			break;

		case DATA_TYPE_BOOLEAN_ARRAY:
		case DATA_TYPE_BYTE_ARRAY:
		case DATA_TYPE_INT8_ARRAY:
		case DATA_TYPE_UINT8_ARRAY:
		case DATA_TYPE_INT16_ARRAY:
		case DATA_TYPE_UINT16_ARRAY:
		case DATA_TYPE_INT32_ARRAY:
		case DATA_TYPE_UINT32_ARRAY:
		case DATA_TYPE_INT64_ARRAY:
		case DATA_TYPE_UINT64_ARRAY:
		case DATA_TYPE_STRING_ARRAY:
			c = fm_printf(d + 1, c, cols, "[...]");
			break;
		case DATA_TYPE_UNKNOWN:
			c = fm_printf(d + 1, c, cols, "<unknown>");
			break;
		}
	}

	return (c);
}

void
fm_nvprint(nvlist_t *nvl)
{
	char *class;
	int c = 0;

	console_printf("\r");

	if (nvlist_lookup_string(nvl, FM_CLASS, &class) == 0)
		c = fm_printf(0, c, ereport_cols, "%s", class);

	if (fm_nvprintr(nvl, 0, c, ereport_cols) != 0)
		console_printf("\n");

	console_printf("\n");
}

/*
 * Wrapper for panic() that first produces an FMA-style message for admins.
 * Normally such messages are generated by fmd(1M)'s syslog-msgs agent: this
 * is the one exception to that rule and the only error that gets messaged.
 * This function is intended for use by subsystems that have detected a fatal
 * error and enqueued appropriate ereports and wish to then force a panic.
 */
/*PRINTFLIKE1*/
void
fm_panic(const char *format, ...)
{
	va_list ap;

	(void) atomic_cas_ptr((void *)&fm_panicstr, NULL, (void *)format);
#if defined(__i386) || defined(__amd64)
	fastreboot_disable_highpil();
#endif /* __i386 || __amd64 */
	va_start(ap, format);
	vpanic(format, ap);
	va_end(ap);
}

/*
 * Simply tell the caller if fm_panicstr is set, i.e. an FMA event has
 * caused the panic.  If so, something other than the default panic
 * diagnosis method will diagnose the cause of the panic.
 */
int
is_fm_panic()
{
	if (fm_panicstr)
		return (1);
	else
		return (0);
}

/*
 * Print any appropriate FMA banner message before the panic message.  This
 * function is called by panicsys() and prints the message for fm_panic().
 * We print the message here so that it comes after the system is quiesced.
 * A one-line summary is recorded in the log only (cmn_err(9F) with "!" prefix).
 * The rest of the message is for the console only and not needed in the log,
 * so it is printed using console_printf().  We break it up into multiple
 * chunks so as to avoid overflowing any small legacy prom_printf() buffers.
 */
void
fm_banner(void)
{
	timespec_t tod;
	hrtime_t now;

	if (!fm_panicstr)
		return; /* panic was not initiated by fm_panic(); do nothing */

	if (panicstr) {
		tod = panic_hrestime;
		now = panic_hrtime;
	} else {
		gethrestime(&tod);
		now = gethrtime_waitfree();
	}

	cmn_err(CE_NOTE, "!SUNW-MSG-ID: %s, "
	    "TYPE: Error, VER: 1, SEVERITY: Major\n", fm_msgid);

	console_printf(
"\n\rSUNW-MSG-ID: %s, TYPE: Error, VER: 1, SEVERITY: Major\n"
"EVENT-TIME: 0x%lx.0x%lx (0x%llx)\n",
	    fm_msgid, tod.tv_sec, tod.tv_nsec, (u_longlong_t)now);

	console_printf(
"PLATFORM: %s, CSN: -, HOSTNAME: %s\n"
"SOURCE: %s, REV: %s %s\n",
	    platform, utsname.nodename, utsname.sysname,
	    utsname.release, utsname.version);

	console_printf(
"DESC: Errors have been detected that require a reboot to ensure system\n"
"integrity.  See %s/%s for more information.\n",
	    fm_url, fm_msgid);

	console_printf(
"AUTO-RESPONSE: Solaris will attempt to save and diagnose the error telemetry\n"
"IMPACT: The system will sync files, save a crash dump if needed, and reboot\n"
"REC-ACTION: Save the error summary below in case telemetry cannot be saved\n");

	console_printf("\n");
}

/*
 * Utility function to write all of the pending ereports to the dump device.
 * This function is called at either normal reboot or panic time, and simply
 * iterates over the in-transit messages in the ereport sysevent channel.
 */
void
fm_ereport_dump(void)
{
	evchanq_t *chq;
	sysevent_t *sep;
	erpt_dump_t ed;

	timespec_t tod;
	hrtime_t now;
	char *buf;
	size_t len;

	if (panicstr) {
		tod = panic_hrestime;
		now = panic_hrtime;
	} else {
		if (ereport_errorq != NULL)
			errorq_drain(ereport_errorq);
		gethrestime(&tod);
		now = gethrtime_waitfree();
	}

	/*
	 * In the panic case, sysevent_evc_walk_init() will return NULL.
	 */
	if ((chq = sysevent_evc_walk_init(ereport_chan, NULL)) == NULL &&
	    !panicstr)
		return; /* event channel isn't initialized yet */

	while ((sep = sysevent_evc_walk_step(chq)) != NULL) {
		if ((buf = sysevent_evc_event_attr(sep, &len)) == NULL)
			break;

		ed.ed_magic = ERPT_MAGIC;
		ed.ed_chksum = checksum32(buf, len);
		ed.ed_size = (uint32_t)len;
		ed.ed_pad = 0;
		ed.ed_hrt_nsec = SE_TIME(sep);
		ed.ed_hrt_base = now;
		ed.ed_tod_base.sec = tod.tv_sec;
		ed.ed_tod_base.nsec = tod.tv_nsec;

		dumpvp_write(&ed, sizeof (ed));
		dumpvp_write(buf, len);
	}

	sysevent_evc_walk_fini(chq);
}
#endif

/*
 * Post an error report (ereport) to the sysevent error channel.  The error
 * channel must be established with a prior call to sysevent_evc_create()
 * before publication may occur.
 */
void
fm_ereport_post(nvlist_t *ereport, int evc_flag)
{
	size_t nvl_size = 0;
	evchan_t *error_chan;
	sysevent_id_t eid;

	(void) nvlist_size(ereport, &nvl_size, NV_ENCODE_NATIVE);
	if (nvl_size > ERPT_DATA_SZ || nvl_size == 0) {
		atomic_add_64(&erpt_kstat_data.erpt_dropped.value.ui64, 1);
		return;
	}

#ifdef sun
	if (sysevent_evc_bind(FM_ERROR_CHAN, &error_chan,
	    EVCH_CREAT|EVCH_HOLD_PEND) != 0) {
		atomic_add_64(&erpt_kstat_data.erpt_dropped.value.ui64, 1);
		return;
	}

	if (sysevent_evc_publish(error_chan, EC_FM, ESC_FM_ERROR,
	    SUNW_VENDOR, FM_PUB, ereport, evc_flag) != 0) {
		atomic_add_64(&erpt_kstat_data.erpt_dropped.value.ui64, 1);
		(void) sysevent_evc_unbind(error_chan);
		return;
	}
	(void) sysevent_evc_unbind(error_chan);
#else
	(void) ddi_log_sysevent(NULL, SUNW_VENDOR, EC_DEV_STATUS,
	    ESC_DEV_DLE, ereport, &eid, DDI_SLEEP);
#endif
}

/*
 * Wrappers for FM nvlist allocators
 */
/* ARGSUSED */
static void *
i_fm_alloc(nv_alloc_t *nva, size_t size)
{
	return (kmem_zalloc(size, KM_SLEEP));
}

/* ARGSUSED */
static void
i_fm_free(nv_alloc_t *nva, void *buf, size_t size)
{
	kmem_free(buf, size);
}

const nv_alloc_ops_t fm_mem_alloc_ops = {
	NULL,
	NULL,
	i_fm_alloc,
	i_fm_free,
	NULL
};

/*
 * Create and initialize a new nv_alloc_t for a fixed buffer, buf.  A pointer
 * to the newly allocated nv_alloc_t structure is returned upon success or NULL
 * is returned to indicate that the nv_alloc structure could not be created.
 */
nv_alloc_t *
fm_nva_xcreate(char *buf, size_t bufsz)
{
	nv_alloc_t *nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);

	if (bufsz == 0 || nv_alloc_init(nvhdl, nv_fixed_ops, buf, bufsz) != 0) {
		kmem_free(nvhdl, sizeof (nv_alloc_t));
		return (NULL);
	}

	return (nvhdl);
}

/*
 * Destroy a previously allocated nv_alloc structure.  The fixed buffer
 * associated with nva must be freed by the caller.
 */
void
fm_nva_xdestroy(nv_alloc_t *nva)
{
	nv_alloc_fini(nva);
	kmem_free(nva, sizeof (nv_alloc_t));
}

/*
 * Create a new nv list.  A pointer to a new nv list structure is returned
 * upon success or NULL is returned to indicate that the structure could
 * not be created.  The newly created nv list is created and managed by the
 * operations installed in nva.  If nva is NULL, the default FMA nva
 * operations are installed and used.
 *
 * When called from the kernel and nva == NULL, this function must be called
 * from passive kernel context with no locks held that can prevent a
 * sleeping memory allocation from occurring.  Otherwise, this function may
 * be called from other kernel contexts as long as a valid nva created via
 * fm_nva_create() is supplied.
 */
nvlist_t *
fm_nvlist_create(nv_alloc_t *nva)
{
	int hdl_alloced = 0;
	nvlist_t *nvl;
	nv_alloc_t *nvhdl;

	if (nva == NULL) {
		nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);

		if (nv_alloc_init(nvhdl, &fm_mem_alloc_ops, NULL, 0) != 0) {
			kmem_free(nvhdl, sizeof (nv_alloc_t));
			return (NULL);
		}
		hdl_alloced = 1;
	} else {
		nvhdl = nva;
	}

	if (nvlist_xalloc(&nvl, NV_UNIQUE_NAME, nvhdl) != 0) {
		if (hdl_alloced) {
			nv_alloc_fini(nvhdl);
			kmem_free(nvhdl, sizeof (nv_alloc_t));
		}
		return (NULL);
	}

	return (nvl);
}

/*
 * Destroy a previously allocated nvlist structure.  flag indicates whether
 * or not the associated nva structure should be freed (FM_NVA_FREE) or
 * retained (FM_NVA_RETAIN).  Retaining the nv alloc structure allows
 * it to be re-used for future nvlist creation operations.
 */
void
fm_nvlist_destroy(nvlist_t *nvl, int flag)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(nvl);

	nvlist_free(nvl);

	if (nva != NULL) {
		if (flag == FM_NVA_FREE)
			fm_nva_xdestroy(nva);
	}
}
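
/*
 * Example (illustrative sketch only): building an nvlist from a preallocated,
 * caller-owned buffer instead of the kernel memory allocator.  "scratch_buf"
 * and "scratch_len" are hypothetical names for that buffer and its size.
 *
 *	nv_alloc_t *nva = fm_nva_xcreate(scratch_buf, scratch_len);
 *	nvlist_t *nvl = (nva != NULL) ? fm_nvlist_create(nva) : NULL;
 *
 *	if (nvl != NULL) {
 *		... add members, post the ereport, etc ...
 *		fm_nvlist_destroy(nvl, FM_NVA_FREE);	destroys nva as well
 *	}
 *
 * The fixed buffer itself remains owned by the caller and must be released
 * separately, as noted above fm_nva_xdestroy().
 */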

int
i_fm_payload_set(nvlist_t *payload, const char *name, va_list ap)
{
	int nelem, ret = 0;
	data_type_t type;

	while (ret == 0 && name != NULL) {
		type = va_arg(ap, data_type_t);
		switch (type) {
		case DATA_TYPE_BYTE:
			ret = nvlist_add_byte(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_BYTE_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_byte_array(payload, name,
			    va_arg(ap, uchar_t *), nelem);
			break;
		case DATA_TYPE_BOOLEAN_VALUE:
			ret = nvlist_add_boolean_value(payload, name,
			    va_arg(ap, boolean_t));
			break;
		case DATA_TYPE_BOOLEAN_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_boolean_array(payload, name,
			    va_arg(ap, boolean_t *), nelem);
			break;
		case DATA_TYPE_INT8:
			ret = nvlist_add_int8(payload, name,
			    va_arg(ap, int));
			break;
		case DATA_TYPE_INT8_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int8_array(payload, name,
			    va_arg(ap, int8_t *), nelem);
			break;
		case DATA_TYPE_UINT8:
			ret = nvlist_add_uint8(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_UINT8_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint8_array(payload, name,
			    va_arg(ap, uint8_t *), nelem);
			break;
		case DATA_TYPE_INT16:
			ret = nvlist_add_int16(payload, name,
			    va_arg(ap, int));
			break;
		case DATA_TYPE_INT16_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int16_array(payload, name,
			    va_arg(ap, int16_t *), nelem);
			break;
		case DATA_TYPE_UINT16:
			ret = nvlist_add_uint16(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_UINT16_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint16_array(payload, name,
			    va_arg(ap, uint16_t *), nelem);
			break;
		case DATA_TYPE_INT32:
			ret = nvlist_add_int32(payload, name,
			    va_arg(ap, int32_t));
			break;
		case DATA_TYPE_INT32_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int32_array(payload, name,
			    va_arg(ap, int32_t *), nelem);
			break;
		case DATA_TYPE_UINT32:
			ret = nvlist_add_uint32(payload, name,
			    va_arg(ap, uint32_t));
			break;
		case DATA_TYPE_UINT32_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint32_array(payload, name,
			    va_arg(ap, uint32_t *), nelem);
			break;
		case DATA_TYPE_INT64:
			ret = nvlist_add_int64(payload, name,
			    va_arg(ap, int64_t));
			break;
		case DATA_TYPE_INT64_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int64_array(payload, name,
			    va_arg(ap, int64_t *), nelem);
			break;
		case DATA_TYPE_UINT64:
			ret = nvlist_add_uint64(payload, name,
			    va_arg(ap, uint64_t));
			break;
		case DATA_TYPE_UINT64_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint64_array(payload, name,
			    va_arg(ap, uint64_t *), nelem);
			break;
		case DATA_TYPE_STRING:
			ret = nvlist_add_string(payload, name,
			    va_arg(ap, char *));
			break;
		case DATA_TYPE_STRING_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_string_array(payload, name,
			    va_arg(ap, char **), nelem);
			break;
		case DATA_TYPE_NVLIST:
			ret = nvlist_add_nvlist(payload, name,
			    va_arg(ap, nvlist_t *));
			break;
		case DATA_TYPE_NVLIST_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_nvlist_array(payload, name,
			    va_arg(ap, nvlist_t **), nelem);
			break;
		default:
			ret = EINVAL;
		}

		name = va_arg(ap, char *);
	}
	return (ret);
}

void
fm_payload_set(nvlist_t *payload, ...)
{
	int ret;
	const char *name;
	va_list ap;

	va_start(ap, payload);
	name = va_arg(ap, char *);
	ret = i_fm_payload_set(payload, name, ap);
	va_end(ap);

	if (ret)
		atomic_add_64(
		    &erpt_kstat_data.payload_set_failed.value.ui64, 1);
}
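
/*
 * Example (illustrative sketch only): the variable argument list is a series
 * of (name, DATA_TYPE_*, value) groups terminated by a NULL name; array types
 * take the element count before the array pointer.  The member names and
 * variables below are hypothetical.
 *
 *	fm_payload_set(ereport,
 *	    "afar", DATA_TYPE_UINT64, (uint64_t)afar,
 *	    "unum", DATA_TYPE_STRING, unum,
 *	    NULL);
 */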

/*
 * Set up and validate the members of an ereport event according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	class			string		ereport
 *	version			uint8_t		0
 *	ena			uint64_t	<ena>
 *	detector		nvlist_t	<detector>
 *	ereport-payload		nvlist_t	<var args>
 *
 * We don't actually add a 'version' member to the payload.  Really,
 * the version quoted to us by our caller is that of the category 1
 * "ereport" event class (and we require FM_EREPORT_VERS0) but
 * the payload version of the actual leaf class event under construction
 * may be something else.  Callers should supply a version in the varargs,
 * or (better) we could take two version arguments - one for the
 * ereport category 1 classification (expect FM_EREPORT_VERS0) and one
 * for the leaf class.
 */
void
fm_ereport_set(nvlist_t *ereport, int version, const char *erpt_class,
    uint64_t ena, const nvlist_t *detector, ...)
{
	char ereport_class[FM_MAX_CLASS];
	const char *name;
	va_list ap;
	int ret;

	if (version != FM_EREPORT_VERS0) {
		atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
		return;
	}

	(void) snprintf(ereport_class, FM_MAX_CLASS, "%s.%s",
	    FM_EREPORT_CLASS, erpt_class);
	if (nvlist_add_string(ereport, FM_CLASS, ereport_class) != 0) {
		atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
		return;
	}

	if (nvlist_add_uint64(ereport, FM_EREPORT_ENA, ena)) {
		atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
	}

	if (nvlist_add_nvlist(ereport, FM_EREPORT_DETECTOR,
	    (nvlist_t *)detector) != 0) {
		atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
	}

	va_start(ap, detector);
	name = va_arg(ap, const char *);
	ret = i_fm_payload_set(ereport, name, ap);
	va_end(ap);

	if (ret)
		atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
}
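
/*
 * Example (illustrative sketch only): per the note above, a payload-level
 * version can be recorded as an ordinary payload member.  The class suffix,
 * member names and "synd" variable are hypothetical.
 *
 *	fm_ereport_set(ereport, FM_EREPORT_VERS0, "cpu.example.l2cache",
 *	    fm_ena_generate(0, FM_ENA_FMT1), detector,
 *	    "pyld-version", DATA_TYPE_UINT8, (uint8_t)0,
 *	    "syndrome", DATA_TYPE_UINT16, (uint16_t)synd,
 *	    NULL);
 */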

/*
 * Set up and validate the members of an hc fmri according to:
 *
 *	Member name		Type		Value
 *	===================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	hc-name			string		<name>
 *	hc-id			string		<id>
 *
 * Note that auth and hc-id are optional members.
 */

#define	HC_MAXPAIRS	20
#define	HC_MAXNAMELEN	50

static int
fm_fmri_hc_set_common(nvlist_t *fmri, int version, const nvlist_t *auth)
{
	if (version != FM_HC_SCHEME_VERSION) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return (0);
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0 ||
	    nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_HC) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return (0);
	}

	if (auth != NULL && nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
	    (nvlist_t *)auth) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return (0);
	}

	return (1);
}

void
fm_fmri_hc_set(nvlist_t *fmri, int version, const nvlist_t *auth,
    nvlist_t *snvl, int npairs, ...)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
	nvlist_t *pairs[HC_MAXPAIRS];
	va_list ap;
	int i;

	if (!fm_fmri_hc_set_common(fmri, version, auth))
		return;

	npairs = MIN(npairs, HC_MAXPAIRS);

	va_start(ap, npairs);
	for (i = 0; i < npairs; i++) {
		const char *name = va_arg(ap, const char *);
		uint32_t id = va_arg(ap, uint32_t);
		char idstr[11];

		(void) snprintf(idstr, sizeof (idstr), "%u", id);

		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		}
	}
	va_end(ap);

	if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs, npairs) != 0)
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);

	for (i = 0; i < npairs; i++)
		fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);

	if (snvl != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		}
	}
}
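
/*
 * Example (illustrative sketch only): an hc FMRI naming a hypothetical
 * motherboard/chip/cpu location with three name/id pairs and no
 * hc-specific member.
 *
 *	fm_fmri_hc_set(fmri, FM_HC_SCHEME_VERSION, auth, NULL, 3,
 *	    "motherboard", 0, "chip", 1, "cpu", 4);
 */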

/*
 * Set up and validate the members of a dev fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	devpath			string		<devpath>
 *	[devid]			string		<devid>
 *	[target-port-l0id]	string		<target-port-lun0-id>
 *
 * Note that auth and devid are optional members.
 */
void
fm_fmri_dev_set(nvlist_t *fmri_dev, int version, const nvlist_t *auth,
    const char *devpath, const char *devid, const char *tpl0)
{
	int err = 0;

	if (version != DEV_SCHEME_VERSION0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	err |= nvlist_add_uint8(fmri_dev, FM_VERSION, version);
	err |= nvlist_add_string(fmri_dev, FM_FMRI_SCHEME, FM_FMRI_SCHEME_DEV);

	if (auth != NULL) {
		err |= nvlist_add_nvlist(fmri_dev, FM_FMRI_AUTHORITY,
		    (nvlist_t *)auth);
	}

	err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_PATH, devpath);

	if (devid != NULL)
		err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_ID, devid);

	if (tpl0 != NULL)
		err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_TGTPTLUN0, tpl0);

	if (err)
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
}
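
/*
 * Example (illustrative sketch only): a dev FMRI carrying the optional devid
 * and target-port-lun0-id members; all three strings are hypothetical.
 *
 *	fm_fmri_dev_set(fmri, DEV_SCHEME_VERSION0, NULL,
 *	    "/pci@0,0/pci1000,3060@3/sd@1,0",
 *	    "id1,sd@n5000cexample0000", "w500605b0example0");
 */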

/*
 * Set up and validate the members of a cpu fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	cpuid			uint32_t	<cpu_id>
 *	cpumask			uint8_t		<cpu_mask>
 *	serial			uint64_t	<serial_id>
 *
 * Note that auth, cpumask and serial are optional members.
 */
void
fm_fmri_cpu_set(nvlist_t *fmri_cpu, int version, const nvlist_t *auth,
    uint32_t cpu_id, uint8_t *cpu_maskp, const char *serial_idp)
{
	uint64_t *failedp = &erpt_kstat_data.fmri_set_failed.value.ui64;

	if (version < CPU_SCHEME_VERSION1) {
		atomic_add_64(failedp, 1);
		return;
	}

	if (nvlist_add_uint8(fmri_cpu, FM_VERSION, version) != 0) {
		atomic_add_64(failedp, 1);
		return;
	}

	if (nvlist_add_string(fmri_cpu, FM_FMRI_SCHEME,
	    FM_FMRI_SCHEME_CPU) != 0) {
		atomic_add_64(failedp, 1);
		return;
	}

	if (auth != NULL && nvlist_add_nvlist(fmri_cpu, FM_FMRI_AUTHORITY,
	    (nvlist_t *)auth) != 0)
		atomic_add_64(failedp, 1);

	if (nvlist_add_uint32(fmri_cpu, FM_FMRI_CPU_ID, cpu_id) != 0)
		atomic_add_64(failedp, 1);

	if (cpu_maskp != NULL && nvlist_add_uint8(fmri_cpu, FM_FMRI_CPU_MASK,
	    *cpu_maskp) != 0)
		atomic_add_64(failedp, 1);

	if (serial_idp == NULL || nvlist_add_string(fmri_cpu,
	    FM_FMRI_CPU_SERIAL_ID, (char *)serial_idp) != 0)
		atomic_add_64(failedp, 1);
}
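
/*
 * Example (illustrative sketch only): a cpu FMRI for a hypothetical cpu id,
 * mask and serial string; auth may be NULL when no authority is available.
 *
 *	fm_fmri_cpu_set(fmri, CPU_SCHEME_VERSION1, NULL, cpu_id,
 *	    &cpu_mask, serial_str);
 */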

/*
 * Set up and validate the members of a mem fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>		[optional]
 *	unum			string		<unum>
 *	serial			string		<serial>	[optional*]
 *	offset			uint64_t	<offset>	[optional]
 *
 *	* serial is required if offset is present
 */
void
fm_fmri_mem_set(nvlist_t *fmri, int version, const nvlist_t *auth,
    const char *unum, const char *serial, uint64_t offset)
{
	if (version != MEM_SCHEME_VERSION0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (!serial && (offset != (uint64_t)-1)) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_MEM) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (auth != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
		    (nvlist_t *)auth) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		}
	}

	if (nvlist_add_string(fmri, FM_FMRI_MEM_UNUM, unum) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
	}

	if (serial != NULL) {
		if (nvlist_add_string_array(fmri, FM_FMRI_MEM_SERIAL_ID,
		    (char **)&serial, 1) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		}
		if (offset != (uint64_t)-1) {
			if (nvlist_add_uint64(fmri, FM_FMRI_MEM_OFFSET,
			    offset) != 0) {
				atomic_add_64(&erpt_kstat_data.
				    fmri_set_failed.value.ui64, 1);
			}
		}
	}
}
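
/*
 * Example (illustrative sketch only): a mem FMRI identifying a hypothetical
 * DIMM by unum alone; pass (uint64_t)-1 for the offset when none is known,
 * since an offset without a serial number is rejected above.
 *
 *	fm_fmri_mem_set(fmri, MEM_SCHEME_VERSION0, NULL,
 *	    "MB/CMP0/BR0/CH0/D0", NULL, (uint64_t)-1);
 */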

void
fm_fmri_zfs_set(nvlist_t *fmri, int version, uint64_t pool_guid,
    uint64_t vdev_guid)
{
	if (version != ZFS_SCHEME_VERSION0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_POOL, pool_guid) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
	}

	if (vdev_guid != 0) {
		if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_VDEV, vdev_guid) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		}
	}
}
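
/*
 * Example (illustrative sketch only): a zfs FMRI naming a whole pool; a
 * vdev_guid of 0 omits the vdev member.  spa_guid(spa) stands in for however
 * the caller obtains the pool GUID.
 *
 *	fm_fmri_zfs_set(fmri, ZFS_SCHEME_VERSION0, spa_guid(spa), 0);
 */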

uint64_t
fm_ena_increment(uint64_t ena)
{
	uint64_t new_ena;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		new_ena = ena + (1 << ENA_FMT1_GEN_SHFT);
		break;
	case FM_ENA_FMT2:
		new_ena = ena + (1 << ENA_FMT2_GEN_SHFT);
		break;
	default:
		new_ena = 0;
	}

	return (new_ena);
}

uint64_t
fm_ena_generate_cpu(uint64_t timestamp, processorid_t cpuid, uchar_t format)
{
	uint64_t ena = 0;

	switch (format) {
	case FM_ENA_FMT1:
		if (timestamp) {
			ena = (uint64_t)((format & ENA_FORMAT_MASK) |
			    ((cpuid << ENA_FMT1_CPUID_SHFT) &
			    ENA_FMT1_CPUID_MASK) |
			    ((timestamp << ENA_FMT1_TIME_SHFT) &
			    ENA_FMT1_TIME_MASK));
		} else {
			ena = (uint64_t)((format & ENA_FORMAT_MASK) |
			    ((cpuid << ENA_FMT1_CPUID_SHFT) &
			    ENA_FMT1_CPUID_MASK) |
			    ((gethrtime_waitfree() << ENA_FMT1_TIME_SHFT) &
			    ENA_FMT1_TIME_MASK));
		}
		break;
	case FM_ENA_FMT2:
		ena = (uint64_t)((format & ENA_FORMAT_MASK) |
		    ((timestamp << ENA_FMT2_TIME_SHFT) & ENA_FMT2_TIME_MASK));
		break;
	default:
		break;
	}

	return (ena);
}

uint64_t
fm_ena_generate(uint64_t timestamp, uchar_t format)
{
	return (fm_ena_generate_cpu(timestamp, PCPU_GET(cpuid), format));
}

uint64_t
fm_ena_generation_get(uint64_t ena)
{
	uint64_t gen;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		gen = (ena & ENA_FMT1_GEN_MASK) >> ENA_FMT1_GEN_SHFT;
		break;
	case FM_ENA_FMT2:
		gen = (ena & ENA_FMT2_GEN_MASK) >> ENA_FMT2_GEN_SHFT;
		break;
	default:
		gen = 0;
		break;
	}

	return (gen);
}

uchar_t
fm_ena_format_get(uint64_t ena)
{

	return (ENA_FORMAT(ena));
}

uint64_t
fm_ena_id_get(uint64_t ena)
{
	uint64_t id;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		id = (ena & ENA_FMT1_ID_MASK) >> ENA_FMT1_ID_SHFT;
		break;
	case FM_ENA_FMT2:
		id = (ena & ENA_FMT2_ID_MASK) >> ENA_FMT2_ID_SHFT;
		break;
	default:
		id = 0;
	}

	return (id);
}

uint64_t
fm_ena_time_get(uint64_t ena)
{
	uint64_t time;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		time = (ena & ENA_FMT1_TIME_MASK) >> ENA_FMT1_TIME_SHFT;
		break;
	case FM_ENA_FMT2:
		time = (ena & ENA_FMT2_TIME_MASK) >> ENA_FMT2_TIME_SHFT;
		break;
	default:
		time = 0;
	}

	return (time);
}
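
/*
 * Example (illustrative sketch only): generating a format-1 ENA for the
 * current CPU and decomposing it again.  Passing a timestamp of 0 makes
 * fm_ena_generate_cpu() substitute the current high-resolution time.
 *
 *	uint64_t ena = fm_ena_generate(0, FM_ENA_FMT1);
 *
 *	ASSERT(fm_ena_format_get(ena) == FM_ENA_FMT1);
 *	time = fm_ena_time_get(ena);		truncated hrtime of generation
 *	ena = fm_ena_increment(ena);		bump the generation field
 *	gen = fm_ena_generation_get(ena);	now 1
 */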

#ifdef sun
/*
 * Convert a getpcstack() trace to symbolic name+offset, and add the resulting
 * string array to a Fault Management ereport as FM_EREPORT_PAYLOAD_NAME_STACK.
 */
void
fm_payload_stack_add(nvlist_t *payload, const pc_t *stack, int depth)
{
	int i;
	char *sym;
	ulong_t off;
	char *stkpp[FM_STK_DEPTH];
	char buf[FM_STK_DEPTH * FM_SYM_SZ];
	char *stkp = buf;

	for (i = 0; i < depth && i != FM_STK_DEPTH; i++, stkp += FM_SYM_SZ) {
		if ((sym = kobj_getsymname(stack[i], &off)) != NULL)
			(void) snprintf(stkp, FM_SYM_SZ, "%s+%lx", sym, off);
		else
			(void) snprintf(stkp, FM_SYM_SZ, "%lx", (long)stack[i]);
		stkpp[i] = stkp;
	}

	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_STACK,
	    DATA_TYPE_STRING_ARRAY, depth, stkpp, NULL);
}
#endif

#ifdef sun
void
print_msg_hwerr(ctid_t ct_id, proc_t *p)
{
	uprintf("Killed process %d (%s) in contract id %d "
	    "due to hardware error\n", p->p_pid, p->p_user.u_comm, ct_id);
}
#endif

void
fm_fmri_hc_create(nvlist_t *fmri, int version, const nvlist_t *auth,
    nvlist_t *snvl, nvlist_t *bboard, int npairs, ...)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
	nvlist_t *pairs[HC_MAXPAIRS];
	nvlist_t **hcl;
	uint_t n;
	int i, j;
	va_list ap;
	char *hcname, *hcid;

	if (!fm_fmri_hc_set_common(fmri, version, auth))
		return;

	/*
	 * copy the bboard nvpairs to the pairs array
	 */
	if (nvlist_lookup_nvlist_array(bboard, FM_FMRI_HC_LIST, &hcl, &n)
	    != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	for (i = 0; i < n; i++) {
		if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME,
		    &hcname) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
			return;
		}
		if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &hcid) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
			return;
		}

		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, hcname) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, hcid) != 0) {
			for (j = 0; j <= i; j++) {
				if (pairs[j] != NULL)
					fm_nvlist_destroy(pairs[j],
					    FM_NVA_RETAIN);
			}
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
			return;
		}
	}

	/*
	 * create the pairs from the passed-in pairs
	 */
	npairs = MIN(npairs, HC_MAXPAIRS);

	va_start(ap, npairs);
	for (i = n; i < npairs + n; i++) {
		const char *name = va_arg(ap, const char *);
		uint32_t id = va_arg(ap, uint32_t);
		char idstr[11];
		(void) snprintf(idstr, sizeof (idstr), "%u", id);
		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
			for (j = 0; j <= i; j++) {
				if (pairs[j] != NULL)
					fm_nvlist_destroy(pairs[j],
					    FM_NVA_RETAIN);
			}
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
			return;
		}
	}
	va_end(ap);

	/*
	 * Create the fmri hc list
	 */
	if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs,
	    npairs + n) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	for (i = 0; i < npairs + n; i++) {
		fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);
	}

	if (snvl != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
			return;
		}
	}
}