4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
26 * Fault Management Architecture (FMA) Resource and Protocol Support
28 * The routines contained herein provide services to support kernel subsystems
29 * in publishing fault management telemetry (see PSARC 2002/412 and 2003/089).
31 * Name-Value Pair Lists
33 * The embodiment of an FMA protocol element (event, fmri or authority) is a
34 * name-value pair list (nvlist_t). FMA-specific nvlist constructor and
35 * destructor functions, fm_nvlist_create() and fm_nvlist_destroy(), are used
36 * to create an nvpair list using custom allocators. Callers may choose to
37 * allocate either from the kernel memory allocator, or from a preallocated
38 * buffer, useful in constrained contexts like high-level interrupt routines.
40 * Protocol Event and FMRI Construction
42 * Convenience routines are provided to construct nvlist events according to
43 * the FMA Event Protocol and Naming Schema specification for ereports and
44 * FMRIs for the dev, cpu, hc, mem, legacy hc and de schemes.
48 * Routines to generate ENA formats 0, 1 and 2 are available as well as
49 * routines to increment formats 1 and 2. Individual fields within the
50 * ENA are extractable via fm_ena_time_get(), fm_ena_id_get(),
51 * fm_ena_format_get() and fm_ena_gen_get().
54 #include <sys/types.h>
56 #include <sys/sysevent.h>
57 #include <sys/nvpair.h>
58 #include <sys/cmn_err.h>
59 #include <sys/cpuvar.h>
60 #include <sys/sysmacros.h>
61 #include <sys/systm.h>
62 #include <sys/compress.h>
63 #include <sys/cpuvar.h>
65 #include <sys/kstat.h>
66 #include <sys/processor.h>
68 #include <sys/sunddi.h>
69 #include <sys/systeminfo.h>
70 #include <sys/sysevent/eventdefs.h>
71 #include <sys/fm/util.h>
72 #include <sys/fm/protocol.h>
75 * URL and SUNW-MSG-ID value to display for fm_panic(), defined below. These
76 * values must be kept in sync with the FMA source code in usr/src/cmd/fm.
78 static const char *fm_url = "http://www.sun.com/msg";
79 static const char *fm_msgid = "SUNOS-8000-0G";
80 static char *volatile fm_panicstr = NULL;
83 errorq_t *ereport_errorq;
85 void *ereport_dumpbuf;
86 size_t ereport_dumplen;
88 static uint_t ereport_chanlen = ERPT_EVCH_MAX;
89 static evchan_t *ereport_chan = NULL;
90 static ulong_t ereport_qlen = 0;
91 static size_t ereport_size = 0;
92 static int ereport_cols = 80;
94 extern void fastreboot_disable_highpil(void);
97 * Common fault management kstats to record ereport generation
102 kstat_named_t erpt_dropped; /* num erpts dropped on post */
103 kstat_named_t erpt_set_failed; /* num erpt set failures */
104 kstat_named_t fmri_set_failed; /* num fmri set failures */
105 kstat_named_t payload_set_failed; /* num payload set failures */
108 static struct erpt_kstat erpt_kstat_data = {
109 { "erpt-dropped", KSTAT_DATA_UINT64 },
110 { "erpt-set-failed", KSTAT_DATA_UINT64 },
111 { "fmri-set-failed", KSTAT_DATA_UINT64 },
112 { "payload-set-failed", KSTAT_DATA_UINT64 }
118 fm_drain(void *private, void *data, errorq_elem_t *eep)
120 nvlist_t *nvl = errorq_elem_nvl(ereport_errorq, eep);
122 if (!KERNEL_PANICKED())
123 (void) fm_ereport_post(nvl, EVCH_TRYHARD);
135 (void) sysevent_evc_bind(FM_ERROR_CHAN,
136 &ereport_chan, EVCH_CREAT | EVCH_HOLD_PEND);
138 (void) sysevent_evc_control(ereport_chan,
139 EVCH_SET_CHAN_LEN, &ereport_chanlen);
142 if (ereport_qlen == 0)
143 ereport_qlen = ERPT_MAX_ERRS * MAX(max_ncpus, 4);
145 if (ereport_size == 0)
146 ereport_size = ERPT_DATA_SZ;
149 ereport_errorq = errorq_nvcreate("fm_ereport_queue",
150 (errorq_func_t)fm_drain, NULL, ereport_qlen, ereport_size,
151 FM_ERR_PIL, ERRORQ_VITAL);
152 if (ereport_errorq == NULL)
153 panic("failed to create required ereport error queue");
156 ereport_dumpbuf = kmem_alloc(ereport_size, KM_SLEEP);
157 ereport_dumplen = ereport_size;
159 /* Initialize ereport allocation and generation kstats */
160 ksp = kstat_create("unix", 0, "fm", "misc", KSTAT_TYPE_NAMED,
161 sizeof (struct erpt_kstat) / sizeof (kstat_named_t),
165 ksp->ks_data = &erpt_kstat_data;
168 cmn_err(CE_NOTE, "failed to create fm/misc kstat\n");
175 * Formatting utility function for fm_nvprintr. We attempt to wrap chunks of
176 * output so they aren't split across console lines, and return the end column.
180 fm_printf(int depth, int c, int cols, const char *format, ...)
186 va_start(ap, format);
187 width = vsnprintf(&c1, sizeof (c1), format, ap);
190 if (c + width >= cols) {
191 console_printf("\n\r");
193 if (format[0] != ' ' && depth > 0) {
199 va_start(ap, format);
200 console_vprintf(format, ap);
203 return ((c + width) % cols);
207 * Recursively print a nvlist in the specified column width and return the
208 * column we end up in. This function is called recursively by fm_nvprint(),
209 * below. We generically format the entire nvpair using hexadecimal
210 * integers and strings, and elide any integer arrays. Arrays are basically
211 * used for cache dumps right now, so we suppress them so as not to overwhelm
212 * the amount of console output we produce at panic time. This can be further
213 * enhanced as FMA technology grows based upon the needs of consumers. All
214 * FMA telemetry is logged using the dump device transport, so the console
215 * output serves only as a fallback in case this procedure is unsuccessful.
218 fm_nvprintr(nvlist_t *nvl, int d, int c, int cols)
222 for (nvp = nvlist_next_nvpair(nvl, NULL);
223 nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {
225 data_type_t type = nvpair_type(nvp);
226 const char *name = nvpair_name(nvp);
236 if (strcmp(name, FM_CLASS) == 0)
237 continue; /* already printed by caller */
239 c = fm_printf(d, c, cols, " %s=", name);
242 case DATA_TYPE_BOOLEAN:
243 c = fm_printf(d + 1, c, cols, " 1");
246 case DATA_TYPE_BOOLEAN_VALUE:
247 (void) nvpair_value_boolean_value(nvp, &b);
248 c = fm_printf(d + 1, c, cols, b ? "1" : "0");
252 (void) nvpair_value_byte(nvp, &i8);
253 c = fm_printf(d + 1, c, cols, "%x", i8);
257 (void) nvpair_value_int8(nvp, (void *)&i8);
258 c = fm_printf(d + 1, c, cols, "%x", i8);
261 case DATA_TYPE_UINT8:
262 (void) nvpair_value_uint8(nvp, &i8);
263 c = fm_printf(d + 1, c, cols, "%x", i8);
266 case DATA_TYPE_INT16:
267 (void) nvpair_value_int16(nvp, (void *)&i16);
268 c = fm_printf(d + 1, c, cols, "%x", i16);
271 case DATA_TYPE_UINT16:
272 (void) nvpair_value_uint16(nvp, &i16);
273 c = fm_printf(d + 1, c, cols, "%x", i16);
276 case DATA_TYPE_INT32:
277 (void) nvpair_value_int32(nvp, (void *)&i32);
278 c = fm_printf(d + 1, c, cols, "%x", i32);
281 case DATA_TYPE_UINT32:
282 (void) nvpair_value_uint32(nvp, &i32);
283 c = fm_printf(d + 1, c, cols, "%x", i32);
286 case DATA_TYPE_INT64:
287 (void) nvpair_value_int64(nvp, (void *)&i64);
288 c = fm_printf(d + 1, c, cols, "%llx",
292 case DATA_TYPE_UINT64:
293 (void) nvpair_value_uint64(nvp, &i64);
294 c = fm_printf(d + 1, c, cols, "%llx",
298 case DATA_TYPE_HRTIME:
299 (void) nvpair_value_hrtime(nvp, (void *)&i64);
300 c = fm_printf(d + 1, c, cols, "%llx",
304 case DATA_TYPE_STRING:
305 (void) nvpair_value_string(nvp, &str);
306 c = fm_printf(d + 1, c, cols, "\"%s\"",
307 str ? str : "<NULL>");
310 case DATA_TYPE_NVLIST:
311 c = fm_printf(d + 1, c, cols, "[");
312 (void) nvpair_value_nvlist(nvp, &cnv);
313 c = fm_nvprintr(cnv, d + 1, c, cols);
314 c = fm_printf(d + 1, c, cols, " ]");
317 case DATA_TYPE_NVLIST_ARRAY: {
321 c = fm_printf(d + 1, c, cols, "[");
322 (void) nvpair_value_nvlist_array(nvp, &val, &nelem);
323 for (i = 0; i < nelem; i++) {
324 c = fm_nvprintr(val[i], d + 1, c, cols);
326 c = fm_printf(d + 1, c, cols, " ]");
330 case DATA_TYPE_BOOLEAN_ARRAY:
331 case DATA_TYPE_BYTE_ARRAY:
332 case DATA_TYPE_INT8_ARRAY:
333 case DATA_TYPE_UINT8_ARRAY:
334 case DATA_TYPE_INT16_ARRAY:
335 case DATA_TYPE_UINT16_ARRAY:
336 case DATA_TYPE_INT32_ARRAY:
337 case DATA_TYPE_UINT32_ARRAY:
338 case DATA_TYPE_INT64_ARRAY:
339 case DATA_TYPE_UINT64_ARRAY:
340 case DATA_TYPE_STRING_ARRAY:
341 c = fm_printf(d + 1, c, cols, "[...]");
343 case DATA_TYPE_UNKNOWN:
344 c = fm_printf(d + 1, c, cols, "<unknown>");
353 fm_nvprint(nvlist_t *nvl)
358 console_printf("\r");
360 if (nvlist_lookup_string(nvl, FM_CLASS, &class) == 0)
361 c = fm_printf(0, c, ereport_cols, "%s", class);
363 if (fm_nvprintr(nvl, 0, c, ereport_cols) != 0)
364 console_printf("\n");
366 console_printf("\n");
370 * Wrapper for panic() that first produces an FMA-style message for admins.
371 * Normally such messages are generated by fmd(1M)'s syslog-msgs agent: this
372 * is the one exception to that rule and the only error that gets messaged.
373 * This function is intended for use by subsystems that have detected a fatal
374 * error and enqueued appropriate ereports and wish to then force a panic.
378 fm_panic(const char *format, ...)
382 (void) atomic_cas_ptr((void *)&fm_panicstr, NULL, (void *)format);
383 #if defined(__i386) || defined(__amd64)
384 fastreboot_disable_highpil();
385 #endif /* __i386 || __amd64 */
386 va_start(ap, format);
392 * Simply tell the caller if fm_panicstr is set, ie. an fma event has
393 * caused the panic. If so, something other than the default panic
394 * diagnosis method will diagnose the cause of the panic.
406 * Print any appropriate FMA banner message before the panic message. This
407 * function is called by panicsys() and prints the message for fm_panic().
408 * We print the message here so that it comes after the system is quiesced.
409 * A one-line summary is recorded in the log only (cmn_err(9F) with "!" prefix).
410 * The rest of the message is for the console only and not needed in the log,
411 * so it is printed using console_printf(). We break it up into multiple
412 * chunks so as to avoid overflowing any small legacy prom_printf() buffers.
421 return; /* panic was not initiated by fm_panic(); do nothing */
423 if (KERNEL_PANICKED()) {
424 tod = panic_hrestime;
428 now = gethrtime_waitfree();
431 cmn_err(CE_NOTE, "!SUNW-MSG-ID: %s, "
432 "TYPE: Error, VER: 1, SEVERITY: Major\n", fm_msgid);
435 "\n\rSUNW-MSG-ID: %s, TYPE: Error, VER: 1, SEVERITY: Major\n"
436 "EVENT-TIME: 0x%lx.0x%lx (0x%llx)\n",
437 fm_msgid, tod.tv_sec, tod.tv_nsec, (u_longlong_t)now);
440 "PLATFORM: %s, CSN: -, HOSTNAME: %s\n"
441 "SOURCE: %s, REV: %s %s\n",
442 platform, utsname.nodename, utsname.sysname,
443 utsname.release, utsname.version);
446 "DESC: Errors have been detected that require a reboot to ensure system\n"
447 "integrity. See %s/%s for more information.\n",
451 "AUTO-RESPONSE: Solaris will attempt to save and diagnose the error telemetry\n"
452 "IMPACT: The system will sync files, save a crash dump if needed, and reboot\n"
453 "REC-ACTION: Save the error summary below in case telemetry cannot be saved\n");
455 console_printf("\n");
459 * Utility function to write all of the pending ereports to the dump device.
460 * This function is called at either normal reboot or panic time, and simply
461 * iterates over the in-transit messages in the ereport sysevent channel.
464 fm_ereport_dump(void)
475 if (KERNEL_PANICKED()) {
476 tod = panic_hrestime;
479 if (ereport_errorq != NULL)
480 errorq_drain(ereport_errorq);
482 now = gethrtime_waitfree();
486 * In the panic case, sysevent_evc_walk_init() will return NULL.
488 if ((chq = sysevent_evc_walk_init(ereport_chan, NULL)) == NULL &&
490 return; /* event channel isn't initialized yet */
492 while ((sep = sysevent_evc_walk_step(chq)) != NULL) {
493 if ((buf = sysevent_evc_event_attr(sep, &len)) == NULL)
496 ed.ed_magic = ERPT_MAGIC;
497 ed.ed_chksum = checksum32(buf, len);
498 ed.ed_size = (uint32_t)len;
500 ed.ed_hrt_nsec = SE_TIME(sep);
501 ed.ed_hrt_base = now;
502 ed.ed_tod_base.sec = tod.tv_sec;
503 ed.ed_tod_base.nsec = tod.tv_nsec;
505 dumpvp_write(&ed, sizeof (ed));
506 dumpvp_write(buf, len);
509 sysevent_evc_walk_fini(chq);
514 * Post an error report (ereport) to the sysevent error channel. The error
515 * channel must be established with a prior call to sysevent_evc_create()
516 * before publication may occur.
519 fm_ereport_post(nvlist_t *ereport, int evc_flag)
522 evchan_t *error_chan;
525 (void) nvlist_size(ereport, &nvl_size, NV_ENCODE_NATIVE);
526 if (nvl_size > ERPT_DATA_SZ || nvl_size == 0) {
527 atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
532 if (sysevent_evc_bind(FM_ERROR_CHAN, &error_chan,
533 EVCH_CREAT|EVCH_HOLD_PEND) != 0) {
534 atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
538 if (sysevent_evc_publish(error_chan, EC_FM, ESC_FM_ERROR,
539 SUNW_VENDOR, FM_PUB, ereport, evc_flag) != 0) {
540 atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
541 (void) sysevent_evc_unbind(error_chan);
544 (void) sysevent_evc_unbind(error_chan);
/*
 * NOTE(review): fragment — the enclosing function's signature and the
 * declaration of `eid` are not visible in this chunk of the file.
 * Presumably this logs a device-status (DLE) sysevent via
 * ddi_log_sysevent(9F); confirm against the complete source.
 */
546 (void) ddi_log_sysevent(NULL, SUNW_VENDOR, EC_DEV_STATUS,
547 ESC_DEV_DLE, ereport, &eid, DDI_SLEEP);
552 * Wrapppers for FM nvlist allocators
556 i_fm_alloc(nv_alloc_t *nva, size_t size)
558 return (kmem_zalloc(size, KM_SLEEP));
563 i_fm_free(nv_alloc_t *nva, void *buf, size_t size)
565 kmem_free(buf, size);
568 const nv_alloc_ops_t fm_mem_alloc_ops = {
577 * Create and initialize a new nv_alloc_t for a fixed buffer, buf. A pointer
578 * to the newly allocated nv_alloc_t structure is returned upon success or NULL
579 * is returned to indicate that the nv_alloc structure could not be created.
582 fm_nva_xcreate(char *buf, size_t bufsz)
584 nv_alloc_t *nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);
586 if (bufsz == 0 || nv_alloc_init(nvhdl, nv_fixed_ops, buf, bufsz) != 0) {
587 kmem_free(nvhdl, sizeof (nv_alloc_t));
595 * Destroy a previously allocated nv_alloc structure. The fixed buffer
596 * associated with nva must be freed by the caller.
599 fm_nva_xdestroy(nv_alloc_t *nva)
602 kmem_free(nva, sizeof (nv_alloc_t));
606 * Create a new nv list. A pointer to a new nv list structure is returned
607 * upon success or NULL is returned to indicate that the structure could
608 * not be created. The newly created nv list is created and managed by the
609 * operations installed in nva. If nva is NULL, the default FMA nva
610 * operations are installed and used.
612 * When called from the kernel and nva == NULL, this function must be called
613 * from passive kernel context with no locks held that can prevent a
614 * sleeping memory allocation from occurring. Otherwise, this function may
615 * be called from other kernel contexts as long a valid nva created via
616 * fm_nva_create() is supplied.
619 fm_nvlist_create(nv_alloc_t *nva)
626 nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);
628 if (nv_alloc_init(nvhdl, &fm_mem_alloc_ops, NULL, 0) != 0) {
629 kmem_free(nvhdl, sizeof (nv_alloc_t));
637 if (nvlist_xalloc(&nvl, NV_UNIQUE_NAME, nvhdl) != 0) {
639 nv_alloc_fini(nvhdl);
640 kmem_free(nvhdl, sizeof (nv_alloc_t));
649 * Destroy a previously allocated nvlist structure. flag indicates whether
650 * or not the associated nva structure should be freed (FM_NVA_FREE) or
651 * retained (FM_NVA_RETAIN). Retaining the nv alloc structure allows
652 * it to be re-used for future nvlist creation operations.
655 fm_nvlist_destroy(nvlist_t *nvl, int flag)
657 nv_alloc_t *nva = nvlist_lookup_nv_alloc(nvl);
662 if (flag == FM_NVA_FREE)
663 fm_nva_xdestroy(nva);
668 i_fm_payload_set(nvlist_t *payload, const char *name, va_list ap)
673 while (ret == 0 && name != NULL) {
674 type = va_arg(ap, data_type_t);
677 ret = nvlist_add_byte(payload, name,
680 case DATA_TYPE_BYTE_ARRAY:
681 nelem = va_arg(ap, int);
682 ret = nvlist_add_byte_array(payload, name,
683 va_arg(ap, uchar_t *), nelem);
685 case DATA_TYPE_BOOLEAN_VALUE:
686 ret = nvlist_add_boolean_value(payload, name,
687 va_arg(ap, boolean_t));
689 case DATA_TYPE_BOOLEAN_ARRAY:
690 nelem = va_arg(ap, int);
691 ret = nvlist_add_boolean_array(payload, name,
692 va_arg(ap, boolean_t *), nelem);
695 ret = nvlist_add_int8(payload, name,
698 case DATA_TYPE_INT8_ARRAY:
699 nelem = va_arg(ap, int);
700 ret = nvlist_add_int8_array(payload, name,
701 va_arg(ap, int8_t *), nelem);
703 case DATA_TYPE_UINT8:
704 ret = nvlist_add_uint8(payload, name,
707 case DATA_TYPE_UINT8_ARRAY:
708 nelem = va_arg(ap, int);
709 ret = nvlist_add_uint8_array(payload, name,
710 va_arg(ap, uint8_t *), nelem);
712 case DATA_TYPE_INT16:
713 ret = nvlist_add_int16(payload, name,
716 case DATA_TYPE_INT16_ARRAY:
717 nelem = va_arg(ap, int);
718 ret = nvlist_add_int16_array(payload, name,
719 va_arg(ap, int16_t *), nelem);
721 case DATA_TYPE_UINT16:
722 ret = nvlist_add_uint16(payload, name,
725 case DATA_TYPE_UINT16_ARRAY:
726 nelem = va_arg(ap, int);
727 ret = nvlist_add_uint16_array(payload, name,
728 va_arg(ap, uint16_t *), nelem);
730 case DATA_TYPE_INT32:
731 ret = nvlist_add_int32(payload, name,
732 va_arg(ap, int32_t));
734 case DATA_TYPE_INT32_ARRAY:
735 nelem = va_arg(ap, int);
736 ret = nvlist_add_int32_array(payload, name,
737 va_arg(ap, int32_t *), nelem);
739 case DATA_TYPE_UINT32:
740 ret = nvlist_add_uint32(payload, name,
741 va_arg(ap, uint32_t));
743 case DATA_TYPE_UINT32_ARRAY:
744 nelem = va_arg(ap, int);
745 ret = nvlist_add_uint32_array(payload, name,
746 va_arg(ap, uint32_t *), nelem);
748 case DATA_TYPE_INT64:
749 ret = nvlist_add_int64(payload, name,
750 va_arg(ap, int64_t));
752 case DATA_TYPE_INT64_ARRAY:
753 nelem = va_arg(ap, int);
754 ret = nvlist_add_int64_array(payload, name,
755 va_arg(ap, int64_t *), nelem);
757 case DATA_TYPE_UINT64:
758 ret = nvlist_add_uint64(payload, name,
759 va_arg(ap, uint64_t));
761 case DATA_TYPE_UINT64_ARRAY:
762 nelem = va_arg(ap, int);
763 ret = nvlist_add_uint64_array(payload, name,
764 va_arg(ap, uint64_t *), nelem);
766 case DATA_TYPE_STRING:
767 ret = nvlist_add_string(payload, name,
770 case DATA_TYPE_STRING_ARRAY:
771 nelem = va_arg(ap, int);
772 ret = nvlist_add_string_array(payload, name,
773 va_arg(ap, char **), nelem);
775 case DATA_TYPE_NVLIST:
776 ret = nvlist_add_nvlist(payload, name,
777 va_arg(ap, nvlist_t *));
779 case DATA_TYPE_NVLIST_ARRAY:
780 nelem = va_arg(ap, int);
781 ret = nvlist_add_nvlist_array(payload, name,
782 va_arg(ap, nvlist_t **), nelem);
788 name = va_arg(ap, char *);
794 fm_payload_set(nvlist_t *payload, ...)
800 va_start(ap, payload);
801 name = va_arg(ap, char *);
802 ret = i_fm_payload_set(payload, name, ap);
806 atomic_inc_64(&erpt_kstat_data.payload_set_failed.value.ui64);
810 * Set-up and validate the members of an ereport event according to:
812 * Member name Type Value
813 * ====================================================
814 * class string ereport
817 * detector nvlist_t <detector>
818 * ereport-payload nvlist_t <var args>
820 * We don't actually add a 'version' member to the payload. Really,
821 * the version quoted to us by our caller is that of the category 1
822 * "ereport" event class (and we require FM_EREPORT_VERS0) but
823 * the payload version of the actual leaf class event under construction
824 * may be something else. Callers should supply a version in the varargs,
825 * or (better) we could take two version arguments - one for the
826 * ereport category 1 classification (expect FM_EREPORT_VERS0) and one
827 * for the leaf class.
830 fm_ereport_set(nvlist_t *ereport, int version, const char *erpt_class,
831 uint64_t ena, const nvlist_t *detector, ...)
833 char ereport_class[FM_MAX_CLASS];
838 if (version != FM_EREPORT_VERS0) {
839 atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
843 (void) snprintf(ereport_class, FM_MAX_CLASS, "%s.%s",
844 FM_EREPORT_CLASS, erpt_class);
845 if (nvlist_add_string(ereport, FM_CLASS, ereport_class) != 0) {
846 atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
850 if (nvlist_add_uint64(ereport, FM_EREPORT_ENA, ena)) {
851 atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
854 if (nvlist_add_nvlist(ereport, FM_EREPORT_DETECTOR,
855 (nvlist_t *)detector) != 0) {
856 atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
859 va_start(ap, detector);
860 name = va_arg(ap, const char *);
861 ret = i_fm_payload_set(ereport, name, ap);
865 atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
869 * Set-up and validate the members of an hc fmri according to;
871 * Member name Type Value
872 * ===================================================
874 * auth nvlist_t <auth>
875 * hc-name string <name>
878 * Note that auth and hc-id are optional members.
881 #define HC_MAXPAIRS 20
882 #define HC_MAXNAMELEN 50
885 fm_fmri_hc_set_common(nvlist_t *fmri, int version, const nvlist_t *auth)
887 if (version != FM_HC_SCHEME_VERSION) {
888 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
892 if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0 ||
893 nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_HC) != 0) {
894 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
898 if (auth != NULL && nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
899 (nvlist_t *)auth) != 0) {
900 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
908 fm_fmri_hc_set(nvlist_t *fmri, int version, const nvlist_t *auth,
909 nvlist_t *snvl, int npairs, ...)
911 nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
912 nvlist_t *pairs[HC_MAXPAIRS];
916 if (!fm_fmri_hc_set_common(fmri, version, auth))
919 npairs = MIN(npairs, HC_MAXPAIRS);
921 va_start(ap, npairs);
922 for (i = 0; i < npairs; i++) {
923 const char *name = va_arg(ap, const char *);
924 uint32_t id = va_arg(ap, uint32_t);
927 (void) snprintf(idstr, sizeof (idstr), "%u", id);
929 pairs[i] = fm_nvlist_create(nva);
930 if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
931 nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
933 &erpt_kstat_data.fmri_set_failed.value.ui64);
938 if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs, npairs) != 0)
939 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
941 for (i = 0; i < npairs; i++)
942 fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);
945 if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
947 &erpt_kstat_data.fmri_set_failed.value.ui64);
953 * Set-up and validate the members of an dev fmri according to:
955 * Member name Type Value
956 * ====================================================
958 * auth nvlist_t <auth>
959 * devpath string <devpath>
960 * [devid] string <devid>
961 * [target-port-l0id] string <target-port-lun0-id>
963 * Note that auth and devid are optional members.
966 fm_fmri_dev_set(nvlist_t *fmri_dev, int version, const nvlist_t *auth,
967 const char *devpath, const char *devid, const char *tpl0)
971 if (version != DEV_SCHEME_VERSION0) {
972 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
976 err |= nvlist_add_uint8(fmri_dev, FM_VERSION, version);
977 err |= nvlist_add_string(fmri_dev, FM_FMRI_SCHEME, FM_FMRI_SCHEME_DEV);
980 err |= nvlist_add_nvlist(fmri_dev, FM_FMRI_AUTHORITY,
984 err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_PATH, devpath);
987 err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_ID, devid);
990 err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_TGTPTLUN0, tpl0);
993 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
998 * Set-up and validate the members of an cpu fmri according to:
1000 * Member name Type Value
1001 * ====================================================
1003 * auth nvlist_t <auth>
1004 * cpuid uint32_t <cpu_id>
1005 * cpumask uint8_t <cpu_mask>
1006 * serial uint64_t <serial_id>
1008 * Note that auth, cpumask, serial are optional members.
1012 fm_fmri_cpu_set(nvlist_t *fmri_cpu, int version, const nvlist_t *auth,
1013 uint32_t cpu_id, uint8_t *cpu_maskp, const char *serial_idp)
1015 uint64_t *failedp = &erpt_kstat_data.fmri_set_failed.value.ui64;
1017 if (version < CPU_SCHEME_VERSION1) {
1018 atomic_inc_64(failedp);
1022 if (nvlist_add_uint8(fmri_cpu, FM_VERSION, version) != 0) {
1023 atomic_inc_64(failedp);
1027 if (nvlist_add_string(fmri_cpu, FM_FMRI_SCHEME,
1028 FM_FMRI_SCHEME_CPU) != 0) {
1029 atomic_inc_64(failedp);
1033 if (auth != NULL && nvlist_add_nvlist(fmri_cpu, FM_FMRI_AUTHORITY,
1034 (nvlist_t *)auth) != 0)
1035 atomic_inc_64(failedp);
1037 if (nvlist_add_uint32(fmri_cpu, FM_FMRI_CPU_ID, cpu_id) != 0)
1038 atomic_inc_64(failedp);
1040 if (cpu_maskp != NULL && nvlist_add_uint8(fmri_cpu, FM_FMRI_CPU_MASK,
1042 atomic_inc_64(failedp);
1044 if (serial_idp == NULL || nvlist_add_string(fmri_cpu,
1045 FM_FMRI_CPU_SERIAL_ID, (char *)serial_idp) != 0)
1046 atomic_inc_64(failedp);
1050 * Set-up and validate the members of a mem according to:
1052 * Member name Type Value
1053 * ====================================================
1055 * auth nvlist_t <auth> [optional]
1056 * unum string <unum>
1057 * serial string <serial> [optional*]
1058 * offset uint64_t <offset> [optional]
1060 * * serial is required if offset is present
1063 fm_fmri_mem_set(nvlist_t *fmri, int version, const nvlist_t *auth,
1064 const char *unum, const char *serial, uint64_t offset)
1066 if (version != MEM_SCHEME_VERSION0) {
1067 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1071 if (!serial && (offset != (uint64_t)-1)) {
1072 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1076 if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
1077 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1081 if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_MEM) != 0) {
1082 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1087 if (nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
1088 (nvlist_t *)auth) != 0) {
1090 &erpt_kstat_data.fmri_set_failed.value.ui64);
1094 if (nvlist_add_string(fmri, FM_FMRI_MEM_UNUM, unum) != 0) {
1095 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1098 if (serial != NULL) {
1099 if (nvlist_add_string_array(fmri, FM_FMRI_MEM_SERIAL_ID,
1100 (char **)&serial, 1) != 0) {
1102 &erpt_kstat_data.fmri_set_failed.value.ui64);
1104 if (offset != (uint64_t)-1 && nvlist_add_uint64(fmri,
1105 FM_FMRI_MEM_OFFSET, offset) != 0) {
1107 &erpt_kstat_data.fmri_set_failed.value.ui64);
1113 fm_fmri_zfs_set(nvlist_t *fmri, int version, uint64_t pool_guid,
1116 if (version != ZFS_SCHEME_VERSION0) {
1117 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1121 if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
1122 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1126 if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS) != 0) {
1127 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1131 if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_POOL, pool_guid) != 0) {
1132 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1135 if (vdev_guid != 0) {
1136 if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_VDEV, vdev_guid) != 0) {
1138 &erpt_kstat_data.fmri_set_failed.value.ui64);
1144 fm_ena_increment(uint64_t ena)
1148 switch (ENA_FORMAT(ena)) {
1150 new_ena = ena + (1 << ENA_FMT1_GEN_SHFT);
1153 new_ena = ena + (1 << ENA_FMT2_GEN_SHFT);
1163 fm_ena_generate_cpu(uint64_t timestamp, processorid_t cpuid, uchar_t format)
1170 ena = (uint64_t)((format & ENA_FORMAT_MASK) |
1171 ((cpuid << ENA_FMT1_CPUID_SHFT) &
1172 ENA_FMT1_CPUID_MASK) |
1173 ((timestamp << ENA_FMT1_TIME_SHFT) &
1174 ENA_FMT1_TIME_MASK));
1176 ena = (uint64_t)((format & ENA_FORMAT_MASK) |
1177 ((cpuid << ENA_FMT1_CPUID_SHFT) &
1178 ENA_FMT1_CPUID_MASK) |
1179 ((gethrtime_waitfree() << ENA_FMT1_TIME_SHFT) &
1180 ENA_FMT1_TIME_MASK));
1184 ena = (uint64_t)((format & ENA_FORMAT_MASK) |
1185 ((timestamp << ENA_FMT2_TIME_SHFT) & ENA_FMT2_TIME_MASK));
1195 fm_ena_generate(uint64_t timestamp, uchar_t format)
1197 return (fm_ena_generate_cpu(timestamp, PCPU_GET(cpuid), format));
1201 fm_ena_generation_get(uint64_t ena)
1205 switch (ENA_FORMAT(ena)) {
1207 gen = (ena & ENA_FMT1_GEN_MASK) >> ENA_FMT1_GEN_SHFT;
1210 gen = (ena & ENA_FMT2_GEN_MASK) >> ENA_FMT2_GEN_SHFT;
1221 fm_ena_format_get(uint64_t ena)
1224 return (ENA_FORMAT(ena));
1228 fm_ena_id_get(uint64_t ena)
1232 switch (ENA_FORMAT(ena)) {
1234 id = (ena & ENA_FMT1_ID_MASK) >> ENA_FMT1_ID_SHFT;
1237 id = (ena & ENA_FMT2_ID_MASK) >> ENA_FMT2_ID_SHFT;
1247 fm_ena_time_get(uint64_t ena)
1251 switch (ENA_FORMAT(ena)) {
1253 time = (ena & ENA_FMT1_TIME_MASK) >> ENA_FMT1_TIME_SHFT;
1256 time = (ena & ENA_FMT2_TIME_MASK) >> ENA_FMT2_TIME_SHFT;
1267 * Convert a getpcstack() trace to symbolic name+offset, and add the resulting
1268 * string array to a Fault Management ereport as FM_EREPORT_PAYLOAD_NAME_STACK.
1271 fm_payload_stack_add(nvlist_t *payload, const pc_t *stack, int depth)
1276 char *stkpp[FM_STK_DEPTH];
1277 char buf[FM_STK_DEPTH * FM_SYM_SZ];
1280 for (i = 0; i < depth && i != FM_STK_DEPTH; i++, stkp += FM_SYM_SZ) {
1281 if ((sym = kobj_getsymname(stack[i], &off)) != NULL)
1282 (void) snprintf(stkp, FM_SYM_SZ, "%s+%lx", sym, off);
1284 (void) snprintf(stkp, FM_SYM_SZ, "%lx", (long)stack[i]);
1288 fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_STACK,
1289 DATA_TYPE_STRING_ARRAY, depth, stkpp, NULL);
1295 print_msg_hwerr(ctid_t ct_id, proc_t *p)
1297 uprintf("Killed process %d (%s) in contract id %d "
1298 "due to hardware error\n", p->p_pid, p->p_user.u_comm, ct_id);
1303 fm_fmri_hc_create(nvlist_t *fmri, int version, const nvlist_t *auth,
1304 nvlist_t *snvl, nvlist_t *bboard, int npairs, ...)
1306 nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
1307 nvlist_t *pairs[HC_MAXPAIRS];
1312 char *hcname, *hcid;
1314 if (!fm_fmri_hc_set_common(fmri, version, auth))
1318 * copy the bboard nvpairs to the pairs array
1320 if (nvlist_lookup_nvlist_array(bboard, FM_FMRI_HC_LIST, &hcl, &n)
1322 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1326 for (i = 0; i < n; i++) {
1327 if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME,
1330 &erpt_kstat_data.fmri_set_failed.value.ui64);
1333 if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &hcid) != 0) {
1335 &erpt_kstat_data.fmri_set_failed.value.ui64);
1339 pairs[i] = fm_nvlist_create(nva);
1340 if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, hcname) != 0 ||
1341 nvlist_add_string(pairs[i], FM_FMRI_HC_ID, hcid) != 0) {
1342 for (j = 0; j <= i; j++) {
1343 if (pairs[j] != NULL)
1344 fm_nvlist_destroy(pairs[j],
1348 &erpt_kstat_data.fmri_set_failed.value.ui64);
1354 * create the pairs from passed in pairs
1356 npairs = MIN(npairs, HC_MAXPAIRS);
1358 va_start(ap, npairs);
1359 for (i = n; i < npairs + n; i++) {
1360 const char *name = va_arg(ap, const char *);
1361 uint32_t id = va_arg(ap, uint32_t);
1363 (void) snprintf(idstr, sizeof (idstr), "%u", id);
1364 pairs[i] = fm_nvlist_create(nva);
1365 if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
1366 nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
1367 for (j = 0; j <= i; j++) {
1368 if (pairs[j] != NULL)
1369 fm_nvlist_destroy(pairs[j],
1373 &erpt_kstat_data.fmri_set_failed.value.ui64);
1380 * Create the fmri hc list
1382 if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs,
1384 atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1388 for (i = 0; i < npairs + n; i++) {
1389 fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);
1393 if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
1395 &erpt_kstat_data.fmri_set_failed.value.ui64);