4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2012 by Delphix. All rights reserved.
25 * Copyright (c) 2013 Steven Hartland. All rights reserved.
29 * This file contains the functions which analyze the status of a pool. This
30 * includes both the status of an active pool, as well as the status of exported
31 * pools. Returns one of the ZPOOL_STATUS_* defines describing the status of
32 * the pool. This status is independent (to a certain degree) from the state of
33 * the pool. A pool's state describes only whether or not it is capable of
34 * providing the necessary fault tolerance for data. The status describes the
35 * overall status of devices. A pool that is online can still have a device
36 * that is experiencing errors.
38 * Only a subset of the possible faults can be detected using 'zpool status',
39 * and not all possible errors correspond to a FMA message ID. The explanation
40 * is left up to the caller, depending on whether it is a live pool or an
47 #include "libzfs_impl.h"
48 #include "zfeature_common.h"
51 * Message ID table. This must be kept in sync with the ZPOOL_STATUS_* defines
52 * in libzfs.h. Note that there are some status results which go past the end
53 * of this table, and hence have no associated message ID.
/*
 * Indexed directly by the zpool_status_t value (zpool_get_status() and
 * zpool_import_status() use the status as the table index), so the entry
 * order here must track the ZPOOL_STATUS_* enum exactly.  Note that the
 * three hostid-related statuses intentionally share "ZFS-8000-EY".
 */
55 static char *zfs_msgid_table[] = {
56 "ZFS-8000-14", /* ZPOOL_STATUS_CORRUPT_CACHE */
57 "ZFS-8000-2Q", /* ZPOOL_STATUS_MISSING_DEV_R */
58 "ZFS-8000-3C", /* ZPOOL_STATUS_MISSING_DEV_NR */
59 "ZFS-8000-4J", /* ZPOOL_STATUS_CORRUPT_LABEL_R */
60 "ZFS-8000-5E", /* ZPOOL_STATUS_CORRUPT_LABEL_NR */
61 "ZFS-8000-6X", /* ZPOOL_STATUS_BAD_GUID_SUM */
62 "ZFS-8000-72", /* ZPOOL_STATUS_CORRUPT_POOL */
63 "ZFS-8000-8A", /* ZPOOL_STATUS_CORRUPT_DATA */
64 "ZFS-8000-9P", /* ZPOOL_STATUS_FAILING_DEV */
65 "ZFS-8000-A5", /* ZPOOL_STATUS_VERSION_NEWER */
66 "ZFS-8000-EY", /* ZPOOL_STATUS_HOSTID_MISMATCH */
67 "ZFS-8000-EY", /* ZPOOL_STATUS_HOSTID_ACTIVE */
68 "ZFS-8000-EY", /* ZPOOL_STATUS_HOSTID_REQUIRED */
69 "ZFS-8000-HC", /* ZPOOL_STATUS_IO_FAILURE_WAIT */
70 "ZFS-8000-JQ", /* ZPOOL_STATUS_IO_FAILURE_CONTINUE */
71 "ZFS-8000-MM", /* ZPOOL_STATUS_IO_FAILURE_MMP */
72 "ZFS-8000-K4", /* ZPOOL_STATUS_BAD_LOG */
74 * The following results have no message ID.
75 * ZPOOL_STATUS_UNSUP_FEAT_READ
76 * ZPOOL_STATUS_UNSUP_FEAT_WRITE
77 * ZPOOL_STATUS_FAULTED_DEV_R
78 * ZPOOL_STATUS_FAULTED_DEV_NR
79 * ZPOOL_STATUS_VERSION_OLDER
80 * ZPOOL_STATUS_FEAT_DISABLED
81 * ZPOOL_STATUS_RESILVERING
82 * ZPOOL_STATUS_OFFLINE_DEV
83 * ZPOOL_STATUS_REMOVED_DEV
/* Number of entries above; statuses at or past this index have no msgid. */
88 #define NMSGID (sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0]))
/*
 * Predicate for find_vdev_problem(): nonzero when the device simply could
 * not be opened (i.e. is missing) -- CANT_OPEN with aux OPEN_FAILED.
 * 'vsc' (stats array size) is unused here.
 */
92 vdev_missing(vdev_stat_t *vs, uint_t vsc)
94 return (vs->vs_state == VDEV_STATE_CANT_OPEN &&
95 vs->vs_aux == VDEV_AUX_OPEN_FAILED);
/* Predicate for find_vdev_problem(): nonzero when the vdev is FAULTED. */
100 vdev_faulted(vdev_stat_t *vs, uint_t vsc)
102 return (vs->vs_state == VDEV_STATE_FAULTED);
/*
 * Predicate for find_vdev_problem(): nonzero when the vdev is DEGRADED or
 * has accumulated any read, write, or checksum errors.
 */
107 vdev_errors(vdev_stat_t *vs, uint_t vsc)
109 return (vs->vs_state == VDEV_STATE_DEGRADED ||
110 vs->vs_read_errors != 0 || vs->vs_write_errors != 0 ||
111 vs->vs_checksum_errors != 0);
/*
 * Predicate for find_vdev_problem(): nonzero when the vdev cannot be
 * opened for any reason (any CANT_OPEN aux code, unlike vdev_missing()).
 */
116 vdev_broken(vdev_stat_t *vs, uint_t vsc)
118 return (vs->vs_state == VDEV_STATE_CANT_OPEN);
/* Predicate for find_vdev_problem(): nonzero when the vdev is OFFLINE. */
123 vdev_offlined(vdev_stat_t *vs, uint_t vsc)
125 return (vs->vs_state == VDEV_STATE_OFFLINE);
/* Predicate for find_vdev_problem(): nonzero when the vdev is REMOVED. */
130 vdev_removed(vdev_stat_t *vs, uint_t vsc)
132 return (vs->vs_state == VDEV_STATE_REMOVED);
/*
 * Predicate for find_vdev_problem(): nonzero when the vdev's configured
 * ashift is smaller than the physical sector shift the device reports.
 * Only evaluated when the stats array is large enough to actually contain
 * vs_physical_ashift (VDEV_STAT_VALID guards against older stat layouts).
 */
136 vdev_non_native_ashift(vdev_stat_t *vs, uint_t vsc)
138 return (VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
139 vs->vs_configured_ashift < vs->vs_physical_ashift);
143 * Detect any leaf devices that have seen errors or could not be opened.
/*
 * Walk the vdev tree rooted at 'vdev', applying 'func' to each vdev's
 * stats, recursing into both regular children (ZPOOL_CONFIG_CHILDREN) and
 * any L2 cache children (ZPOOL_CONFIG_L2CACHE); the result reports whether
 * 'func' flagged anything in the tree.  With 'ignore_replacing' set, a
 * 'replacing' vdev (and hence everything beneath it) is not examined.
 * NOTE(review): the early-return statements for the replacing case and for
 * a matching child/predicate are elided from this view -- confirm against
 * the full source.
 */
146 find_vdev_problem(nvlist_t *vdev, int (*func)(vdev_stat_t *, uint_t),
147 boolean_t ignore_replacing)
151 uint_t c, vsc, children;
154 * Ignore problems within a 'replacing' vdev, since we're presumably in
155 * the process of repairing any such errors, and don't want to call them
156 * out again. We'll pick up the fact that a resilver is happening
159 if (ignore_replacing == B_TRUE) {
162 verify(nvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE,
164 if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
168 if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child,
170 for (c = 0; c < children; c++)
171 if (find_vdev_problem(child[c], func, ignore_replacing))
174 verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
175 (uint64_t **)&vs, &vsc) == 0);
177 if (func(vs, vsc) != 0)
182 * Check any L2 cache devs
184 if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_L2CACHE, &child,
186 for (c = 0; c < children; c++)
187 if (find_vdev_problem(child[c], func, ignore_replacing))
195 * Active pool health status.
197 * To determine the status for a pool, we make several passes over the config,
198 * picking the most egregious error we find. In order of importance, we do the
201 * - Check for a complete and valid configuration
202 * - Look for any faulted or missing devices in a non-replicated config
203 * - Check for any data errors
204 * - Check for any faulted or missing devices in a replicated config
205 * - Look for any devices showing errors
206 * - Check for any resilvering devices
208 * There can obviously be multiple errors within a single pool, so this routine
209 * only picks the most damaging of all the current errors to report.
/*
 * Classify the pool's health from its config nvlist.  Conditions are
 * tested roughly from most to least severe and the first match is
 * returned; ZPOOL_STATUS_OK falls out only if nothing matched.
 * 'isimport' marks an exported config being examined for import; it
 * suppresses the per-device error check (vdev_errors) below.
 */
211 static zpool_status_t
212 check_status(nvlist_t *config, boolean_t isimport)
216 pool_scan_stat_t *ps = NULL;
223 unsigned long system_hostid = get_system_hostid();
225 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
227 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
229 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
230 (uint64_t **)&vs, &vsc) == 0);
231 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
235 * Currently resilvering a vdev
237 (void) nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
238 (uint64_t **)&ps, &psc);
239 if (ps != NULL && ps->pss_func == POOL_SCAN_RESILVER &&
240 ps->pss_state == DSS_SCANNING)
241 return (ZPOOL_STATUS_RESILVERING);
244 * The multihost property is set and the pool may be active.
246 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
247 vs->vs_aux == VDEV_AUX_ACTIVE) {
248 mmp_state_t mmp_state;
251 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
252 mmp_state = fnvlist_lookup_uint64(nvinfo,
253 ZPOOL_CONFIG_MMP_STATE);
255 if (mmp_state == MMP_STATE_ACTIVE)
256 return (ZPOOL_STATUS_HOSTID_ACTIVE);
257 else if (mmp_state == MMP_STATE_NO_HOSTID)
258 return (ZPOOL_STATUS_HOSTID_REQUIRED);
260 return (ZPOOL_STATUS_HOSTID_MISMATCH);
264 * Pool last accessed by another system.
266 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
267 if (hostid != 0 && (unsigned long)hostid != system_hostid &&
268 stateval == POOL_STATE_ACTIVE)
269 return (ZPOOL_STATUS_HOSTID_MISMATCH);
272 * Newer on-disk version.
274 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
275 vs->vs_aux == VDEV_AUX_VERSION_NEWER)
276 return (ZPOOL_STATUS_VERSION_NEWER);
279 * Unsupported feature(s).
281 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
282 vs->vs_aux == VDEV_AUX_UNSUP_FEAT) {
285 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
287 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_CAN_RDONLY))
288 return (ZPOOL_STATUS_UNSUP_FEAT_WRITE);
289 return (ZPOOL_STATUS_UNSUP_FEAT_READ);
293 * Check that the config is complete.
295 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
296 vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
297 return (ZPOOL_STATUS_BAD_GUID_SUM);
300 * Check whether the pool has suspended.
302 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED,
306 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED_REASON,
307 &reason) == 0 && reason == ZIO_SUSPEND_MMP)
308 return (ZPOOL_STATUS_IO_FAILURE_MMP);
310 if (suspended == ZIO_FAILURE_MODE_CONTINUE)
311 return (ZPOOL_STATUS_IO_FAILURE_CONTINUE);
312 return (ZPOOL_STATUS_IO_FAILURE_WAIT);
316 * Could not read a log.
318 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
319 vs->vs_aux == VDEV_AUX_BAD_LOG) {
320 return (ZPOOL_STATUS_BAD_LOG);
324 * Bad devices in non-replicated config.
326 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
327 find_vdev_problem(nvroot, vdev_faulted, B_TRUE))
328 return (ZPOOL_STATUS_FAULTED_DEV_NR);
330 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
331 find_vdev_problem(nvroot, vdev_missing, B_TRUE))
332 return (ZPOOL_STATUS_MISSING_DEV_NR);
334 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
335 find_vdev_problem(nvroot, vdev_broken, B_TRUE))
336 return (ZPOOL_STATUS_CORRUPT_LABEL_NR);
339 * Corrupted pool metadata
341 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
342 vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
343 return (ZPOOL_STATUS_CORRUPT_POOL);
346 * Persistent data errors.
349 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
350 &nerr) == 0 && nerr != 0)
351 return (ZPOOL_STATUS_CORRUPT_DATA);
355 * Missing devices in a replicated config.
357 if (find_vdev_problem(nvroot, vdev_faulted, B_TRUE))
358 return (ZPOOL_STATUS_FAULTED_DEV_R);
359 if (find_vdev_problem(nvroot, vdev_missing, B_TRUE))
360 return (ZPOOL_STATUS_MISSING_DEV_R);
361 if (find_vdev_problem(nvroot, vdev_broken, B_TRUE))
362 return (ZPOOL_STATUS_CORRUPT_LABEL_R);
365 * Devices with errors
367 if (!isimport && find_vdev_problem(nvroot, vdev_errors, B_TRUE))
368 return (ZPOOL_STATUS_FAILING_DEV);
373 if (find_vdev_problem(nvroot, vdev_offlined, B_TRUE))
374 return (ZPOOL_STATUS_OFFLINE_DEV);
379 if (find_vdev_problem(nvroot, vdev_removed, B_TRUE))
380 return (ZPOOL_STATUS_REMOVED_DEV);
383 * Suboptimal, but usable, ashift configuration.
385 if (find_vdev_problem(nvroot, vdev_non_native_ashift, B_FALSE))
386 return (ZPOOL_STATUS_NON_NATIVE_ASHIFT);
389 * Outdated, but usable, version
391 if (SPA_VERSION_IS_SUPPORTED(version) && version != SPA_VERSION)
392 return (ZPOOL_STATUS_VERSION_OLDER);
395 * Usable pool with disabled features
397 if (version >= SPA_VERSION_FEATURES) {
/* Importing configs carry features under LOAD_INFO; live pools under FEATURE_STATS. */
402 feat = fnvlist_lookup_nvlist(config,
403 ZPOOL_CONFIG_LOAD_INFO);
404 if (nvlist_exists(feat, ZPOOL_CONFIG_ENABLED_FEAT))
405 feat = fnvlist_lookup_nvlist(feat,
406 ZPOOL_CONFIG_ENABLED_FEAT);
408 feat = fnvlist_lookup_nvlist(config,
409 ZPOOL_CONFIG_FEATURE_STATS);
/* Any known feature absent from the pool's feature nvlist means "disabled". */
412 for (i = 0; i < SPA_FEATURES; i++) {
413 zfeature_info_t *fi = &spa_feature_table[i];
414 if (!nvlist_exists(feat, fi->fi_guid))
415 return (ZPOOL_STATUS_FEAT_DISABLED);
419 return (ZPOOL_STATUS_OK);
/*
 * Status of an open (active) pool: classify via check_status() with
 * isimport = B_FALSE and hand the matching FMA message ID back through
 * 'msgid'.  NOTE(review): the handling for statuses with no table entry
 * (>= NMSGID) is elided from this view -- confirm before relying on
 * *msgid being set for every status.
 */
423 zpool_get_status(zpool_handle_t *zhp, char **msgid)
425 zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE);
430 *msgid = zfs_msgid_table[ret];
/*
 * Status of an exported pool being considered for import: classify the
 * caller-supplied config via check_status() with isimport = B_TRUE and
 * hand the matching FMA message ID back through 'msgid'.  NOTE(review):
 * as with zpool_get_status(), the >= NMSGID handling is elided here.
 */
436 zpool_import_status(nvlist_t *config, char **msgid)
438 zpool_status_t ret = check_status(config, B_TRUE);
443 *msgid = zfs_msgid_table[ret];
/*
 * Print one DDT histogram row: the refcount-bucket label, then allocated
 * and referenced block counts and logical/physical/dedup sizes, each
 * humanized with zfs_nicenum().  Rows with a NULL stat or zero blocks are
 * skipped.  NOTE(review): the branch choosing between the "Total" label
 * and the 1ULL << h bucket label is elided from this view; the caller
 * passes h = -1 for the totals row, so the shift presumably never sees a
 * negative h -- confirm in the full source.
 */
449 dump_ddt_stat(const ddt_stat_t *dds, int h)
452 char blocks[6], lsize[6], psize[6], dsize[6];
453 char ref_blocks[6], ref_lsize[6], ref_psize[6], ref_dsize[6];
455 if (dds == NULL || dds->dds_blocks == 0)
459 (void) strcpy(refcnt, "Total");
461 zfs_nicenum(1ULL << h, refcnt, sizeof (refcnt));
463 zfs_nicenum(dds->dds_blocks, blocks, sizeof (blocks));
464 zfs_nicenum(dds->dds_lsize, lsize, sizeof (lsize));
465 zfs_nicenum(dds->dds_psize, psize, sizeof (psize));
466 zfs_nicenum(dds->dds_dsize, dsize, sizeof (dsize));
467 zfs_nicenum(dds->dds_ref_blocks, ref_blocks, sizeof (ref_blocks));
468 zfs_nicenum(dds->dds_ref_lsize, ref_lsize, sizeof (ref_lsize));
469 zfs_nicenum(dds->dds_ref_psize, ref_psize, sizeof (ref_psize));
470 zfs_nicenum(dds->dds_ref_dsize, ref_dsize, sizeof (ref_dsize));
472 (void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
474 blocks, lsize, psize, dsize,
475 ref_blocks, ref_lsize, ref_psize, ref_dsize);
479 * Print the DDT histogram and the column totals.
/*
 * Print the full dedup-table histogram: the two-line column headers, one
 * row per refcount bucket (h = 0..63, rendered by dump_ddt_stat()), and
 * finally the column totals row via dump_ddt_stat(dds_total, -1).
 */
482 zpool_dump_ddt(const ddt_stat_t *dds_total, const ddt_histogram_t *ddh)
488 (void) printf("bucket "
491 (void) printf("______ "
492 "______________________________ "
493 "______________________________\n");
495 (void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
497 "blocks", "LSIZE", "PSIZE", "DSIZE",
498 "blocks", "LSIZE", "PSIZE", "DSIZE");
500 (void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
502 "------", "-----", "-----", "-----",
503 "------", "-----", "-----", "-----");
505 for (h = 0; h < 64; h++)
506 dump_ddt_stat(&ddh->ddh_stat[h], h);
508 dump_ddt_stat(dds_total, -1);