Diffstat (limited to 'sys/contrib/openzfs/cmd/zpool/zpool_main.c')
-rw-r--r--	sys/contrib/openzfs/cmd/zpool/zpool_main.c	211
1 file changed, 126 insertions, 85 deletions
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_main.c b/sys/contrib/openzfs/cmd/zpool/zpool_main.c
index d401e087916b..a6658a9c2800 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_main.c
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_main.c
@@ -33,7 +33,7 @@
  * Copyright (c) 2017, Intel Corporation.
  * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
  * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
- * Copyright (c) 2021, 2023, Klara Inc.
+ * Copyright (c) 2021, 2023, 2025, Klara, Inc.
  * Copyright (c) 2021, 2025 Hewlett Packard Enterprise Development LP.
  */
 
@@ -456,7 +456,7 @@ get_usage(zpool_help_t idx)
 		    "<pool> <vdev> ...\n"));
 	case HELP_ATTACH:
 		return (gettext("\tattach [-fsw] [-o property=value] "
-		    "<pool> <device> <new-device>\n"));
+		    "<pool> <vdev> <new-device>\n"));
 	case HELP_CLEAR:
 		return (gettext("\tclear [[--power]|[-nF]] <pool> [device]\n"));
 	case HELP_CREATE:
@@ -752,10 +752,11 @@ usage(boolean_t requested)
 }
 
 /*
- * zpool initialize [-c | -s | -u] [-w] <pool> [<vdev> ...]
+ * zpool initialize [-c | -s | -u] [-w] <-a | pool> [<vdev> ...]
  * Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
  * if none specified.
  *
+ * -a	Use all pools.
  * -c	Cancel. Ends active initializing.
  * -s	Suspend. Initializing can then be restarted with no flags.
  * -u	Uninitialize. Clears initialization state.
@@ -776,7 +777,7 @@ zpool_do_initialize(int argc, char **argv)
 		{"suspend",	no_argument,		NULL,	's'},
 		{"uninit",	no_argument,		NULL,	'u'},
 		{"wait",	no_argument,		NULL,	'w'},
-		{"all",	no_argument,		NULL,	'a'},
+		{"all",		no_argument,		NULL,	'a'},
 		{0, 0, 0, 0}
 	};
 
@@ -5760,24 +5761,6 @@ children:
 	return (ret);
 }
 
-static int
-refresh_iostat(zpool_handle_t *zhp, void *data)
-{
-	iostat_cbdata_t *cb = data;
-	boolean_t missing;
-
-	/*
-	 * If the pool has disappeared, remove it from the list and continue.
-	 */
-	if (zpool_refresh_stats(zhp, &missing) != 0)
-		return (-1);
-
-	if (missing)
-		pool_list_remove(cb->cb_list, zhp);
-
-	return (0);
-}
-
 /*
  * Callback to print out the iostats for the given pool.
  */
@@ -6358,15 +6341,14 @@ get_namewidth_iostat(zpool_handle_t *zhp, void *data)
  * This command can be tricky because we want to be able to deal with pool
  * creation/destruction as well as vdev configuration changes. The bulk of this
  * processing is handled by the pool_list_* routines in zpool_iter.c. We rely
- * on pool_list_update() to detect the addition of new pools. Configuration
- * changes are all handled within libzfs.
+ * on pool_list_refresh() to detect the addition and removal of pools.
+ * Configuration changes are all handled within libzfs.
  */
 int
 zpool_do_iostat(int argc, char **argv)
 {
 	int c;
 	int ret;
-	int npools;
 	float interval = 0;
 	unsigned long count = 0;
 	zpool_list_t *list;
@@ -6617,10 +6599,24 @@ zpool_do_iostat(int argc, char **argv)
 		return (1);
 	}
 
+	int last_npools = 0;
 	for (;;) {
-		if ((npools = pool_list_count(list)) == 0)
+		/*
+		 * Refresh all pools in list, adding or removing pools as
+		 * necessary.
+		 */
+		int npools = pool_list_refresh(list);
+		if (npools == 0) {
 			(void) fprintf(stderr, gettext("no pools available\n"));
-		else {
+		} else {
+			/*
+			 * If the list of pools has changed since last time
+			 * around, reset the iteration count to force the
+			 * header to be redisplayed.
+			 */
+			if (last_npools != npools)
+				cb.cb_iteration = 0;
+
 			/*
 			 * If this is the first iteration and -y was supplied
 			 * we skip any printing.
@@ -6627,17 +6623,8 @@ zpool_do_iostat(int argc, char **argv)
 			 */
 			boolean_t skip = (omit_since_boot &&
 			    cb.cb_iteration == 0);
 
 			/*
-			 * Refresh all statistics. This is done as an
-			 * explicit step before calculating the maximum name
-			 * width, so that any * configuration changes are
-			 * properly accounted for.
-			 */
-			(void) pool_list_iter(list, B_FALSE, refresh_iostat,
-			    &cb);
-
-			/*
 			 * Iterate over all pools to determine the maximum width
 			 * for the pool / device name column across all pools.
 			 */
@@ -6690,6 +6677,7 @@ zpool_do_iostat(int argc, char **argv)
 			if (skip) {
 				(void) fflush(stdout);
 				(void) fsleep(interval);
+				last_npools = npools;
 				continue;
 			}
 
@@ -6727,6 +6715,8 @@ zpool_do_iostat(int argc, char **argv)
 
 		(void) fflush(stdout);
 		(void) fsleep(interval);
+
+		last_npools = npools;
 	}
 
 	pool_list_free(list);
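Taken together, the iostat hunks above are a single behavioural change: the old two-step refresh (pool_list_count() plus a refresh_iostat() callback walked over every handle) becomes one pool_list_refresh() call per interval, which refreshes statistics, adds and removes pools, and returns the live pool count. Below is a self-contained sketch of the resulting loop shape; the stub and its output are ours for illustration, and only the role of pool_list_refresh() and the cb_iteration reset are taken from the hunks.

/*
 * Sketch only: pool_list_refresh_stub() stands in for libzfs's
 * pool_list_refresh(); the real call also adds/removes pool handles.
 */
#include <stdio.h>

static int fake_npools = 3;

/* Stand-in: returns how many pools remain after a refresh. */
static int
pool_list_refresh_stub(void)
{
	if (fake_npools > 1)
		fake_npools--;		/* simulate pools disappearing */
	return (fake_npools);
}

int
main(void)
{
	int iteration = 0;
	int last_npools = 0;

	for (int tick = 0; tick < 4; tick++) {
		int npools = pool_list_refresh_stub();
		if (npools == 0) {
			(void) fprintf(stderr, "no pools available\n");
		} else {
			if (last_npools != npools)
				iteration = 0;	/* force header redisplay */
			if (iteration++ == 0)
				(void) printf("%-8s %5s %5s\n",
				    "pool", "ops", "bw");
			(void) printf("(%d pools this interval)\n", npools);
		}
		last_npools = npools;
	}
	return (0);
}

Note that both continue paths in the real loop update last_npools before sleeping, so a header reprint is triggered on the first interval after the pool set changes even when printing was skipped.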
@@ -6985,7 +6975,6 @@ collect_vdev_prop(zpool_prop_t prop, uint64_t value, const char *str,
 
 /*
  * print static default line per vdev
- * not compatible with '-o' <proplist> option
  */
 static void
 collect_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
@@ -7041,48 +7030,98 @@ collect_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
 		 * 'toplevel' boolean value is passed to the print_one_column()
 		 * to indicate that the value is valid.
 		 */
-		if (VDEV_STAT_VALID(vs_pspace, c) && vs->vs_pspace) {
-			collect_vdev_prop(ZPOOL_PROP_SIZE, vs->vs_pspace, NULL,
-			    scripted, B_TRUE, format, cb->cb_json, props,
-			    cb->cb_json_as_int);
-		} else {
-			collect_vdev_prop(ZPOOL_PROP_SIZE, vs->vs_space, NULL,
-			    scripted, toplevel, format, cb->cb_json, props,
-			    cb->cb_json_as_int);
+		for (zprop_list_t *pl = cb->cb_proplist; pl != NULL;
+		    pl = pl->pl_next) {
+			switch (pl->pl_prop) {
+			case ZPOOL_PROP_SIZE:
+				if (VDEV_STAT_VALID(vs_pspace, c) &&
+				    vs->vs_pspace) {
+					collect_vdev_prop(
+					    ZPOOL_PROP_SIZE, vs->vs_pspace,
+					    NULL, scripted, B_TRUE, format,
+					    cb->cb_json, props,
+					    cb->cb_json_as_int);
+				} else {
+					collect_vdev_prop(
+					    ZPOOL_PROP_SIZE, vs->vs_space, NULL,
+					    scripted, toplevel, format,
+					    cb->cb_json, props,
+					    cb->cb_json_as_int);
+				}
+				break;
+
+			case ZPOOL_PROP_ALLOCATED:
+				collect_vdev_prop(ZPOOL_PROP_ALLOCATED,
+				    vs->vs_alloc, NULL, scripted, toplevel,
+				    format, cb->cb_json, props,
+				    cb->cb_json_as_int);
+				break;
+
+			case ZPOOL_PROP_FREE:
+				collect_vdev_prop(ZPOOL_PROP_FREE,
+				    vs->vs_space - vs->vs_alloc, NULL, scripted,
+				    toplevel, format, cb->cb_json, props,
+				    cb->cb_json_as_int);
+				break;
+
+			case ZPOOL_PROP_CHECKPOINT:
+				collect_vdev_prop(ZPOOL_PROP_CHECKPOINT,
+				    vs->vs_checkpoint_space, NULL, scripted,
+				    toplevel, format, cb->cb_json, props,
+				    cb->cb_json_as_int);
+				break;
+
+			case ZPOOL_PROP_EXPANDSZ:
+				collect_vdev_prop(ZPOOL_PROP_EXPANDSZ,
+				    vs->vs_esize, NULL, scripted, B_TRUE,
+				    format, cb->cb_json, props,
+				    cb->cb_json_as_int);
+				break;
+
+			case ZPOOL_PROP_FRAGMENTATION:
+				collect_vdev_prop(
+				    ZPOOL_PROP_FRAGMENTATION,
+				    vs->vs_fragmentation, NULL, scripted,
+				    (vs->vs_fragmentation != ZFS_FRAG_INVALID &&
+				    toplevel),
+				    format, cb->cb_json, props,
+				    cb->cb_json_as_int);
+				break;
+
+			case ZPOOL_PROP_CAPACITY:
+				cap = (vs->vs_space == 0) ?
+				    0 : (vs->vs_alloc * 10000 / vs->vs_space);
+				collect_vdev_prop(ZPOOL_PROP_CAPACITY, cap,
+				    NULL, scripted, toplevel, format,
+				    cb->cb_json, props, cb->cb_json_as_int);
+				break;
+
+			case ZPOOL_PROP_HEALTH:
+				state = zpool_state_to_name(vs->vs_state,
+				    vs->vs_aux);
+				if (isspare) {
+					if (vs->vs_aux == VDEV_AUX_SPARED)
+						state = "INUSE";
+					else if (vs->vs_state ==
+					    VDEV_STATE_HEALTHY)
+						state = "AVAIL";
+				}
+				collect_vdev_prop(ZPOOL_PROP_HEALTH, 0, state,
+				    scripted, B_TRUE, format, cb->cb_json,
+				    props, cb->cb_json_as_int);
+				break;
+
+			case ZPOOL_PROP_NAME:
+				break;
+
+			default:
+				collect_vdev_prop(pl->pl_prop, 0,
+				    NULL, scripted, B_FALSE, format,
+				    cb->cb_json, props, cb->cb_json_as_int);
+
+			}
+
 		}
-		collect_vdev_prop(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL,
-		    scripted, toplevel, format, cb->cb_json, props,
-		    cb->cb_json_as_int);
-		collect_vdev_prop(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc,
-		    NULL, scripted, toplevel, format, cb->cb_json, props,
-		    cb->cb_json_as_int);
-		collect_vdev_prop(ZPOOL_PROP_CHECKPOINT,
-		    vs->vs_checkpoint_space, NULL, scripted, toplevel, format,
-		    cb->cb_json, props, cb->cb_json_as_int);
-		collect_vdev_prop(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL,
-		    scripted, B_TRUE, format, cb->cb_json, props,
-		    cb->cb_json_as_int);
-		collect_vdev_prop(ZPOOL_PROP_FRAGMENTATION,
-		    vs->vs_fragmentation, NULL, scripted,
-		    (vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel),
-		    format, cb->cb_json, props, cb->cb_json_as_int);
-		cap = (vs->vs_space == 0) ? 0 :
-		    (vs->vs_alloc * 10000 / vs->vs_space);
-		collect_vdev_prop(ZPOOL_PROP_CAPACITY, cap, NULL,
-		    scripted, toplevel, format, cb->cb_json, props,
-		    cb->cb_json_as_int);
-		collect_vdev_prop(ZPOOL_PROP_DEDUPRATIO, 0, NULL,
-		    scripted, toplevel, format, cb->cb_json, props,
-		    cb->cb_json_as_int);
-		state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
-		if (isspare) {
-			if (vs->vs_aux == VDEV_AUX_SPARED)
-				state = "INUSE";
-			else if (vs->vs_state == VDEV_STATE_HEALTHY)
-				state = "AVAIL";
-		}
-		collect_vdev_prop(ZPOOL_PROP_HEALTH, 0, state, scripted,
-		    B_TRUE, format, cb->cb_json, props, cb->cb_json_as_int);
 
 		if (cb->cb_json) {
 			fnvlist_add_nvlist(ent, "properties", props);
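The list rewrite above turns a fixed column sequence into a walk of cb_proplist, so the user's -o property list now controls which per-vdev columns are emitted and in what order (hence the deletion of the "not compatible with '-o' <proplist> option" comment). Properties with no per-vdev meaning fall through to the default branch as a placeholder column, ZPOOL_PROP_NAME is skipped because the name column is handled elsewhere, and ZPOOL_PROP_DEDUPRATIO drops out of the per-vdev rows entirely. A compact sketch of the same dispatch pattern follows; the types and enum below are simplified stand-ins, not the libzfs definitions.

#include <stdio.h>

enum prop { PROP_NAME, PROP_SIZE, PROP_CAPACITY, PROP_GUID };

struct prop_node {
	enum prop p;
	struct prop_node *next;
};

/* Emit one vdev row: one column per requested property, in order. */
static void
emit_vdev_columns(const struct prop_node *pl, unsigned long long space,
    unsigned long long alloc)
{
	for (; pl != NULL; pl = pl->next) {
		switch (pl->p) {
		case PROP_SIZE:
			(void) printf("%llu\t", space);
			break;
		case PROP_CAPACITY:
			/* same basis-point math as the hunk above */
			(void) printf("%llu%%\t", space == 0 ? 0 :
			    alloc * 10000 / space / 100);
			break;
		case PROP_NAME:		/* name column handled elsewhere */
			break;
		default:		/* no per-vdev value */
			(void) printf("-\t");
		}
	}
	(void) printf("\n");
}

int
main(void)
{
	/* Equivalent of requesting "-o size,capacity,guid". */
	struct prop_node guid = { PROP_GUID, NULL };
	struct prop_node capacity = { PROP_CAPACITY, &guid };
	struct prop_node size = { PROP_SIZE, &capacity };

	emit_vdev_columns(&size, 1000ULL << 30, 250ULL << 30);
	return (0);
}

The divide by 100 at print time is our simplification; the real code hands the 10000-based cap value to collect_vdev_prop() and leaves formatting to it.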
@@ -7643,7 +7682,7 @@ zpool_do_replace(int argc, char **argv)
 }
 
 /*
- * zpool attach [-fsw] [-o property=value] <pool> <device>|<vdev> <new_device>
+ * zpool attach [-fsw] [-o property=value] <pool> <vdev> <new_device>
  *
  * -f	Force attach, even if <new_device> appears to be in use.
  * -s	Use sequential instead of healing reconstruction for resilver.
@@ -7651,9 +7690,9 @@ zpool_do_replace(int argc, char **argv)
  * -w	Wait for resilvering (mirror) or expansion (raidz) to complete
  *	before returning.
  *
- * Attach <new_device> to a <device> or <vdev>, where the vdev can be of type
- * mirror or raidz. If <device> is not part of a mirror, then <device> will
- * be transformed into a mirror of <device> and <new_device>. When a mirror
+ * Attach <new_device> to a <vdev>, where the vdev can be of type
+ * device, mirror or raidz. If <vdev> is not part of a mirror, then <vdev> will
+ * be transformed into a mirror of <vdev> and <new_device>. When a mirror
  * is involved, <new_device> will begin life with a DTL of [0, now], and will
 * immediately begin to resilver itself. For the raidz case, a expansion will
 * commence and reflow the raidz data across all the disks including the
@@ -8446,8 +8485,9 @@ date_string_to_sec(const char *timestr, boolean_t rounding)
 }
 
 /*
- * zpool scrub [-e | -s | -p | -C | -E | -S] [-w] <pool> ...
+ * zpool scrub [-e | -s | -p | -C | -E | -S] [-w] [-a | <pool> ...]
 *
+ * -a	Scrub all pools.
  * -e	Only scrub blocks in the error log.
  * -E	End date of scrub.
  * -S	Start date of scrub.
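A concrete reading of the attach change, as our own example rather than text from the commit: if sda is a standalone disk vdev in pool tank, "zpool attach tank sda sdb" transforms it into a two-way mirror of sda and sdb, whereas attaching to an existing mirror or raidz vdev adds the new device to that vdev, with raidz triggering an expansion reflow. The scrub hunk's new -a form works like the initialize and trim forms above: "zpool scrub -a" starts a scrub on every pool on the system instead of requiring each pool to be named.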
@@ -8621,8 +8661,9 @@ zpool_do_resilver(int argc, char **argv)
 }
 
 /*
- * zpool trim [-d] [-r <rate>] [-c | -s] <pool> [<device> ...]
+ * zpool trim [-d] [-r <rate>] [-c | -s] <-a | pool> [<device> ...]
 *
+ * -a	Trim all pools.
  * -c	Cancel. Ends any in-progress trim.
  * -d	Secure trim. Requires kernel and device support.
  * -r <rate> Sets the TRIM rate in bytes (per second). Supports
@@ -12374,7 +12415,7 @@ zpool_do_events_next(ev_opts_t *opts)
 		nvlist_free(nvl);
 	}
 
-	VERIFY(0 == close(zevent_fd));
+	VERIFY0(close(zevent_fd));
 
 	return (ret);
 }
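The final hunk is a mechanical cleanup: VERIFY(0 == close(zevent_fd)) becomes the VERIFY0() shorthand. A minimal stand-in showing the relationship between the two macros, for illustration only; the real definitions (in userspace, libspl's assert.h) report file, line, and the failed expression before aborting.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Simplified stand-in for the ZFS VERIFY macro. */
#define	VERIFY(cond) do {						\
	if (!(cond)) {							\
		(void) fprintf(stderr, "VERIFY(%s) failed\n", #cond);	\
		abort();						\
	}								\
} while (0)

/* VERIFY0(x) is shorthand for "x must evaluate to zero". */
#define	VERIFY0(expr)	VERIFY((expr) == 0)

int
main(void)
{
	int fd = dup(STDOUT_FILENO);

	VERIFY(fd >= 0);
	VERIFY0(close(fd));	/* close(2) returns 0 on success */
	return (0);
}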
