Diffstat (limited to 'sys/contrib/openzfs/cmd/zpool')
-rw-r--r--  sys/contrib/openzfs/cmd/zpool/Makefile.am                    5
-rw-r--r--  sys/contrib/openzfs/cmd/zpool/compatibility.d/openzfs-2.4   48
-rw-r--r--  sys/contrib/openzfs/cmd/zpool/zpool_iter.c                  118
-rw-r--r--  sys/contrib/openzfs/cmd/zpool/zpool_main.c                  656
-rw-r--r--  sys/contrib/openzfs/cmd/zpool/zpool_util.h                    3
-rw-r--r--  sys/contrib/openzfs/cmd/zpool/zpool_vdev.c                   98
6 files changed, 559 insertions(+), 369 deletions(-)
diff --git a/sys/contrib/openzfs/cmd/zpool/Makefile.am b/sys/contrib/openzfs/cmd/zpool/Makefile.am
index 2f962408e5a3..5bb6d8160b18 100644
--- a/sys/contrib/openzfs/cmd/zpool/Makefile.am
+++ b/sys/contrib/openzfs/cmd/zpool/Makefile.am
@@ -148,6 +148,7 @@ dist_zpoolcompat_DATA = \
%D%/compatibility.d/openzfs-2.1-linux \
%D%/compatibility.d/openzfs-2.2 \
%D%/compatibility.d/openzfs-2.3 \
+ %D%/compatibility.d/openzfs-2.4 \
%D%/compatibility.d/openzfsonosx-1.7.0 \
%D%/compatibility.d/openzfsonosx-1.8.1 \
%D%/compatibility.d/openzfsonosx-1.9.3 \
@@ -187,7 +188,9 @@ zpoolcompatlinks = \
"openzfs-2.2 openzfs-2.2-linux" \
"openzfs-2.2 openzfs-2.2-freebsd" \
"openzfs-2.3 openzfs-2.3-linux" \
- "openzfs-2.3 openzfs-2.3-freebsd"
+ "openzfs-2.3 openzfs-2.3-freebsd" \
+ "openzfs-2.4 openzfs-2.4-linux" \
+ "openzfs-2.4 openzfs-2.4-freebsd"
zpoolconfdir = $(sysconfdir)/zfs/zpool.d
INSTALL_DATA_HOOKS += zpool-install-data-hook
diff --git a/sys/contrib/openzfs/cmd/zpool/compatibility.d/openzfs-2.4 b/sys/contrib/openzfs/cmd/zpool/compatibility.d/openzfs-2.4
new file mode 100644
index 000000000000..3fbd91014c95
--- /dev/null
+++ b/sys/contrib/openzfs/cmd/zpool/compatibility.d/openzfs-2.4
@@ -0,0 +1,48 @@
+# Features supported by OpenZFS 2.4 on Linux and FreeBSD
+allocation_classes
+async_destroy
+blake3
+block_cloning
+block_cloning_endian
+bookmark_v2
+bookmark_written
+bookmarks
+device_rebuild
+device_removal
+draid
+dynamic_gang_header
+edonr
+embedded_data
+empty_bpobj
+enabled_txg
+encryption
+extensible_dataset
+fast_dedup
+filesystem_limits
+head_errlog
+hole_birth
+large_blocks
+large_dnode
+large_microzap
+livelist
+log_spacemap
+longname
+lz4_compress
+multi_vdev_crash_dump
+obsolete_counts
+physical_rewrite
+project_quota
+raidz_expansion
+redacted_datasets
+redaction_bookmarks
+redaction_list_spill
+resilver_defer
+sha512
+skein
+spacemap_histogram
+spacemap_v2
+userobj_accounting
+vdev_zaps_v2
+zilsaxattr
+zpool_checkpoint
+zstd_compress
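
Note: files under compatibility.d are feature sets selected through the pool
'compatibility' property, e.g. "zpool create -o compatibility=openzfs-2.4 ..."
or "zpool set compatibility=openzfs-2.4 <pool>", which limits 'zpool upgrade'
and feature activation to the features listed above. The Makefile.am hunk
earlier installs openzfs-2.4-linux and openzfs-2.4-freebsd as links to this
shared file.
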
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_iter.c b/sys/contrib/openzfs/cmd/zpool/zpool_iter.c
index 2eec9a95e24c..fef602736705 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_iter.c
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_iter.c
@@ -26,6 +26,7 @@
/*
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
+ * Copyright (c) 2025, Klara, Inc.
*/
#include <libintl.h>
@@ -52,7 +53,7 @@
typedef struct zpool_node {
zpool_handle_t *zn_handle;
uu_avl_node_t zn_avlnode;
- int zn_mark;
+ hrtime_t zn_last_refresh;
} zpool_node_t;
struct zpool_list {
@@ -62,6 +63,7 @@ struct zpool_list {
uu_avl_pool_t *zl_pool;
zprop_list_t **zl_proplist;
zfs_type_t zl_type;
+ hrtime_t zl_last_refresh;
};
static int
@@ -81,26 +83,30 @@ zpool_compare(const void *larg, const void *rarg, void *unused)
* of known pools.
*/
static int
-add_pool(zpool_handle_t *zhp, void *data)
+add_pool(zpool_handle_t *zhp, zpool_list_t *zlp)
{
- zpool_list_t *zlp = data;
- zpool_node_t *node = safe_malloc(sizeof (zpool_node_t));
+ zpool_node_t *node, *new = safe_malloc(sizeof (zpool_node_t));
uu_avl_index_t idx;
- node->zn_handle = zhp;
- uu_avl_node_init(node, &node->zn_avlnode, zlp->zl_pool);
- if (uu_avl_find(zlp->zl_avl, node, NULL, &idx) == NULL) {
+ new->zn_handle = zhp;
+ uu_avl_node_init(new, &new->zn_avlnode, zlp->zl_pool);
+
+ node = uu_avl_find(zlp->zl_avl, new, NULL, &idx);
+ if (node == NULL) {
if (zlp->zl_proplist &&
zpool_expand_proplist(zhp, zlp->zl_proplist,
zlp->zl_type, zlp->zl_literal) != 0) {
zpool_close(zhp);
- free(node);
+ free(new);
return (-1);
}
- uu_avl_insert(zlp->zl_avl, node, idx);
+ new->zn_last_refresh = zlp->zl_last_refresh;
+ uu_avl_insert(zlp->zl_avl, new, idx);
} else {
+ zpool_refresh_stats_from_handle(node->zn_handle, zhp);
+ node->zn_last_refresh = zlp->zl_last_refresh;
zpool_close(zhp);
- free(node);
+ free(new);
return (-1);
}
@@ -108,6 +114,18 @@ add_pool(zpool_handle_t *zhp, void *data)
}
/*
+ * add_pool(), but always returns 0. This allows zpool_iter() to continue
+ * even if a pool exists in the tree, or we fail to get the properties for
+ * a new one.
+ */
+static int
+add_pool_cb(zpool_handle_t *zhp, void *data)
+{
+ (void) add_pool(zhp, data);
+ return (0);
+}
+
+/*
* Create a list of pools based on the given arguments. If we're given no
* arguments, then iterate over all pools in the system and add them to the AVL
* tree. Otherwise, add only those pool explicitly specified on the command
@@ -135,9 +153,10 @@ pool_list_get(int argc, char **argv, zprop_list_t **proplist, zfs_type_t type,
zlp->zl_type = type;
zlp->zl_literal = literal;
+ zlp->zl_last_refresh = gethrtime();
if (argc == 0) {
- (void) zpool_iter(g_zfs, add_pool, zlp);
+ (void) zpool_iter(g_zfs, add_pool_cb, zlp);
zlp->zl_findall = B_TRUE;
} else {
int i;
@@ -159,15 +178,61 @@ pool_list_get(int argc, char **argv, zprop_list_t **proplist, zfs_type_t type,
}
/*
- * Search for any new pools, adding them to the list. We only add pools when no
- * options were given on the command line. Otherwise, we keep the list fixed as
- * those that were explicitly specified.
+ * Refresh the state of all pools on the list. Additionally, if no options were
+ * given on the command line, add any new pools and remove any that are no
+ * longer available.
*/
-void
-pool_list_update(zpool_list_t *zlp)
+int
+pool_list_refresh(zpool_list_t *zlp)
{
- if (zlp->zl_findall)
- (void) zpool_iter(g_zfs, add_pool, zlp);
+ zlp->zl_last_refresh = gethrtime();
+
+ if (!zlp->zl_findall) {
+ /*
+ * This list is a fixed list of pools, so we must not add
+ * or remove any. Just walk over them and refresh their
+ * state.
+ */
+ int navail = 0;
+ for (zpool_node_t *node = uu_avl_first(zlp->zl_avl);
+ node != NULL; node = uu_avl_next(zlp->zl_avl, node)) {
+ boolean_t missing;
+ zpool_refresh_stats(node->zn_handle, &missing);
+ navail += !missing;
+ node->zn_last_refresh = zlp->zl_last_refresh;
+ }
+ return (navail);
+ }
+
+ /* Search for any new pools and add them to the list. */
+ (void) zpool_iter(g_zfs, add_pool_cb, zlp);
+
+ /* Walk the list of existing pools, and update or remove them. */
+ zpool_node_t *node, *next;
+ for (node = uu_avl_first(zlp->zl_avl); node != NULL; node = next) {
+ next = uu_avl_next(zlp->zl_avl, node);
+
+ /*
+ * Skip any that were refreshed and are online; they were added
+ * by zpool_iter() and are already up to date.
+ */
+ if (node->zn_last_refresh == zlp->zl_last_refresh &&
+ zpool_get_state(node->zn_handle) != POOL_STATE_UNAVAIL)
+ continue;
+
+ /* Refresh and remove if necessary. */
+ boolean_t missing;
+ zpool_refresh_stats(node->zn_handle, &missing);
+ if (missing) {
+ uu_avl_remove(zlp->zl_avl, node);
+ zpool_close(node->zn_handle);
+ free(node);
+ } else {
+ node->zn_last_refresh = zlp->zl_last_refresh;
+ }
+ }
+
+ return (uu_avl_numnodes(zlp->zl_avl));
}
/*
@@ -191,23 +256,6 @@ pool_list_iter(zpool_list_t *zlp, int unavail, zpool_iter_f func,
}
/*
- * Remove the given pool from the list. When running iostat, we want to remove
- * those pools that no longer exist.
- */
-void
-pool_list_remove(zpool_list_t *zlp, zpool_handle_t *zhp)
-{
- zpool_node_t search, *node;
-
- search.zn_handle = zhp;
- if ((node = uu_avl_find(zlp->zl_avl, &search, NULL, NULL)) != NULL) {
- uu_avl_remove(zlp->zl_avl, node);
- zpool_close(node->zn_handle);
- free(node);
- }
-}
-
-/*
* Free all the handles associated with this list.
*/
void
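
Note: a minimal sketch of the calling pattern pool_list_refresh() is designed
for, modelled on the zpool_do_iostat() changes later in this diff. The names
forever, interval, cb and print_iostat stand in for the caller's own state and
callback; this is illustrative only, not part of the change:

	for (;;) {
		/* Refresh stats, pick up new pools, drop vanished ones. */
		int npools = pool_list_refresh(list);

		if (npools == 0)
			(void) fprintf(stderr, gettext("no pools available\n"));
		else
			(void) pool_list_iter(list, B_FALSE, print_iostat, &cb);

		if (!forever)
			break;
		(void) fsleep(interval);
	}
	pool_list_free(list);
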
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_main.c b/sys/contrib/openzfs/cmd/zpool/zpool_main.c
index d401e087916b..b0e05aa2776a 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_main.c
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_main.c
@@ -33,7 +33,7 @@
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
- * Copyright (c) 2021, 2023, Klara Inc.
+ * Copyright (c) 2021, 2023, 2025, Klara, Inc.
* Copyright (c) 2021, 2025 Hewlett Packard Enterprise Development LP.
*/
@@ -43,6 +43,7 @@
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
+#include <inttypes.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
@@ -51,6 +52,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <termios.h>
#include <thread_pool.h>
#include <time.h>
#include <unistd.h>
@@ -456,7 +458,7 @@ get_usage(zpool_help_t idx)
"<pool> <vdev> ...\n"));
case HELP_ATTACH:
return (gettext("\tattach [-fsw] [-o property=value] "
- "<pool> <device> <new-device>\n"));
+ "<pool> <vdev> <new-device>\n"));
case HELP_CLEAR:
return (gettext("\tclear [[--power]|[-nF]] <pool> [device]\n"));
case HELP_CREATE:
@@ -494,8 +496,7 @@ get_usage(zpool_help_t idx)
"[--json-int, --json-pool-key-guid]] ...\n"
"\t [-T d|u] [pool] [interval [count]]\n"));
case HELP_PREFETCH:
- return (gettext("\tprefetch -t <type> [<type opts>] <pool>\n"
- "\t -t ddt <pool>\n"));
+ return (gettext("\tprefetch [-t <type>] <pool>\n"));
case HELP_OFFLINE:
return (gettext("\toffline [--power]|[[-f][-t]] <pool> "
"<device> ...\n"));
@@ -635,7 +636,7 @@ zpool_power_on_and_disk_wait(zpool_handle_t *zhp, char *vdev)
if (rc != 0)
return (rc);
- zpool_disk_wait(vdev_name_to_path(zhp, vdev));
+ (void) zpool_disk_wait(vdev_name_to_path(zhp, vdev));
return (0);
}
@@ -665,7 +666,7 @@ zpool_power_on_pool_and_wait_for_devices(zpool_handle_t *zhp)
*/
FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
- zpool_disk_wait(path);
+ (void) zpool_disk_wait(path);
}
return (0);
@@ -752,10 +753,11 @@ usage(boolean_t requested)
}
/*
- * zpool initialize [-c | -s | -u] [-w] <pool> [<vdev> ...]
+ * zpool initialize [-c | -s | -u] [-w] <-a | pool> [<vdev> ...]
* Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
* if none specified.
*
+ * -a Use all pools.
* -c Cancel. Ends active initializing.
* -s Suspend. Initializing can then be restarted with no flags.
* -u Uninitialize. Clears initialization state.
@@ -776,7 +778,7 @@ zpool_do_initialize(int argc, char **argv)
{"suspend", no_argument, NULL, 's'},
{"uninit", no_argument, NULL, 'u'},
{"wait", no_argument, NULL, 'w'},
- {"all", no_argument, NULL, 'a'},
+ {"all", no_argument, NULL, 'a'},
{0, 0, 0, 0}
};
@@ -847,7 +849,6 @@ zpool_do_initialize(int argc, char **argv)
if (argc < 1 && !initialize_all) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
- return (-1);
}
if (wait && (cmd_type != POOL_INITIALIZE_START)) {
@@ -1038,9 +1039,10 @@ nice_num_str_nvlist(nvlist_t *item, const char *key, uint64_t value,
boolean_t literal, boolean_t as_int, int format)
{
char buf[256];
+
if (literal) {
if (!as_int)
- snprintf(buf, 256, "%llu", (u_longlong_t)value);
+ (void) snprintf(buf, 256, "%llu", (u_longlong_t)value);
} else {
switch (format) {
case ZFS_NICENUM_1024:
@@ -1086,7 +1088,7 @@ zpool_json_schema(int maj_v, int min_v)
nvlist_t *sch = fnvlist_alloc();
nvlist_t *ov = fnvlist_alloc();
- snprintf(cmd, MAX_CMD_LEN, "zpool %s", current_command->name);
+ (void) snprintf(cmd, MAX_CMD_LEN, "zpool %s", current_command->name);
fnvlist_add_string(ov, "command", cmd);
fnvlist_add_uint32(ov, "vers_major", maj_v);
fnvlist_add_uint32(ov, "vers_minor", min_v);
@@ -1117,12 +1119,12 @@ fill_pool_info(nvlist_t *list, zpool_handle_t *zhp, boolean_t addtype,
} else {
char value[ZFS_MAXPROPLEN];
if (guid) {
- snprintf(value, ZFS_MAXPROPLEN, "%llu",
+ (void) snprintf(value, ZFS_MAXPROPLEN, "%llu",
(u_longlong_t)guid);
fnvlist_add_string(list, ZPOOL_CONFIG_POOL_GUID, value);
}
if (txg) {
- snprintf(value, ZFS_MAXPROPLEN, "%llu",
+ (void) snprintf(value, ZFS_MAXPROPLEN, "%llu",
(u_longlong_t)txg);
fnvlist_add_string(list, ZPOOL_CONFIG_POOL_TXG, value);
}
@@ -1181,7 +1183,7 @@ fill_vdev_info(nvlist_t *list, zpool_handle_t *zhp, char *name,
fnvlist_add_uint64(list, "guid", guid);
} else {
char buf[ZFS_MAXPROPLEN];
- snprintf(buf, ZFS_MAXPROPLEN, "%llu",
+ (void) snprintf(buf, ZFS_MAXPROPLEN, "%llu",
(u_longlong_t)guid);
fnvlist_add_string(list, "guid", buf);
}
@@ -1763,7 +1765,7 @@ vdev_is_active(char *vdev_path)
return (1); /* cant open O_EXCL - disk is active */
}
- close(fd);
+ (void) close(fd);
return (0); /* disk is inactive in the pool */
}
@@ -2414,12 +2416,12 @@ zpool_export_one(zpool_handle_t *zhp, void *data)
* So we serialize access here for 'zpool export -a' parallel case.
*/
if (cb->tpool != NULL)
- pthread_mutex_lock(&cb->mnttab_lock);
+ (void) pthread_mutex_lock(&cb->mnttab_lock);
int retval = zpool_disable_datasets(zhp, cb->force);
if (cb->tpool != NULL)
- pthread_mutex_unlock(&cb->mnttab_lock);
+ (void) pthread_mutex_unlock(&cb->mnttab_lock);
if (retval)
return (1);
@@ -2532,7 +2534,7 @@ zpool_do_export(int argc, char **argv)
cb.tpool = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN),
0, NULL);
- pthread_mutex_init(&cb.mnttab_lock, NULL);
+ (void) pthread_mutex_init(&cb.mnttab_lock, NULL);
/* Asynchronously call zpool_export_one using thread pool */
ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
@@ -2650,7 +2652,7 @@ zpool_nvlist_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path,
for (j = data->cols_cnt; j < data->lines_cnt; j++) {
if (data->lines[j]) {
- snprintf(tmp, 256, "extra_%d", k++);
+ (void) snprintf(tmp, 256, "extra_%d", k++);
fnvlist_add_string(item, tmp,
data->lines[j]);
}
@@ -2696,17 +2698,17 @@ zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path)
printf("%*s", vcdl->uniq_cols_width[j], val);
if (j < vcdl->uniq_cols_cnt - 1)
- fputs(" ", stdout);
+ (void) fputs(" ", stdout);
}
/* Print out any values that aren't in a column at the end */
for (j = data->cols_cnt; j < data->lines_cnt; j++) {
/* Did we have any columns? If so print a spacer. */
if (vcdl->uniq_cols_cnt > 0)
- fputs(" ", stdout);
+ (void) fputs(" ", stdout);
val = data->lines[j];
- fputs(val ?: "", stdout);
+ (void) fputs(val ?: "", stdout);
}
break;
}
@@ -2919,7 +2921,7 @@ print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
return;
}
- printf_color(health_str_to_color(state),
+ (void) printf_color(health_str_to_color(state),
"\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth,
name, state);
@@ -2937,26 +2939,26 @@ print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
scolor = ANSI_BLUE;
if (cb->cb_literal) {
- fputc(' ', stdout);
- printf_color(rcolor, "%5llu",
+ (void) fputc(' ', stdout);
+ (void) printf_color(rcolor, "%5llu",
(u_longlong_t)vs->vs_read_errors);
- fputc(' ', stdout);
- printf_color(wcolor, "%5llu",
+ (void) fputc(' ', stdout);
+ (void) printf_color(wcolor, "%5llu",
(u_longlong_t)vs->vs_write_errors);
- fputc(' ', stdout);
- printf_color(ccolor, "%5llu",
+ (void) fputc(' ', stdout);
+ (void) printf_color(ccolor, "%5llu",
(u_longlong_t)vs->vs_checksum_errors);
} else {
zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
zfs_nicenum(vs->vs_checksum_errors, cbuf,
sizeof (cbuf));
- fputc(' ', stdout);
- printf_color(rcolor, "%5s", rbuf);
- fputc(' ', stdout);
- printf_color(wcolor, "%5s", wbuf);
- fputc(' ', stdout);
- printf_color(ccolor, "%5s", cbuf);
+ (void) fputc(' ', stdout);
+ (void) printf_color(rcolor, "%5s", rbuf);
+ (void) fputc(' ', stdout);
+ (void) printf_color(wcolor, "%5s", wbuf);
+ (void) fputc(' ', stdout);
+ (void) printf_color(ccolor, "%5s", cbuf);
}
if (cb->cb_print_slow_ios) {
if (children == 0) {
@@ -2964,14 +2966,14 @@ print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
zfs_nicenum(vs->vs_slow_ios, rbuf,
sizeof (rbuf));
} else {
- snprintf(rbuf, sizeof (rbuf), "-");
+ (void) snprintf(rbuf, sizeof (rbuf), "-");
}
if (cb->cb_literal)
- printf_color(scolor, " %5llu",
+ (void) printf_color(scolor, " %5llu",
(u_longlong_t)vs->vs_slow_ios);
else
- printf_color(scolor, " %5s", rbuf);
+ (void) printf_color(scolor, " %5s", rbuf);
}
if (cb->cb_print_power) {
if (children == 0) {
@@ -2980,7 +2982,7 @@ print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
fnvlist_lookup_string(nv,
ZPOOL_CONFIG_PATH))) {
case 0:
- printf_color(ANSI_RED, " %5s",
+ (void) printf_color(ANSI_RED, " %5s",
gettext("off"));
break;
case 1:
@@ -3426,70 +3428,70 @@ show_import(nvlist_t *config, boolean_t report_error)
if (reason != ZPOOL_STATUS_OK) {
(void) printf("%s", indent);
- printf_color(ANSI_BOLD, gettext("status: "));
+ (void) printf_color(ANSI_BOLD, gettext("status: "));
}
switch (reason) {
case ZPOOL_STATUS_MISSING_DEV_R:
case ZPOOL_STATUS_MISSING_DEV_NR:
case ZPOOL_STATUS_BAD_GUID_SUM:
- printf_color(ANSI_YELLOW, gettext("One or more devices are "
- "missing from the system.\n"));
+ (void) printf_color(ANSI_YELLOW, gettext("One or more devices "
+ "are missing from the system.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_R:
case ZPOOL_STATUS_CORRUPT_LABEL_NR:
- printf_color(ANSI_YELLOW, gettext("One or more devices "
+ (void) printf_color(ANSI_YELLOW, gettext("One or more devices "
"contains corrupted data.\n"));
break;
case ZPOOL_STATUS_CORRUPT_DATA:
- printf_color(ANSI_YELLOW, gettext("The pool data is "
+ (void) printf_color(ANSI_YELLOW, gettext("The pool data is "
"corrupted.\n"));
break;
case ZPOOL_STATUS_OFFLINE_DEV:
- printf_color(ANSI_YELLOW, gettext("One or more devices "
+ (void) printf_color(ANSI_YELLOW, gettext("One or more devices "
"are offlined.\n"));
break;
case ZPOOL_STATUS_CORRUPT_POOL:
- printf_color(ANSI_YELLOW, gettext("The pool metadata is "
+ (void) printf_color(ANSI_YELLOW, gettext("The pool metadata is "
"corrupted.\n"));
break;
case ZPOOL_STATUS_VERSION_OLDER:
- printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
- "a legacy on-disk version.\n"));
+ (void) printf_color(ANSI_YELLOW, gettext("The pool is "
+ "formatted using a legacy on-disk version.\n"));
break;
case ZPOOL_STATUS_VERSION_NEWER:
- printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
- "an incompatible version.\n"));
+ (void) printf_color(ANSI_YELLOW, gettext("The pool is "
+ "formatted using an incompatible version.\n"));
break;
case ZPOOL_STATUS_FEAT_DISABLED:
- printf_color(ANSI_YELLOW, gettext("Some supported "
+ (void) printf_color(ANSI_YELLOW, gettext("Some supported "
"features are not enabled on the pool.\n"
"\t%s(Note that they may be intentionally disabled if the\n"
"\t%s'compatibility' property is set.)\n"), indent, indent);
break;
case ZPOOL_STATUS_COMPATIBILITY_ERR:
- printf_color(ANSI_YELLOW, gettext("Error reading or parsing "
- "the file(s) indicated by the 'compatibility'\n"
+ (void) printf_color(ANSI_YELLOW, gettext("Error reading or "
+ "parsing the file(s) indicated by the 'compatibility'\n"
"\t%sproperty.\n"), indent);
break;
case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
- printf_color(ANSI_YELLOW, gettext("One or more features "
+ (void) printf_color(ANSI_YELLOW, gettext("One or more features "
"are enabled on the pool despite not being\n"
"\t%srequested by the 'compatibility' property.\n"),
indent);
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
- printf_color(ANSI_YELLOW, gettext("The pool uses the following "
- "feature(s) not supported on this system:\n"));
+ (void) printf_color(ANSI_YELLOW, gettext("The pool uses the "
+ "following feature(s) not supported on this system:\n"));
color_start(ANSI_YELLOW);
zpool_collect_unsup_feat(config, buf, 2048);
(void) printf("%s", buf);
@@ -3497,7 +3499,7 @@ show_import(nvlist_t *config, boolean_t report_error)
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
- printf_color(ANSI_YELLOW, gettext("The pool can only be "
+ (void) printf_color(ANSI_YELLOW, gettext("The pool can only be "
"accessed in read-only mode on this system. It\n"
"\t%scannot be accessed in read-write mode because it uses "
"the following\n"
@@ -3510,47 +3512,48 @@ show_import(nvlist_t *config, boolean_t report_error)
break;
case ZPOOL_STATUS_HOSTID_ACTIVE:
- printf_color(ANSI_YELLOW, gettext("The pool is currently "
- "imported by another system.\n"));
+ (void) printf_color(ANSI_YELLOW, gettext("The pool is "
+ "currently imported by another system.\n"));
break;
case ZPOOL_STATUS_HOSTID_REQUIRED:
- printf_color(ANSI_YELLOW, gettext("The pool has the "
+ (void) printf_color(ANSI_YELLOW, gettext("The pool has the "
"multihost property on. It cannot\n"
"\t%sbe safely imported when the system hostid is not "
"set.\n"), indent);
break;
case ZPOOL_STATUS_HOSTID_MISMATCH:
- printf_color(ANSI_YELLOW, gettext("The pool was last accessed "
- "by another system.\n"));
+ (void) printf_color(ANSI_YELLOW, gettext("The pool was last "
+ "accessed by another system.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_R:
case ZPOOL_STATUS_FAULTED_DEV_NR:
- printf_color(ANSI_YELLOW, gettext("One or more devices are "
- "faulted.\n"));
+ (void) printf_color(ANSI_YELLOW, gettext("One or more devices "
+ "are faulted.\n"));
break;
case ZPOOL_STATUS_BAD_LOG:
- printf_color(ANSI_YELLOW, gettext("An intent log record cannot "
- "be read.\n"));
+ (void) printf_color(ANSI_YELLOW, gettext("An intent log record "
+ "cannot be read.\n"));
break;
case ZPOOL_STATUS_RESILVERING:
case ZPOOL_STATUS_REBUILDING:
- printf_color(ANSI_YELLOW, gettext("One or more devices were "
- "being resilvered.\n"));
+ (void) printf_color(ANSI_YELLOW, gettext("One or more devices "
+ "were being resilvered.\n"));
break;
case ZPOOL_STATUS_ERRATA:
- printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
+ (void) printf_color(ANSI_YELLOW,
+ gettext("Errata #%d detected.\n"),
errata);
break;
case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
- printf_color(ANSI_YELLOW, gettext("One or more devices are "
- "configured to use a non-native block size.\n"
+ (void) printf_color(ANSI_YELLOW, gettext("One or more devices "
+ "are configured to use a non-native block size.\n"
"\t%sExpect reduced performance.\n"), indent);
break;
@@ -4199,7 +4202,7 @@ zpool_do_checkpoint(int argc, char **argv)
#define CHECKPOINT_OPT 1024
/*
- * zpool prefetch <type> [<type opts>] <pool>
+ * zpool prefetch [-t <type>] <pool>
*
* Prefetchs a particular type of data in the specified pool.
*/
@@ -4244,20 +4247,27 @@ zpool_do_prefetch(int argc, char **argv)
poolname = argv[0];
- argc--;
- argv++;
-
- if (strcmp(typestr, "ddt") == 0) {
- type = ZPOOL_PREFETCH_DDT;
- } else {
- (void) fprintf(stderr, gettext("unsupported prefetch type\n"));
- usage(B_FALSE);
- }
-
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
- err = zpool_prefetch(zhp, type);
+ if (typestr == NULL) {
+ /* Prefetch all types */
+ err = zpool_prefetch(zhp, ZPOOL_PREFETCH_DDT);
+ if (err == 0)
+ err = zpool_prefetch(zhp, ZPOOL_PREFETCH_BRT);
+ } else {
+ if (strcmp(typestr, "ddt") == 0) {
+ type = ZPOOL_PREFETCH_DDT;
+ } else if (strcmp(typestr, "brt") == 0) {
+ type = ZPOOL_PREFETCH_BRT;
+ } else {
+ (void) fprintf(stderr,
+ gettext("unsupported prefetch type\n"));
+ zpool_close(zhp);
+ usage(B_FALSE);
+ }
+ err = zpool_prefetch(zhp, type);
+ }
zpool_close(zhp);
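
Note: with this hunk, "zpool prefetch <pool>" with no -t option prefetches
both the DDT and the BRT, and -t now accepts either "ddt" or "brt"; the
updated usage string earlier in this file matches the new behavior.
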
@@ -4915,7 +4925,8 @@ print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)
for (j = 0; j < vcdl->uniq_cols_width[i]; j++)
printf("-");
} else {
- printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i],
+ (void) printf_color(ANSI_BOLD, "%*s",
+ vcdl->uniq_cols_width[i],
vcdl->uniq_cols[i]);
}
}
@@ -5066,15 +5077,15 @@ print_iostat_header(iostat_cbdata_t *cb)
static void
print_stat_color(const char *statbuf, unsigned int column_size)
{
- fputs(" ", stdout);
+ (void) fputs(" ", stdout);
size_t len = strlen(statbuf);
while (len < column_size) {
- fputc(' ', stdout);
+ (void) fputc(' ', stdout);
column_size--;
}
if (*statbuf == '0') {
color_start(ANSI_GRAY);
- fputc('0', stdout);
+ (void) fputc('0', stdout);
} else {
for (; *statbuf; statbuf++) {
if (*statbuf == 'K') color_start(ANSI_GREEN);
@@ -5083,7 +5094,7 @@ print_stat_color(const char *statbuf, unsigned int column_size)
else if (*statbuf == 'T') color_start(ANSI_BOLD_BLUE);
else if (*statbuf == 'P') color_start(ANSI_MAGENTA);
else if (*statbuf == 'E') color_start(ANSI_CYAN);
- fputc(*statbuf, stdout);
+ (void) fputc(*statbuf, stdout);
if (--column_size <= 0)
break;
}
@@ -5760,24 +5771,6 @@ children:
return (ret);
}
-static int
-refresh_iostat(zpool_handle_t *zhp, void *data)
-{
- iostat_cbdata_t *cb = data;
- boolean_t missing;
-
- /*
- * If the pool has disappeared, remove it from the list and continue.
- */
- if (zpool_refresh_stats(zhp, &missing) != 0)
- return (-1);
-
- if (missing)
- pool_list_remove(cb->cb_list, zhp);
-
- return (0);
-}
-
/*
* Callback to print out the iostats for the given pool.
*/
@@ -6009,7 +6002,7 @@ get_stat_flags(zpool_list_t *list)
* get_stat_flags_cb() will lop off bits from "mask" until only the
* flags that are supported on all pools remain.
*/
- pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
+ (void) pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
return (mask);
}
@@ -6258,7 +6251,7 @@ print_zpool_dir_scripts(char *dirpath)
print_zpool_script_help(ent->d_name,
fullpath);
}
- closedir(dir);
+ (void) closedir(dir);
}
}
@@ -6358,15 +6351,14 @@ get_namewidth_iostat(zpool_handle_t *zhp, void *data)
* This command can be tricky because we want to be able to deal with pool
* creation/destruction as well as vdev configuration changes. The bulk of this
* processing is handled by the pool_list_* routines in zpool_iter.c. We rely
- * on pool_list_update() to detect the addition of new pools. Configuration
- * changes are all handled within libzfs.
+ * on pool_list_refresh() to detect the addition and removal of pools.
+ * Configuration changes are all handled within libzfs.
*/
int
zpool_do_iostat(int argc, char **argv)
{
int c;
int ret;
- int npools;
float interval = 0;
unsigned long count = 0;
zpool_list_t *list;
@@ -6519,7 +6511,6 @@ zpool_do_iostat(int argc, char **argv)
argv[0], &cb.cb_vdevs);
fprintf(stderr, "\n");
usage(B_FALSE);
- return (1);
}
} else {
/*
@@ -6562,7 +6553,6 @@ zpool_do_iostat(int argc, char **argv)
(void) fprintf(stderr,
gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));
usage(B_FALSE);
- return (1);
}
if (l_histo && rq_histo) {
@@ -6570,7 +6560,6 @@ zpool_do_iostat(int argc, char **argv)
(void) fprintf(stderr,
gettext("Only one of [-r|-w] can be passed at a time\n"));
usage(B_FALSE);
- return (1);
}
/*
@@ -6617,10 +6606,24 @@ zpool_do_iostat(int argc, char **argv)
return (1);
}
+ int last_npools = 0;
for (;;) {
- if ((npools = pool_list_count(list)) == 0)
+ /*
+ * Refresh all pools in list, adding or removing pools as
+ * necessary.
+ */
+ int npools = pool_list_refresh(list);
+ if (npools == 0) {
(void) fprintf(stderr, gettext("no pools available\n"));
- else {
+ } else {
+ /*
+ * If the list of pools has changed since last time
+ * around, reset the iteration count to force the
+ * header to be redisplayed.
+ */
+ if (last_npools != npools)
+ cb.cb_iteration = 0;
+
/*
* If this is the first iteration and -y was supplied
* we skip any printing.
@@ -6629,15 +6632,6 @@ zpool_do_iostat(int argc, char **argv)
cb.cb_iteration == 0);
/*
- * Refresh all statistics. This is done as an
- * explicit step before calculating the maximum name
- * width, so that any * configuration changes are
- * properly accounted for.
- */
- (void) pool_list_iter(list, B_FALSE, refresh_iostat,
- &cb);
-
- /*
* Iterate over all pools to determine the maximum width
* for the pool / device name column across all pools.
*/
@@ -6690,10 +6684,11 @@ zpool_do_iostat(int argc, char **argv)
if (skip) {
(void) fflush(stdout);
(void) fsleep(interval);
+ last_npools = npools;
continue;
}
- pool_list_iter(list, B_FALSE, print_iostat, &cb);
+ (void) pool_list_iter(list, B_FALSE, print_iostat, &cb);
/*
* If there's more than one pool, and we're not in
@@ -6727,6 +6722,8 @@ zpool_do_iostat(int argc, char **argv)
(void) fflush(stdout);
(void) fsleep(interval);
+
+ last_npools = npools;
}
pool_list_free(list);
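
Note: the removed refresh_iostat()/pool_list_remove() path is folded into the
single pool_list_refresh() call at the top of this loop; tracking last_npools
lets the loop reset cb_iteration so the header is reprinted whenever pools
appear or disappear between intervals.
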
@@ -6906,7 +6903,7 @@ collect_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
uint64_t guid = fnvlist_lookup_uint64(
zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_POOL_GUID);
- snprintf(pool_guid, 256, "%llu",
+ (void) snprintf(pool_guid, 256, "%llu",
(u_longlong_t)guid);
fnvlist_add_nvlist(d, pool_guid, item);
} else {
@@ -6973,8 +6970,8 @@ collect_vdev_prop(zpool_prop_t prop, uint64_t value, const char *str,
(void) strlcpy(propval, "-", sizeof (propval));
if (json) {
- zprop_nvlist_one_property(zpool_prop_to_name(prop), propval,
- ZPROP_SRC_NONE, NULL, NULL, nvl, as_int);
+ (void) zprop_nvlist_one_property(zpool_prop_to_name(prop),
+ propval, ZPROP_SRC_NONE, NULL, NULL, nvl, as_int);
} else {
if (scripted)
(void) printf("\t%s", propval);
@@ -6985,7 +6982,6 @@ collect_vdev_prop(zpool_prop_t prop, uint64_t value, const char *str,
/*
* print static default line per vdev
- * not compatible with '-o' <proplist> option
*/
static void
collect_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
@@ -7041,48 +7037,98 @@ collect_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
* 'toplevel' boolean value is passed to the print_one_column()
* to indicate that the value is valid.
*/
- if (VDEV_STAT_VALID(vs_pspace, c) && vs->vs_pspace) {
- collect_vdev_prop(ZPOOL_PROP_SIZE, vs->vs_pspace, NULL,
- scripted, B_TRUE, format, cb->cb_json, props,
- cb->cb_json_as_int);
- } else {
- collect_vdev_prop(ZPOOL_PROP_SIZE, vs->vs_space, NULL,
- scripted, toplevel, format, cb->cb_json, props,
- cb->cb_json_as_int);
+ for (zprop_list_t *pl = cb->cb_proplist; pl != NULL;
+ pl = pl->pl_next) {
+ switch (pl->pl_prop) {
+ case ZPOOL_PROP_SIZE:
+ if (VDEV_STAT_VALID(vs_pspace, c) &&
+ vs->vs_pspace) {
+ collect_vdev_prop(
+ ZPOOL_PROP_SIZE, vs->vs_pspace,
+ NULL, scripted, B_TRUE, format,
+ cb->cb_json, props,
+ cb->cb_json_as_int);
+ } else {
+ collect_vdev_prop(
+ ZPOOL_PROP_SIZE, vs->vs_space, NULL,
+ scripted, toplevel, format,
+ cb->cb_json, props,
+ cb->cb_json_as_int);
+ }
+ break;
+ case ZPOOL_PROP_ALLOCATED:
+ collect_vdev_prop(ZPOOL_PROP_ALLOCATED,
+ vs->vs_alloc, NULL, scripted, toplevel,
+ format, cb->cb_json, props,
+ cb->cb_json_as_int);
+ break;
+
+ case ZPOOL_PROP_FREE:
+ collect_vdev_prop(ZPOOL_PROP_FREE,
+ vs->vs_space - vs->vs_alloc, NULL, scripted,
+ toplevel, format, cb->cb_json, props,
+ cb->cb_json_as_int);
+ break;
+
+ case ZPOOL_PROP_CHECKPOINT:
+ collect_vdev_prop(ZPOOL_PROP_CHECKPOINT,
+ vs->vs_checkpoint_space, NULL, scripted,
+ toplevel, format, cb->cb_json, props,
+ cb->cb_json_as_int);
+ break;
+
+ case ZPOOL_PROP_EXPANDSZ:
+ collect_vdev_prop(ZPOOL_PROP_EXPANDSZ,
+ vs->vs_esize, NULL, scripted, B_TRUE,
+ format, cb->cb_json, props,
+ cb->cb_json_as_int);
+ break;
+
+ case ZPOOL_PROP_FRAGMENTATION:
+ collect_vdev_prop(
+ ZPOOL_PROP_FRAGMENTATION,
+ vs->vs_fragmentation, NULL, scripted,
+ (vs->vs_fragmentation != ZFS_FRAG_INVALID &&
+ toplevel),
+ format, cb->cb_json, props,
+ cb->cb_json_as_int);
+ break;
+
+ case ZPOOL_PROP_CAPACITY:
+ cap = (vs->vs_space == 0) ?
+ 0 : (vs->vs_alloc * 10000 / vs->vs_space);
+ collect_vdev_prop(ZPOOL_PROP_CAPACITY, cap,
+ NULL, scripted, toplevel, format,
+ cb->cb_json, props, cb->cb_json_as_int);
+ break;
+
+ case ZPOOL_PROP_HEALTH:
+ state = zpool_state_to_name(vs->vs_state,
+ vs->vs_aux);
+ if (isspare) {
+ if (vs->vs_aux == VDEV_AUX_SPARED)
+ state = "INUSE";
+ else if (vs->vs_state ==
+ VDEV_STATE_HEALTHY)
+ state = "AVAIL";
+ }
+ collect_vdev_prop(ZPOOL_PROP_HEALTH, 0, state,
+ scripted, B_TRUE, format, cb->cb_json,
+ props, cb->cb_json_as_int);
+ break;
+
+ case ZPOOL_PROP_NAME:
+ break;
+
+ default:
+ collect_vdev_prop(pl->pl_prop, 0,
+ NULL, scripted, B_FALSE, format,
+ cb->cb_json, props, cb->cb_json_as_int);
+
+ }
+
+
}
- collect_vdev_prop(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL,
- scripted, toplevel, format, cb->cb_json, props,
- cb->cb_json_as_int);
- collect_vdev_prop(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc,
- NULL, scripted, toplevel, format, cb->cb_json, props,
- cb->cb_json_as_int);
- collect_vdev_prop(ZPOOL_PROP_CHECKPOINT,
- vs->vs_checkpoint_space, NULL, scripted, toplevel, format,
- cb->cb_json, props, cb->cb_json_as_int);
- collect_vdev_prop(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL,
- scripted, B_TRUE, format, cb->cb_json, props,
- cb->cb_json_as_int);
- collect_vdev_prop(ZPOOL_PROP_FRAGMENTATION,
- vs->vs_fragmentation, NULL, scripted,
- (vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel),
- format, cb->cb_json, props, cb->cb_json_as_int);
- cap = (vs->vs_space == 0) ? 0 :
- (vs->vs_alloc * 10000 / vs->vs_space);
- collect_vdev_prop(ZPOOL_PROP_CAPACITY, cap, NULL,
- scripted, toplevel, format, cb->cb_json, props,
- cb->cb_json_as_int);
- collect_vdev_prop(ZPOOL_PROP_DEDUPRATIO, 0, NULL,
- scripted, toplevel, format, cb->cb_json, props,
- cb->cb_json_as_int);
- state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
- if (isspare) {
- if (vs->vs_aux == VDEV_AUX_SPARED)
- state = "INUSE";
- else if (vs->vs_state == VDEV_STATE_HEALTHY)
- state = "AVAIL";
- }
- collect_vdev_prop(ZPOOL_PROP_HEALTH, 0, state, scripted,
- B_TRUE, format, cb->cb_json, props, cb->cb_json_as_int);
if (cb->cb_json) {
fnvlist_add_nvlist(ent, "properties", props);
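
Note: collect_list_stats() now walks cb->cb_proplist instead of emitting a
fixed set of columns, so the per-vdev rows of "zpool list -v" follow the same
-o property selection as the pool row (hence the removal of the "not
compatible with '-o' <proplist>" comment earlier in this file); properties
with no per-vdev value appear to fall through to the default case and print
as "-".
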
@@ -7256,7 +7302,7 @@ list_callback(zpool_handle_t *zhp, void *data)
if (cbp->cb_json_pool_key_guid) {
guid = fnvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_GUID);
- snprintf(pool_guid, 256, "%llu",
+ (void) snprintf(pool_guid, 256, "%llu",
(u_longlong_t)guid);
p = fnvlist_lookup_nvlist(d, pool_guid);
} else {
@@ -7643,7 +7689,7 @@ zpool_do_replace(int argc, char **argv)
}
/*
- * zpool attach [-fsw] [-o property=value] <pool> <device>|<vdev> <new_device>
+ * zpool attach [-fsw] [-o property=value] <pool> <vdev> <new_device>
*
* -f Force attach, even if <new_device> appears to be in use.
* -s Use sequential instead of healing reconstruction for resilver.
@@ -7651,9 +7697,9 @@ zpool_do_replace(int argc, char **argv)
* -w Wait for resilvering (mirror) or expansion (raidz) to complete
* before returning.
*
- * Attach <new_device> to a <device> or <vdev>, where the vdev can be of type
- * mirror or raidz. If <device> is not part of a mirror, then <device> will
- * be transformed into a mirror of <device> and <new_device>. When a mirror
+ * Attach <new_device> to a <vdev>, where the vdev can be of type
+ * device, mirror or raidz. If <vdev> is not part of a mirror, then <vdev> will
+ * be transformed into a mirror of <vdev> and <new_device>. When a mirror
* is involved, <new_device> will begin life with a DTL of [0, now], and will
* immediately begin to resilver itself. For the raidz case, a expansion will
* commence and reflow the raidz data across all the disks including the
@@ -8085,14 +8131,12 @@ zpool_do_offline(int argc, char **argv)
(void) fprintf(stderr,
gettext("-0 and -f cannot be used together\n"));
usage(B_FALSE);
- return (1);
}
if (is_power_off && istmp) {
(void) fprintf(stderr,
gettext("-0 and -t cannot be used together\n"));
usage(B_FALSE);
- return (1);
}
argc -= optind;
@@ -8130,7 +8174,8 @@ zpool_do_offline(int argc, char **argv)
gettext("unable to power off slot for"),
argv[i], ret);
}
- zpool_vdev_set_removed_state(zhp, guid, VDEV_AUX_NONE);
+ (void) zpool_vdev_set_removed_state(zhp, guid,
+ VDEV_AUX_NONE);
} else if (fault) {
vdev_aux_t aux;
@@ -8246,9 +8291,9 @@ zpool_do_clear(int argc, char **argv)
if (is_power_on) {
if (device == NULL) {
- zpool_power_on_pool_and_wait_for_devices(zhp);
+ (void) zpool_power_on_pool_and_wait_for_devices(zhp);
} else {
- zpool_power_on_and_disk_wait(zhp, device);
+ (void) zpool_power_on_and_disk_wait(zhp, device);
}
}
@@ -8446,8 +8491,9 @@ date_string_to_sec(const char *timestr, boolean_t rounding)
}
/*
- * zpool scrub [-e | -s | -p | -C | -E | -S] [-w] <pool> ...
+ * zpool scrub [-e | -s | -p | -C | -E | -S] [-w] [-a | <pool> ...]
*
+ * -a Scrub all pools.
* -e Only scrub blocks in the error log.
* -E End date of scrub.
* -S Start date of scrub.
@@ -8621,8 +8667,9 @@ zpool_do_resilver(int argc, char **argv)
}
/*
- * zpool trim [-d] [-r <rate>] [-c | -s] <pool> [<device> ...]
+ * zpool trim [-d] [-r <rate>] [-c | -s] <-a | pool> [<device> ...]
*
+ * -a Trim all pools.
* -c Cancel. Ends any in-progress trim.
* -d Secure trim. Requires kernel and device support.
* -r <rate> Sets the TRIM rate in bytes (per second). Supports
@@ -8729,7 +8776,6 @@ zpool_do_trim(int argc, char **argv)
if (argc < 1 && !trimall) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
- return (-1);
}
if (wait && (cmd_type != POOL_TRIM_START)) {
@@ -8878,7 +8924,7 @@ print_scan_scrub_resilver_status(pool_scan_stat_t *ps)
char total_i_buf[7], srate_buf[7], irate_buf[7], time_buf[32];
printf(" ");
- printf_color(ANSI_BOLD, gettext("scan:"));
+ (void) printf_color(ANSI_BOLD, gettext("scan:"));
printf(" ");
/* If there's never been a scan, there's not much to say. */
@@ -9018,7 +9064,7 @@ print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, uint_t c, char *vdev_name)
return;
printf(" ");
- printf_color(ANSI_BOLD, gettext("scan:"));
+ (void) printf_color(ANSI_BOLD, gettext("scan:"));
printf(" ");
uint64_t bytes_scanned = vrs->vrs_bytes_scanned;
@@ -9721,7 +9767,7 @@ dedup_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *item)
entry = fnvlist_alloc();
ddt_stats_nvlist(&ddh->ddh_stat[h], cb,
entry);
- snprintf(buf, 16, "%d", h);
+ (void) snprintf(buf, 16, "%d", h);
fnvlist_add_nvlist(hist, buf, entry);
fnvlist_free(entry);
}
@@ -10100,7 +10146,7 @@ print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
vdev_name = zpool_vdev_name(g_zfs, zhp,
child[prs->prs_removing_vdev], B_TRUE);
- printf_color(ANSI_BOLD, gettext("remove: "));
+ (void) printf_color(ANSI_BOLD, gettext("remove: "));
start = prs->prs_start_time;
end = prs->prs_end_time;
@@ -10200,7 +10246,7 @@ print_raidz_expand_status(zpool_handle_t *zhp, pool_raidz_expand_stat_t *pres)
&child, &children) == 0);
assert(pres->pres_expanding_vdev < children);
- printf_color(ANSI_BOLD, gettext("expand: "));
+ (void) printf_color(ANSI_BOLD, gettext("expand: "));
time_t start = pres->pres_start_time;
time_t end = pres->pres_end_time;
@@ -10444,32 +10490,38 @@ print_status_reason(zpool_handle_t *zhp, status_cbdata_t *cbp,
switch (reason) {
case ZPOOL_STATUS_MISSING_DEV_R:
- snprintf(status, ST_SIZE, gettext("One or more devices could "
+ (void) snprintf(status, ST_SIZE,
+ gettext("One or more devices could "
"not be opened. Sufficient replicas exist for\n\tthe pool "
"to continue functioning in a degraded state.\n"));
- snprintf(action, AC_SIZE, gettext("Attach the missing device "
+ (void) snprintf(action, AC_SIZE,
+ gettext("Attach the missing device "
"and online it using 'zpool online'.\n"));
break;
case ZPOOL_STATUS_MISSING_DEV_NR:
- snprintf(status, ST_SIZE, gettext("One or more devices could "
+ (void) snprintf(status, ST_SIZE,
+ gettext("One or more devices could "
"not be opened. There are insufficient\n\treplicas for the"
" pool to continue functioning.\n"));
- snprintf(action, AC_SIZE, gettext("Attach the missing device "
+ (void) snprintf(action, AC_SIZE,
+ gettext("Attach the missing device "
"and online it using 'zpool online'.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_R:
- snprintf(status, ST_SIZE, gettext("One or more devices could "
+ (void) snprintf(status, ST_SIZE,
+ gettext("One or more devices could "
"not be used because the label is missing or\n\tinvalid. "
"Sufficient replicas exist for the pool to continue\n\t"
"functioning in a degraded state.\n"));
- snprintf(action, AC_SIZE, gettext("Replace the device using "
- "'zpool replace'.\n"));
+ (void) snprintf(action, AC_SIZE,
+ gettext("Replace the device using 'zpool replace'.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_NR:
- snprintf(status, ST_SIZE, gettext("One or more devices could "
+ (void) snprintf(status, ST_SIZE,
+ gettext("One or more devices could "
"not be used because the label is missing \n\tor invalid. "
"There are insufficient replicas for the pool to "
"continue\n\tfunctioning.\n"));
@@ -10479,63 +10531,70 @@ print_status_reason(zpool_handle_t *zhp, status_cbdata_t *cbp,
break;
case ZPOOL_STATUS_FAILING_DEV:
- snprintf(status, ST_SIZE, gettext("One or more devices has "
+ (void) snprintf(status, ST_SIZE,
+ gettext("One or more devices has "
"experienced an unrecoverable error. An\n\tattempt was "
"made to correct the error. Applications are "
"unaffected.\n"));
- snprintf(action, AC_SIZE, gettext("Determine if the "
+ (void) snprintf(action, AC_SIZE, gettext("Determine if the "
"device needs to be replaced, and clear the errors\n\tusing"
" 'zpool clear' or replace the device with 'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_OFFLINE_DEV:
- snprintf(status, ST_SIZE, gettext("One or more devices has "
+ (void) snprintf(status, ST_SIZE,
+ gettext("One or more devices has "
"been taken offline by the administrator.\n\tSufficient "
"replicas exist for the pool to continue functioning in "
"a\n\tdegraded state.\n"));
- snprintf(action, AC_SIZE, gettext("Online the device "
+ (void) snprintf(action, AC_SIZE, gettext("Online the device "
"using 'zpool online' or replace the device with\n\t'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_REMOVED_DEV:
- snprintf(status, ST_SIZE, gettext("One or more devices have "
+ (void) snprintf(status, ST_SIZE,
+ gettext("One or more devices have "
"been removed.\n\tSufficient replicas exist for the pool "
"to continue functioning in a\n\tdegraded state.\n"));
- snprintf(action, AC_SIZE, gettext("Online the device "
+ (void) snprintf(action, AC_SIZE, gettext("Online the device "
"using zpool online' or replace the device with\n\t'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_RESILVERING:
case ZPOOL_STATUS_REBUILDING:
- snprintf(status, ST_SIZE, gettext("One or more devices is "
+ (void) snprintf(status, ST_SIZE,
+ gettext("One or more devices is "
"currently being resilvered. The pool will\n\tcontinue "
"to function, possibly in a degraded state.\n"));
- snprintf(action, AC_SIZE, gettext("Wait for the resilver to "
- "complete.\n"));
+ (void) snprintf(action, AC_SIZE,
+ gettext("Wait for the resilver to complete.\n"));
break;
case ZPOOL_STATUS_REBUILD_SCRUB:
- snprintf(status, ST_SIZE, gettext("One or more devices have "
+ (void) snprintf(status, ST_SIZE,
+ gettext("One or more devices have "
"been sequentially resilvered, scrubbing\n\tthe pool "
"is recommended.\n"));
- snprintf(action, AC_SIZE, gettext("Use 'zpool scrub' to "
+ (void) snprintf(action, AC_SIZE, gettext("Use 'zpool scrub' to "
"verify all data checksums.\n"));
break;
case ZPOOL_STATUS_CORRUPT_DATA:
- snprintf(status, ST_SIZE, gettext("One or more devices has "
+ (void) snprintf(status, ST_SIZE,
+ gettext("One or more devices has "
"experienced an error resulting in data\n\tcorruption. "
"Applications may be affected.\n"));
- snprintf(action, AC_SIZE, gettext("Restore the file in question"
+ (void) snprintf(action, AC_SIZE,
+ gettext("Restore the file in question"
" if possible. Otherwise restore the\n\tentire pool from "
"backup.\n"));
break;
case ZPOOL_STATUS_CORRUPT_POOL:
- snprintf(status, ST_SIZE, gettext("The pool metadata is "
+ (void) snprintf(status, ST_SIZE, gettext("The pool metadata is "
"corrupted and the pool cannot be opened.\n"));
zpool_explain_recover(zpool_get_handle(zhp),
zpool_get_name(zhp), reason, zpool_get_config(zhp, NULL),
@@ -10543,75 +10602,84 @@ print_status_reason(zpool_handle_t *zhp, status_cbdata_t *cbp,
break;
case ZPOOL_STATUS_VERSION_OLDER:
- snprintf(status, ST_SIZE, gettext("The pool is formatted using "
+ (void) snprintf(status, ST_SIZE,
+ gettext("The pool is formatted using "
"a legacy on-disk format. The pool can\n\tstill be used, "
"but some features are unavailable.\n"));
- snprintf(action, AC_SIZE, gettext("Upgrade the pool using "
+ (void) snprintf(action, AC_SIZE,
+ gettext("Upgrade the pool using "
"'zpool upgrade'. Once this is done, the\n\tpool will no "
"longer be accessible on software that does not support\n\t"
"feature flags.\n"));
break;
case ZPOOL_STATUS_VERSION_NEWER:
- snprintf(status, ST_SIZE, gettext("The pool has been upgraded "
+ (void) snprintf(status, ST_SIZE,
+ gettext("The pool has been upgraded "
"to a newer, incompatible on-disk version.\n\tThe pool "
"cannot be accessed on this system.\n"));
- snprintf(action, AC_SIZE, gettext("Access the pool from a "
+ (void) snprintf(action, AC_SIZE,
+ gettext("Access the pool from a "
"system running more recent software, or\n\trestore the "
"pool from backup.\n"));
break;
case ZPOOL_STATUS_FEAT_DISABLED:
- snprintf(status, ST_SIZE, gettext("Some supported and "
+ (void) snprintf(status, ST_SIZE, gettext("Some supported and "
"requested features are not enabled on the pool.\n\t"
"The pool can still be used, but some features are "
"unavailable.\n"));
- snprintf(action, AC_SIZE, gettext("Enable all features using "
+ (void) snprintf(action, AC_SIZE,
+ gettext("Enable all features using "
"'zpool upgrade'. Once this is done,\n\tthe pool may no "
"longer be accessible by software that does not support\n\t"
"the features. See zpool-features(7) for details.\n"));
break;
case ZPOOL_STATUS_COMPATIBILITY_ERR:
- snprintf(status, ST_SIZE, gettext("This pool has a "
+ (void) snprintf(status, ST_SIZE, gettext("This pool has a "
"compatibility list specified, but it could not be\n\t"
"read/parsed at this time. The pool can still be used, "
"but this\n\tshould be investigated.\n"));
- snprintf(action, AC_SIZE, gettext("Check the value of the "
+ (void) snprintf(action, AC_SIZE,
+ gettext("Check the value of the "
"'compatibility' property against the\n\t"
"appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or "
ZPOOL_DATA_COMPAT_D ".\n"));
break;
case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
- snprintf(status, ST_SIZE, gettext("One or more features "
+ (void) snprintf(status, ST_SIZE, gettext("One or more features "
"are enabled on the pool despite not being\n\t"
"requested by the 'compatibility' property.\n"));
- snprintf(action, AC_SIZE, gettext("Consider setting "
+ (void) snprintf(action, AC_SIZE, gettext("Consider setting "
"'compatibility' to an appropriate value, or\n\t"
"adding needed features to the relevant file in\n\t"
ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
- snprintf(status, ST_SIZE, gettext("The pool cannot be accessed "
+ (void) snprintf(status, ST_SIZE,
+ gettext("The pool cannot be accessed "
"on this system because it uses the\n\tfollowing feature(s)"
" not supported on this system:\n"));
zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status,
1024);
- snprintf(action, AC_SIZE, gettext("Access the pool from a "
+ (void) snprintf(action, AC_SIZE,
+ gettext("Access the pool from a "
"system that supports the required feature(s),\n\tor "
"restore the pool from backup.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
- snprintf(status, ST_SIZE, gettext("The pool can only be "
+ (void) snprintf(status, ST_SIZE, gettext("The pool can only be "
"accessed in read-only mode on this system. It\n\tcannot be"
" accessed in read-write mode because it uses the "
"following\n\tfeature(s) not supported on this system:\n"));
zpool_collect_unsup_feat(zpool_get_config(zhp, NULL), status,
1024);
- snprintf(action, AC_SIZE, gettext("The pool cannot be accessed "
+ (void) snprintf(action, AC_SIZE,
+ gettext("The pool cannot be accessed "
"in read-write mode. Import the pool with\n"
"\t\"-o readonly=on\", access the pool from a system that "
"supports the\n\trequired feature(s), or restore the "
@@ -10619,90 +10687,105 @@ print_status_reason(zpool_handle_t *zhp, status_cbdata_t *cbp,
break;
case ZPOOL_STATUS_FAULTED_DEV_R:
- snprintf(status, ST_SIZE, gettext("One or more devices are "
+ (void) snprintf(status, ST_SIZE,
+ gettext("One or more devices are "
"faulted in response to persistent errors.\n\tSufficient "
"replicas exist for the pool to continue functioning "
"in a\n\tdegraded state.\n"));
- snprintf(action, AC_SIZE, gettext("Replace the faulted device, "
+ (void) snprintf(action, AC_SIZE,
+ gettext("Replace the faulted device, "
"or use 'zpool clear' to mark the device\n\trepaired.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_NR:
- snprintf(status, ST_SIZE, gettext("One or more devices are "
+ (void) snprintf(status, ST_SIZE,
+ gettext("One or more devices are "
"faulted in response to persistent errors. There are "
"insufficient replicas for the pool to\n\tcontinue "
"functioning.\n"));
- snprintf(action, AC_SIZE, gettext("Destroy and re-create the "
+ (void) snprintf(action, AC_SIZE,
+ gettext("Destroy and re-create the "
"pool from a backup source. Manually marking the device\n"
"\trepaired using 'zpool clear' may allow some data "
"to be recovered.\n"));
break;
case ZPOOL_STATUS_IO_FAILURE_MMP:
- snprintf(status, ST_SIZE, gettext("The pool is suspended "
+ (void) snprintf(status, ST_SIZE,
+ gettext("The pool is suspended "
"because multihost writes failed or were delayed;\n\t"
"another system could import the pool undetected.\n"));
- snprintf(action, AC_SIZE, gettext("Make sure the pool's devices"
+ (void) snprintf(action, AC_SIZE,
+ gettext("Make sure the pool's devices"
" are connected, then reboot your system and\n\timport the "
"pool or run 'zpool clear' to resume the pool.\n"));
break;
case ZPOOL_STATUS_IO_FAILURE_WAIT:
case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
- snprintf(status, ST_SIZE, gettext("One or more devices are "
+ (void) snprintf(status, ST_SIZE,
+ gettext("One or more devices are "
"faulted in response to IO failures.\n"));
- snprintf(action, AC_SIZE, gettext("Make sure the affected "
+ (void) snprintf(action, AC_SIZE,
+ gettext("Make sure the affected "
"devices are connected, then run 'zpool clear'.\n"));
break;
case ZPOOL_STATUS_BAD_LOG:
- snprintf(status, ST_SIZE, gettext("An intent log record "
+ (void) snprintf(status, ST_SIZE, gettext("An intent log record "
"could not be read.\n"
"\tWaiting for administrator intervention to fix the "
"faulted pool.\n"));
- snprintf(action, AC_SIZE, gettext("Either restore the affected "
+ (void) snprintf(action, AC_SIZE,
+ gettext("Either restore the affected "
"device(s) and run 'zpool online',\n"
"\tor ignore the intent log records by running "
"'zpool clear'.\n"));
break;
case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
- snprintf(status, ST_SIZE, gettext("One or more devices are "
+ (void) snprintf(status, ST_SIZE,
+ gettext("One or more devices are "
"configured to use a non-native block size.\n"
"\tExpect reduced performance.\n"));
- snprintf(action, AC_SIZE, gettext("Replace affected devices "
+ (void) snprintf(action, AC_SIZE,
+ gettext("Replace affected devices "
"with devices that support the\n\tconfigured block size, "
"or migrate data to a properly configured\n\tpool.\n"));
break;
case ZPOOL_STATUS_HOSTID_MISMATCH:
- snprintf(status, ST_SIZE, gettext("Mismatch between pool hostid"
+ (void) snprintf(status, ST_SIZE,
+ gettext("Mismatch between pool hostid"
" and system hostid on imported pool.\n\tThis pool was "
"previously imported into a system with a different "
"hostid,\n\tand then was verbatim imported into this "
"system.\n"));
- snprintf(action, AC_SIZE, gettext("Export this pool on all "
+ (void) snprintf(action, AC_SIZE,
+ gettext("Export this pool on all "
"systems on which it is imported.\n"
"\tThen import it to correct the mismatch.\n"));
break;
case ZPOOL_STATUS_ERRATA:
- snprintf(status, ST_SIZE, gettext("Errata #%d detected.\n"),
- errata);
+ (void) snprintf(status, ST_SIZE,
+ gettext("Errata #%d detected.\n"), errata);
switch (errata) {
case ZPOOL_ERRATA_NONE:
break;
case ZPOOL_ERRATA_ZOL_2094_SCRUB:
- snprintf(action, AC_SIZE, gettext("To correct the issue"
- " run 'zpool scrub'.\n"));
+ (void) snprintf(action, AC_SIZE,
+ gettext("To correct the issue run "
+ "'zpool scrub'.\n"));
break;
case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
(void) strlcat(status, gettext("\tExisting encrypted "
"datasets contain an on-disk incompatibility\n\t "
"which needs to be corrected.\n"), ST_SIZE);
- snprintf(action, AC_SIZE, gettext("To correct the issue"
+ (void) snprintf(action, AC_SIZE,
+ gettext("To correct the issue"
" backup existing encrypted datasets to new\n\t"
"encrypted datasets and destroy the old ones. "
"'zfs mount -o ro' can\n\tbe used to temporarily "
@@ -10715,7 +10798,8 @@ print_status_reason(zpool_handle_t *zhp, status_cbdata_t *cbp,
"incompatibility. This may cause on-disk "
"corruption if they are used\n\twith "
"'zfs recv'.\n"), ST_SIZE);
- snprintf(action, AC_SIZE, gettext("To correct the"
+ (void) snprintf(action, AC_SIZE,
+ gettext("To correct the"
"issue, enable the bookmark_v2 feature. No "
"additional\n\taction is needed if there are no "
"encrypted snapshots or bookmarks.\n\tIf preserving"
@@ -10745,8 +10829,8 @@ print_status_reason(zpool_handle_t *zhp, status_cbdata_t *cbp,
if (cbp->cb_json)
fnvlist_add_string(item, "status", status);
else {
- printf_color(ANSI_BOLD, gettext("status: "));
- printf_color(ANSI_YELLOW, status);
+ (void) printf_color(ANSI_BOLD, gettext("status: "));
+ (void) printf_color(ANSI_YELLOW, status);
}
}
@@ -10754,8 +10838,8 @@ print_status_reason(zpool_handle_t *zhp, status_cbdata_t *cbp,
if (cbp->cb_json)
fnvlist_add_string(item, "action", action);
else {
- printf_color(ANSI_BOLD, gettext("action: "));
- printf_color(ANSI_YELLOW, action);
+ (void) printf_color(ANSI_BOLD, gettext("action: "));
+ (void) printf_color(ANSI_YELLOW, action);
}
}
}
@@ -10805,13 +10889,14 @@ status_callback_json(zpool_handle_t *zhp, void *data)
if (cbp->cb_json_pool_key_guid) {
guid = fnvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_GUID);
- snprintf(pool_guid, 256, "%llu", (u_longlong_t)guid);
+ (void) snprintf(pool_guid, 256, "%llu",
+ (u_longlong_t)guid);
}
cbp->cb_count++;
print_status_reason(zhp, cbp, reason, errata, item);
if (msgid != NULL) {
- snprintf(msgbuf, 256,
+ (void) snprintf(msgbuf, 256,
"https://openzfs.github.io/openzfs-docs/msg/%s",
msgid);
fnvlist_add_string(item, "msgid", msgid);
@@ -10934,19 +11019,19 @@ status_callback(zpool_handle_t *zhp, void *data)
health = zpool_get_state_str(zhp);
printf(" ");
- printf_color(ANSI_BOLD, gettext("pool:"));
+ (void) printf_color(ANSI_BOLD, gettext("pool:"));
printf(" %s\n", zpool_get_name(zhp));
- fputc(' ', stdout);
- printf_color(ANSI_BOLD, gettext("state: "));
+ (void) fputc(' ', stdout);
+ (void) printf_color(ANSI_BOLD, gettext("state: "));
- printf_color(health_str_to_color(health), "%s", health);
+ (void) printf_color(health_str_to_color(health), "%s", health);
- fputc('\n', stdout);
+ (void) fputc('\n', stdout);
print_status_reason(zhp, cbp, reason, errata, NULL);
if (msgid != NULL) {
printf(" ");
- printf_color(ANSI_BOLD, gettext("see:"));
+ (void) printf_color(ANSI_BOLD, gettext("see:"));
printf(gettext(
" https://openzfs.github.io/openzfs-docs/msg/%s\n"),
msgid);
@@ -10987,15 +11072,16 @@ status_callback(zpool_handle_t *zhp, void *data)
color_end();
if (cbp->cb_print_slow_ios) {
- printf_color(ANSI_BOLD, " %5s", gettext("SLOW"));
+ (void) printf_color(ANSI_BOLD, " %5s", gettext("SLOW"));
}
if (cbp->cb_print_power) {
- printf_color(ANSI_BOLD, " %5s", gettext("POWER"));
+ (void) printf_color(ANSI_BOLD, " %5s",
+ gettext("POWER"));
}
if (cbp->cb_print_dio_verify) {
- printf_color(ANSI_BOLD, " %5s", gettext("DIO"));
+ (void) printf_color(ANSI_BOLD, " %5s", gettext("DIO"));
}
if (cbp->vcdl != NULL)
@@ -11312,7 +11398,7 @@ check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
(*count)++;
}
- zfs_iter_filesystems_v2(zhp, 0, check_unsupp_fs, unsupp_fs);
+ (void) zfs_iter_filesystems_v2(zhp, 0, check_unsupp_fs, unsupp_fs);
zfs_close(zhp);
@@ -12140,17 +12226,17 @@ zpool_do_events_nvprint(nvlist_t *nvl, int depth)
FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE) == 0 ||
strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE) == 0) {
- zfs_valstr_zio_stage(i32, flagstr,
+ (void) zfs_valstr_zio_stage(i32, flagstr,
sizeof (flagstr));
printf(gettext("0x%x [%s]"), i32, flagstr);
} else if (strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_ZIO_TYPE) == 0) {
- zfs_valstr_zio_type(i32, flagstr,
+ (void) zfs_valstr_zio_type(i32, flagstr,
sizeof (flagstr));
printf(gettext("0x%x [%s]"), i32, flagstr);
} else if (strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY) == 0) {
- zfs_valstr_zio_priority(i32, flagstr,
+ (void) zfs_valstr_zio_priority(i32, flagstr,
sizeof (flagstr));
printf(gettext("0x%x [%s]"), i32, flagstr);
} else {
@@ -12178,7 +12264,7 @@ zpool_do_events_nvprint(nvlist_t *nvl, int depth)
(u_longlong_t)i64);
} else if (strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS) == 0) {
- zfs_valstr_zio_flag(i64, flagstr,
+ (void) zfs_valstr_zio_flag(i64, flagstr,
sizeof (flagstr));
printf(gettext("0x%llx [%s]"),
(u_longlong_t)i64, flagstr);
@@ -12374,7 +12460,7 @@ zpool_do_events_next(ev_opts_t *opts)
nvlist_free(nvl);
}
- VERIFY(0 == close(zevent_fd));
+ VERIFY0(close(zevent_fd));
return (ret);
}
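The VERIFY(0 == close(zevent_fd)) to VERIFY0(close(zevent_fd)) change is stylistic; both abort when the expression is non-zero. A simplified stand-in for the idiom is sketched below; it is not the actual libspl definition, which also reports the failing expression and values before aborting.

#include <stdlib.h>
#include <unistd.h>

/* Simplified stand-in, not the real libspl macro. */
#define	VERIFY0(expr)				\
	do {					\
		if ((expr) != 0)		\
			abort();		\
	} while (0)

static void
close_or_die(int fd)
{
	VERIFY0(close(fd));
}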
@@ -12492,7 +12578,7 @@ get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data)
if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop,
prop_name, value, sizeof (value), &srctype,
cbp->cb_literal) == 0) {
- zprop_collect_property(vdevname, cbp, prop_name,
+ (void) zprop_collect_property(vdevname, cbp, prop_name,
value, srctype, NULL, NULL, props);
}
}
@@ -12564,19 +12650,19 @@ get_callback(zpool_handle_t *zhp, void *data)
}
if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) {
- for_each_vdev(zhp, get_callback_vdev_cb, data);
+ (void) for_each_vdev(zhp, get_callback_vdev_cb, data);
} else {
/* Adjust column widths for vdev properties */
for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
vid++) {
- vdev_expand_proplist(zhp,
+ (void) vdev_expand_proplist(zhp,
cbp->cb_vdevs.cb_names[vid],
&cbp->cb_proplist);
}
/* Display the properties */
for (vid = 0; vid < cbp->cb_vdevs.cb_names_count;
vid++) {
- get_callback_vdev(zhp,
+ (void) get_callback_vdev(zhp,
cbp->cb_vdevs.cb_names[vid], data);
}
}
@@ -12649,7 +12735,7 @@ get_callback(zpool_handle_t *zhp, void *data)
uint64_t guid = fnvlist_lookup_uint64(
zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_POOL_GUID);
- snprintf(buf, 256, "%llu",
+ (void) snprintf(buf, 256, "%llu",
(u_longlong_t)guid);
fnvlist_add_nvlist(d, buf, item);
} else {
@@ -12848,7 +12934,6 @@ found:
argv[0], &cb.cb_vdevs);
fprintf(stderr, "\n");
usage(B_FALSE);
- return (1);
}
} else {
if (cb.cb_json) {
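This hunk (and a matching one near the end of main() further below) drops statements that followed usage(B_FALSE); they were unreachable because usage() terminates the process. A minimal sketch of the pattern follows; the noreturn attribute, message, and exit value are assumptions for the example, not copied from zpool_main.c.

#include <stdio.h>
#include <stdlib.h>

static void example_usage(int requested) __attribute__((noreturn));

static void
example_usage(int requested)
{
	(void) fprintf(requested ? stdout : stderr, "usage: ...\n");
	exit(requested ? 0 : 2);
}

int
main(void)
{
	example_usage(0);
	return (1);	/* never reached, like the statements removed above */
}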
@@ -13344,11 +13429,11 @@ wait_status_thread(void *arg)
} else {
timeout.tv_nsec = nanos;
}
- pthread_mutex_lock(&wd->wd_mutex);
+ (void) pthread_mutex_lock(&wd->wd_mutex);
if (!wd->wd_should_exit)
ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex,
&timeout);
- pthread_mutex_unlock(&wd->wd_mutex);
+ (void) pthread_mutex_unlock(&wd->wd_mutex);
if (ret == 0) {
break; /* signaled by main thread */
} else if (ret != ETIMEDOUT) {
@@ -13379,8 +13464,8 @@ zpool_do_wait(int argc, char **argv)
wd.wd_headers_once = B_FALSE;
wd.wd_should_exit = B_FALSE;
- pthread_mutex_init(&wd.wd_mutex, NULL);
- pthread_cond_init(&wd.wd_cv, NULL);
+ (void) pthread_mutex_init(&wd.wd_mutex, NULL);
+ (void) pthread_cond_init(&wd.wd_cv, NULL);
/* By default, wait for all types of activity. */
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++)
@@ -13504,17 +13589,17 @@ found:;
if (verbose) {
uintptr_t status;
- pthread_mutex_lock(&wd.wd_mutex);
+ (void) pthread_mutex_lock(&wd.wd_mutex);
wd.wd_should_exit = B_TRUE;
- pthread_cond_signal(&wd.wd_cv);
- pthread_mutex_unlock(&wd.wd_mutex);
+ (void) pthread_cond_signal(&wd.wd_cv);
+ (void) pthread_mutex_unlock(&wd.wd_mutex);
(void) pthread_join(status_thr, (void *)&status);
if (status != 0)
error = status;
}
- pthread_mutex_destroy(&wd.wd_mutex);
- pthread_cond_destroy(&wd.wd_cv);
+ (void) pthread_mutex_destroy(&wd.wd_mutex);
+ (void) pthread_cond_destroy(&wd.wd_cv);
return (error);
}
@@ -13667,14 +13752,14 @@ zpool_do_help(int argc, char **argv)
{
char page[MAXNAMELEN];
if (argc < 3 || strcmp(argv[2], "zpool") == 0)
- strcpy(page, "zpool");
+ (void) strcpy(page, "zpool");
else if (strcmp(argv[2], "concepts") == 0 ||
strcmp(argv[2], "props") == 0)
- snprintf(page, sizeof (page), "zpool%s", argv[2]);
+ (void) snprintf(page, sizeof (page), "zpool%s", argv[2]);
else
- snprintf(page, sizeof (page), "zpool-%s", argv[2]);
+ (void) snprintf(page, sizeof (page), "zpool-%s", argv[2]);
- execlp("man", "man", page, NULL);
+ (void) execlp("man", "man", page, NULL);
fprintf(stderr, "couldn't run man program: %s", strerror(errno));
return (-1);
@@ -13801,7 +13886,6 @@ main(int argc, char **argv)
(void) fprintf(stderr, gettext("unrecognized "
"command '%s'\n"), cmdname);
usage(B_FALSE);
- ret = 1;
}
for (i = 0; i < argc; i++)
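Most of the zpool_main.c changes above are mechanical: return values that are deliberately ignored gain a (void) cast so readers and static analyzers can tell the omission is intentional. A minimal, self-contained illustration of the convention (none of it taken from the zpool sources):

#include <stdio.h>

int
main(void)
{
	char buf[32];

	/* Truncation is impossible here, so the returned length is ignored. */
	(void) snprintf(buf, sizeof (buf), "pool: %s", "tank");

	/* Output errors are not actionable in this context. */
	(void) puts(buf);
	return (0);
}

The cast changes nothing at runtime; it only records that the caller considered the return value and chose to discard it.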
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_util.h b/sys/contrib/openzfs/cmd/zpool/zpool_util.h
index 5ab7cb9750f1..3af23c52bd45 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_util.h
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_util.h
@@ -76,11 +76,10 @@ typedef struct zpool_list zpool_list_t;
zpool_list_t *pool_list_get(int, char **, zprop_list_t **, zfs_type_t,
boolean_t, int *);
-void pool_list_update(zpool_list_t *);
+int pool_list_refresh(zpool_list_t *);
int pool_list_iter(zpool_list_t *, int unavail, zpool_iter_f, void *);
void pool_list_free(zpool_list_t *);
int pool_list_count(zpool_list_t *);
-void pool_list_remove(zpool_list_t *, zpool_handle_t *);
extern libzfs_handle_t *g_zfs;
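The header now offers a single pool_list_refresh(), returning int, in place of pool_list_update() and pool_list_remove(). Below is a hedged sketch of how a periodic caller might drive the declarations above. print_name() is invented for the example; the meaning of pool_list_refresh()'s return value and of the boolean_t argument to pool_list_get() are assumptions not established by this hunk, and the usual libzfs_init()/g_zfs setup is assumed to have happened already.

#include <stdio.h>
#include <unistd.h>
#include <libzfs.h>

#include "zpool_util.h"

/* Invented callback matching the zpool_iter_f signature. */
static int
print_name(zpool_handle_t *zhp, void *data)
{
	(void) data;
	(void) printf("%s\n", zpool_get_name(zhp));
	return (0);
}

/*
 * Hypothetical interval loop: pick up created/destroyed pools by
 * calling pool_list_refresh() each pass, instead of maintaining the
 * list with the removed pool_list_update()/pool_list_remove() pair.
 * Return values are ignored here only to keep the sketch short.
 */
static void
watch_pools(int argc, char **argv, zprop_list_t **props)
{
	int unavail = 0;
	zpool_list_t *list = pool_list_get(argc, argv, props,
	    ZFS_TYPE_POOL, B_FALSE, &unavail);

	for (;;) {
		(void) pool_list_iter(list, B_FALSE, print_name, NULL);
		(void) sleep(1);
		(void) pool_list_refresh(list);
	}
}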
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c b/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c
index 684b46a2d673..d1e9ef76dc10 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c
@@ -195,7 +195,7 @@ is_shorthand_path(const char *arg, char *path, size_t path_size,
return (0);
}
- strlcpy(path, arg, path_size);
+ (void) strlcpy(path, arg, path_size);
memset(statbuf, 0, sizeof (*statbuf));
*wholedisk = B_FALSE;
@@ -270,14 +270,13 @@ is_spare(nvlist_t *config, const char *path)
* draid* Virtual dRAID spare
*/
static nvlist_t *
-make_leaf_vdev(nvlist_t *props, const char *arg, boolean_t is_primary)
+make_leaf_vdev(const char *arg, boolean_t is_primary, uint64_t ashift)
{
char path[MAXPATHLEN];
struct stat64 statbuf;
nvlist_t *vdev = NULL;
const char *type = NULL;
boolean_t wholedisk = B_FALSE;
- uint64_t ashift = 0;
int err;
/*
@@ -309,7 +308,7 @@ make_leaf_vdev(nvlist_t *props, const char *arg, boolean_t is_primary)
}
/* After whole disk check restore original passed path */
- strlcpy(path, arg, sizeof (path));
+ (void) strlcpy(path, arg, sizeof (path));
} else if (zpool_is_draid_spare(arg)) {
if (!is_primary) {
(void) fprintf(stderr,
@@ -319,7 +318,7 @@ make_leaf_vdev(nvlist_t *props, const char *arg, boolean_t is_primary)
}
wholedisk = B_TRUE;
- strlcpy(path, arg, sizeof (path));
+ (void) strlcpy(path, arg, sizeof (path));
type = VDEV_TYPE_DRAID_SPARE;
} else {
err = is_shorthand_path(arg, path, sizeof (path),
@@ -382,31 +381,6 @@ make_leaf_vdev(nvlist_t *props, const char *arg, boolean_t is_primary)
(uint64_t)wholedisk) == 0);
/*
- * Override defaults if custom properties are provided.
- */
- if (props != NULL) {
- const char *value = NULL;
-
- if (nvlist_lookup_string(props,
- zpool_prop_to_name(ZPOOL_PROP_ASHIFT), &value) == 0) {
- if (zfs_nicestrtonum(NULL, value, &ashift) != 0) {
- (void) fprintf(stderr,
- gettext("ashift must be a number.\n"));
- return (NULL);
- }
- if (ashift != 0 &&
- (ashift < ASHIFT_MIN || ashift > ASHIFT_MAX)) {
- (void) fprintf(stderr,
- gettext("invalid 'ashift=%" PRIu64 "' "
- "property: only values between %" PRId32 " "
- "and %" PRId32 " are allowed.\n"),
- ashift, ASHIFT_MIN, ASHIFT_MAX);
- return (NULL);
- }
- }
- }
-
- /*
* If the device is known to incorrectly report its physical sector
* size explicitly provide the known correct value.
*/
@@ -609,22 +583,28 @@ get_replication(nvlist_t *nvroot, boolean_t fatal)
ZPOOL_CONFIG_PATH, &path) == 0);
/*
+ * Skip active spares; they should never cause
+ * the pool to be evaluated as inconsistent.
+ */
+ if (is_spare(NULL, path))
+ continue;
+
+ /*
* If we have a raidz/mirror that combines disks
- * with files, report it as an error.
+ * with files, only report it as an error when
+ * fatal is set, so the remaining replication
+ * checks in check_replication() are not skipped.
*/
- if (!dontreport && type != NULL &&
+ if (fatal && !dontreport && type != NULL &&
strcmp(type, childtype) != 0) {
if (ret != NULL)
free(ret);
ret = NULL;
- if (fatal)
- vdev_error(gettext(
- "mismatched replication "
- "level: %s contains both "
- "files and devices\n"),
- rep.zprl_type);
- else
- return (NULL);
+ vdev_error(gettext(
+ "mismatched replication "
+ "level: %s contains both "
+ "files and devices\n"),
+ rep.zprl_type);
dontreport = B_TRUE;
}
@@ -1030,7 +1010,7 @@ make_disks(zpool_handle_t *zhp, nvlist_t *nv, boolean_t replacing)
* window between when udev deletes and recreates the link
* during which access attempts will fail with ENOENT.
*/
- strlcpy(udevpath, path, MAXPATHLEN);
+ (void) strlcpy(udevpath, path, MAXPATHLEN);
(void) zfs_append_partition(udevpath, MAXPATHLEN);
fd = open(devpath, O_RDWR|O_EXCL);
@@ -1507,6 +1487,29 @@ construct_spec(nvlist_t *props, int argc, char **argv)
const char *type, *fulltype;
boolean_t is_log, is_special, is_dedup, is_spare;
boolean_t seen_logs;
+ uint64_t ashift = 0;
+
+ if (props != NULL) {
+ const char *value = NULL;
+
+ if (nvlist_lookup_string(props,
+ zpool_prop_to_name(ZPOOL_PROP_ASHIFT), &value) == 0) {
+ if (zfs_nicestrtonum(NULL, value, &ashift) != 0) {
+ (void) fprintf(stderr,
+ gettext("ashift must be a number.\n"));
+ return (NULL);
+ }
+ if (ashift != 0 &&
+ (ashift < ASHIFT_MIN || ashift > ASHIFT_MAX)) {
+ (void) fprintf(stderr,
+ gettext("invalid 'ashift=%" PRIu64 "' "
+ "property: only values between %" PRId32 " "
+ "and %" PRId32 " are allowed.\n"),
+ ashift, ASHIFT_MIN, ASHIFT_MAX);
+ return (NULL);
+ }
+ }
+ }
top = NULL;
toplevels = 0;
@@ -1612,9 +1615,9 @@ construct_spec(nvlist_t *props, int argc, char **argv)
children * sizeof (nvlist_t *));
if (child == NULL)
zpool_no_memory();
- if ((nv = make_leaf_vdev(props, argv[c],
+ if ((nv = make_leaf_vdev(argv[c],
!(is_log || is_special || is_dedup ||
- is_spare))) == NULL) {
+ is_spare), ashift)) == NULL) {
for (c = 0; c < children - 1; c++)
nvlist_free(child[c]);
free(child);
@@ -1678,6 +1681,10 @@ construct_spec(nvlist_t *props, int argc, char **argv)
ZPOOL_CONFIG_ALLOCATION_BIAS,
VDEV_ALLOC_BIAS_DEDUP) == 0);
}
+ if (ashift > 0) {
+ fnvlist_add_uint64(nv,
+ ZPOOL_CONFIG_ASHIFT, ashift);
+ }
if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
verify(nvlist_add_uint64(nv,
ZPOOL_CONFIG_NPARITY,
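Together with the construct_spec() hunk further up, the ashift property is now parsed and range-checked once and then applied both to leaf vdevs (through make_leaf_vdev()) and to interior vdevs as ZPOOL_CONFIG_ASHIFT. The sketch below shows roughly the props nvlist a caller such as "zpool create -o ashift=12 ..." would hand in; the helper name is invented and the example is illustrative only.

#include <libnvpair.h>
#include <libzfs.h>

/*
 * Illustrative only: property values reach construct_spec() as
 * strings, which is why zfs_nicestrtonum() and the ASHIFT_MIN/MAX
 * check in the earlier hunk run before the value is attached to the
 * vdev nvlists as ZPOOL_CONFIG_ASHIFT.
 */
static nvlist_t *
example_ashift_props(void)
{
	nvlist_t *props = fnvlist_alloc();

	fnvlist_add_string(props,
	    zpool_prop_to_name(ZPOOL_PROP_ASHIFT), "12");
	return (props);
}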
@@ -1705,8 +1712,9 @@ construct_spec(nvlist_t *props, int argc, char **argv)
* We have a device. Pass off to make_leaf_vdev() to
* construct the appropriate nvlist describing the vdev.
*/
- if ((nv = make_leaf_vdev(props, argv[0], !(is_log ||
- is_special || is_dedup || is_spare))) == NULL)
+ if ((nv = make_leaf_vdev(argv[0], !(is_log ||
+ is_special || is_dedup || is_spare),
+ ashift)) == NULL)
goto spec_out;
verify(nvlist_add_uint64(nv,