aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMartin Matuska <mm@FreeBSD.org>2021-06-26 11:37:50 +0000
committerMartin Matuska <mm@FreeBSD.org>2021-06-26 11:38:37 +0000
commit363a2f5661b3ad079862bee56b86c08648ecc178 (patch)
tree3a6f4508511e272927dcb2c675496253cd2b0a86
parentc816b23784a61b5a3222f3db1ce595c06f2c4005 (diff)
parentaee26af277c91abeb0e1cfe27cc48cc328fdb881 (diff)
downloadsrc-363a2f5661b3ad079862bee56b86c08648ecc178.tar.gz
src-363a2f5661b3ad079862bee56b86c08648ecc178.zip
zfs: merge openzfs/zfs@aee26af27 (zfs-2.1-release) into stable/13
Notable upstream pull request merges: #12172 Use wmsum for arc, abd, dbuf and zfetch statistics #12227 Revert Consolidate arc_buf allocation checks #12273 zfs_metaslab_mem_limit should be 25 instead of 75 #12266 Fix flag copying in resume case #12276 Update cache file when setting compatibility property Obtained from: OpenZFS OpenZFS commit: aee26af277c91abeb0e1cfe27cc48cc328fdb881
-rw-r--r--sys/contrib/openzfs/.github/ISSUE_TEMPLATE/config.yml2
-rw-r--r--sys/contrib/openzfs/cmd/ztest/ztest.c4
-rw-r--r--sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c12
-rw-r--r--sys/contrib/openzfs/include/sys/abd_impl.h4
-rw-r--r--sys/contrib/openzfs/include/sys/arc_impl.h119
-rw-r--r--sys/contrib/openzfs/include/sys/crypto/api.h2
-rw-r--r--sys/contrib/openzfs/include/sys/dnode.h6
-rw-r--r--sys/contrib/openzfs/include/sys/zfs_debug.h2
-rw-r--r--sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c4
-rw-r--r--sys/contrib/openzfs/lib/libzpool/kernel.c2
-rw-r--r--sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c46
-rw-r--r--sys/contrib/openzfs/module/os/freebsd/zfs/zfs_debug.c2
-rw-r--r--sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c9
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/abd_os.c75
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/arc_os.c2
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/zfs_debug.c3
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/zio_crypt.c20
-rw-r--r--sys/contrib/openzfs/module/zfs/arc.c615
-rw-r--r--sys/contrib/openzfs/module/zfs/dbuf.c236
-rw-r--r--sys/contrib/openzfs/module/zfs/dmu_objset.c2
-rw-r--r--sys/contrib/openzfs/module/zfs/dmu_recv.c44
-rw-r--r--sys/contrib/openzfs/module/zfs/dmu_tx.c3
-rw-r--r--sys/contrib/openzfs/module/zfs/dmu_zfetch.c47
-rw-r--r--sys/contrib/openzfs/module/zfs/dnode.c8
-rw-r--r--sys/contrib/openzfs/module/zfs/dnode_sync.c5
-rw-r--r--sys/contrib/openzfs/module/zfs/dsl_dataset.c4
-rw-r--r--sys/contrib/openzfs/module/zfs/dsl_destroy.c2
-rw-r--r--sys/contrib/openzfs/module/zfs/dsl_dir.c11
-rw-r--r--sys/contrib/openzfs/module/zfs/metaslab.c69
-rw-r--r--sys/contrib/openzfs/module/zfs/mmp.c19
-rw-r--r--sys/contrib/openzfs/module/zfs/range_tree.c3
-rw-r--r--sys/contrib/openzfs/module/zfs/sa.c2
-rw-r--r--sys/contrib/openzfs/module/zfs/spa.c65
-rw-r--r--sys/contrib/openzfs/module/zfs/spa_checkpoint.c13
-rw-r--r--sys/contrib/openzfs/module/zfs/spa_history.c9
-rw-r--r--sys/contrib/openzfs/module/zfs/spa_misc.c2
-rw-r--r--sys/contrib/openzfs/module/zfs/space_map.c8
-rw-r--r--sys/contrib/openzfs/module/zfs/txg.c23
-rw-r--r--sys/contrib/openzfs/module/zfs/vdev.c5
-rw-r--r--sys/contrib/openzfs/module/zfs/vdev_indirect.c7
-rw-r--r--sys/contrib/openzfs/module/zfs/vdev_removal.c21
-rw-r--r--sys/contrib/openzfs/module/zfs/zap.c8
-rw-r--r--sys/contrib/openzfs/module/zfs/zap_micro.c9
-rw-r--r--sys/contrib/openzfs/module/zfs/zcp.c3
-rw-r--r--sys/contrib/openzfs/module/zfs/zil.c5
-rw-r--r--sys/contrib/openzfs/module/zfs/zio.c40
-rw-r--r--sys/contrib/openzfs/tests/runfiles/common.run3
-rwxr-xr-xsys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in3
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg1
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_create/Makefile.am1
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_features_009_pos.ksh92
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/redacted_send/redacted_embedded.ksh4
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/rsend.kshlib12
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send-c_embedded_blocks.ksh10
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send-c_zstreamdump.ksh12
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send-cpL_varied_recsize.ksh2
-rw-r--r--sys/modules/zfs/zfs_config.h4
57 files changed, 1276 insertions, 470 deletions
diff --git a/sys/contrib/openzfs/.github/ISSUE_TEMPLATE/config.yml b/sys/contrib/openzfs/.github/ISSUE_TEMPLATE/config.yml
index 952414f66ace..ecaaa182103c 100644
--- a/sys/contrib/openzfs/.github/ISSUE_TEMPLATE/config.yml
+++ b/sys/contrib/openzfs/.github/ISSUE_TEMPLATE/config.yml
@@ -10,5 +10,5 @@ contact_links:
url: https://lists.freebsd.org/mailman/listinfo/freebsd-fs
about: Get community support for OpenZFS on FreeBSD
- name: OpenZFS on IRC
- url: https://kiwiirc.com/nextclient/irc.libera.chat/openzfs
+ url: https://web.libera.chat/#openzfs
about: Use IRC to get community support for OpenZFS
diff --git a/sys/contrib/openzfs/cmd/ztest/ztest.c b/sys/contrib/openzfs/cmd/ztest/ztest.c
index 73694b0b352b..a580396ebd8a 100644
--- a/sys/contrib/openzfs/cmd/ztest/ztest.c
+++ b/sys/contrib/openzfs/cmd/ztest/ztest.c
@@ -6669,7 +6669,7 @@ ztest_initialize(ztest_ds_t *zd, uint64_t id)
char *path = strdup(rand_vd->vdev_path);
boolean_t active = rand_vd->vdev_initialize_thread != NULL;
- zfs_dbgmsg("vd %px, guid %llu", rand_vd, guid);
+ zfs_dbgmsg("vd %px, guid %llu", rand_vd, (u_longlong_t)guid);
spa_config_exit(spa, SCL_VDEV, FTAG);
uint64_t cmd = ztest_random(POOL_INITIALIZE_FUNCS);
@@ -6741,7 +6741,7 @@ ztest_trim(ztest_ds_t *zd, uint64_t id)
char *path = strdup(rand_vd->vdev_path);
boolean_t active = rand_vd->vdev_trim_thread != NULL;
- zfs_dbgmsg("vd %p, guid %llu", rand_vd, guid);
+ zfs_dbgmsg("vd %p, guid %llu", rand_vd, (u_longlong_t)guid);
spa_config_exit(spa, SCL_VDEV, FTAG);
uint64_t cmd = ztest_random(POOL_TRIM_FUNCS);
diff --git a/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c b/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c
index 4cafc37b9b47..0856c7534f0d 100644
--- a/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c
+++ b/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c
@@ -82,7 +82,11 @@ alloc_pw_size(size_t len)
return (NULL);
}
pw->len = len;
- pw->value = malloc(len);
+ /*
+ * The use of malloc() triggers a spurious gcc 11 -Wmaybe-uninitialized
+ * warning in the mlock() function call below, so use calloc().
+ */
+ pw->value = calloc(len, 1);
if (!pw->value) {
free(pw);
return (NULL);
@@ -99,7 +103,11 @@ alloc_pw_string(const char *source)
return (NULL);
}
pw->len = strlen(source) + 1;
- pw->value = malloc(pw->len);
+ /*
+ * The use of malloc() triggers a spurious gcc 11 -Wmaybe-uninitialized
+ * warning in the mlock() function call below, so use calloc().
+ */
+ pw->value = calloc(pw->len, 1);
if (!pw->value) {
free(pw);
return (NULL);
diff --git a/sys/contrib/openzfs/include/sys/abd_impl.h b/sys/contrib/openzfs/include/sys/abd_impl.h
index 435a8dc6d9ce..6bce08cfa343 100644
--- a/sys/contrib/openzfs/include/sys/abd_impl.h
+++ b/sys/contrib/openzfs/include/sys/abd_impl.h
@@ -27,6 +27,7 @@
#define _ABD_IMPL_H
#include <sys/abd.h>
+#include <sys/wmsum.h>
#ifdef __cplusplus
extern "C" {
@@ -82,9 +83,8 @@ void abd_iter_unmap(struct abd_iter *);
/*
* Helper macros
*/
-#define ABDSTAT(stat) (abd_stats.stat.value.ui64)
#define ABDSTAT_INCR(stat, val) \
- atomic_add_64(&abd_stats.stat.value.ui64, (val))
+ wmsum_add(&abd_sums.stat, (val))
#define ABDSTAT_BUMP(stat) ABDSTAT_INCR(stat, 1)
#define ABDSTAT_BUMPDOWN(stat) ABDSTAT_INCR(stat, -1)
diff --git a/sys/contrib/openzfs/include/sys/arc_impl.h b/sys/contrib/openzfs/include/sys/arc_impl.h
index c01da46e01e3..1f341ec94faf 100644
--- a/sys/contrib/openzfs/include/sys/arc_impl.h
+++ b/sys/contrib/openzfs/include/sys/arc_impl.h
@@ -33,6 +33,7 @@
#include <sys/zio_crypt.h>
#include <sys/zthr.h>
#include <sys/aggsum.h>
+#include <sys/wmsum.h>
#ifdef __cplusplus
extern "C" {
@@ -563,7 +564,6 @@ typedef struct arc_stats {
kstat_named_t arcstat_c;
kstat_named_t arcstat_c_min;
kstat_named_t arcstat_c_max;
- /* Not updated directly; only synced in arc_kstat_update. */
kstat_named_t arcstat_size;
/*
* Number of compressed bytes stored in the arc_buf_hdr_t's b_pabd.
@@ -592,14 +592,12 @@ typedef struct arc_stats {
* (allocated via arc_buf_hdr_t_full and arc_buf_hdr_t_l2only
* caches), and arc_buf_t structures (allocated via arc_buf_t
* cache).
- * Not updated directly; only synced in arc_kstat_update.
*/
kstat_named_t arcstat_hdr_size;
/*
* Number of bytes consumed by ARC buffers of type equal to
* ARC_BUFC_DATA. This is generally consumed by buffers backing
* on disk user data (e.g. plain file contents).
- * Not updated directly; only synced in arc_kstat_update.
*/
kstat_named_t arcstat_data_size;
/*
@@ -607,22 +605,18 @@ typedef struct arc_stats {
* ARC_BUFC_METADATA. This is generally consumed by buffers
* backing on disk data that is used for internal ZFS
* structures (e.g. ZAP, dnode, indirect blocks, etc).
- * Not updated directly; only synced in arc_kstat_update.
*/
kstat_named_t arcstat_metadata_size;
/*
* Number of bytes consumed by dmu_buf_impl_t objects.
- * Not updated directly; only synced in arc_kstat_update.
*/
kstat_named_t arcstat_dbuf_size;
/*
* Number of bytes consumed by dnode_t objects.
- * Not updated directly; only synced in arc_kstat_update.
*/
kstat_named_t arcstat_dnode_size;
/*
* Number of bytes consumed by bonus buffers.
- * Not updated directly; only synced in arc_kstat_update.
*/
kstat_named_t arcstat_bonus_size;
#if defined(COMPAT_FREEBSD11)
@@ -637,7 +631,6 @@ typedef struct arc_stats {
* arc_anon state. This includes *all* buffers in the arc_anon
* state; e.g. data, metadata, evictable, and unevictable buffers
* are all included in this value.
- * Not updated directly; only synced in arc_kstat_update.
*/
kstat_named_t arcstat_anon_size;
/*
@@ -645,7 +638,6 @@ typedef struct arc_stats {
* following criteria: backing buffers of type ARC_BUFC_DATA,
* residing in the arc_anon state, and are eligible for eviction
* (e.g. have no outstanding holds on the buffer).
- * Not updated directly; only synced in arc_kstat_update.
*/
kstat_named_t arcstat_anon_evictable_data;
/*
@@ -653,7 +645,6 @@ typedef struct arc_stats {
* following criteria: backing buffers of type ARC_BUFC_METADATA,
* residing in the arc_anon state, and are eligible for eviction
* (e.g. have no outstanding holds on the buffer).
- * Not updated directly; only synced in arc_kstat_update.
*/
kstat_named_t arcstat_anon_evictable_metadata;
/*
@@ -661,7 +652,6 @@ typedef struct arc_stats {
* arc_mru state. This includes *all* buffers in the arc_mru
* state; e.g. data, metadata, evictable, and unevictable buffers
* are all included in this value.
- * Not updated directly; only synced in arc_kstat_update.
*/
kstat_named_t arcstat_mru_size;
/*
@@ -669,7 +659,6 @@ typedef struct arc_stats {
* following criteria: backing buffers of type ARC_BUFC_DATA,
* residing in the arc_mru state, and are eligible for eviction
* (e.g. have no outstanding holds on the buffer).
- * Not updated directly; only synced in arc_kstat_update.
*/
kstat_named_t arcstat_mru_evictable_data;
/*
@@ -677,7 +666,6 @@ typedef struct arc_stats {
* following criteria: backing buffers of type ARC_BUFC_METADATA,
* residing in the arc_mru state, and are eligible for eviction
* (e.g. have no outstanding holds on the buffer).
- * Not updated directly; only synced in arc_kstat_update.
*/
kstat_named_t arcstat_mru_evictable_metadata;
/*
@@ -688,21 +676,18 @@ typedef struct arc_stats {
* don't actually have ARC buffers linked off of these headers.
* Thus, *if* the headers had associated ARC buffers, these
* buffers *would have* consumed this number of bytes.
- * Not updated directly; only synced in arc_kstat_update.
*/
kstat_named_t arcstat_mru_ghost_size;
/*
* Number of bytes that *would have been* consumed by ARC
* buffers that are eligible for eviction, of type
* ARC_BUFC_DATA, and linked off the arc_mru_ghost state.
- * Not updated directly; only synced in arc_kstat_update.
*/
kstat_named_t arcstat_mru_ghost_evictable_data;
/*
* Number of bytes that *would have been* consumed by ARC
* buffers that are eligible for eviction, of type
* ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
- * Not updated directly; only synced in arc_kstat_update.
*/
kstat_named_t arcstat_mru_ghost_evictable_metadata;
/*
@@ -710,42 +695,36 @@ typedef struct arc_stats {
* arc_mfu state. This includes *all* buffers in the arc_mfu
* state; e.g. data, metadata, evictable, and unevictable buffers
* are all included in this value.
- * Not updated directly; only synced in arc_kstat_update.
*/
kstat_named_t arcstat_mfu_size;
/*
* Number of bytes consumed by ARC buffers that are eligible for
* eviction, of type ARC_BUFC_DATA, and reside in the arc_mfu
* state.
- * Not updated directly; only synced in arc_kstat_update.
*/
kstat_named_t arcstat_mfu_evictable_data;
/*
* Number of bytes consumed by ARC buffers that are eligible for
* eviction, of type ARC_BUFC_METADATA, and reside in the
* arc_mfu state.
- * Not updated directly; only synced in arc_kstat_update.
*/
kstat_named_t arcstat_mfu_evictable_metadata;
/*
* Total number of bytes that *would have been* consumed by ARC
* buffers in the arc_mfu_ghost state. See the comment above
* arcstat_mru_ghost_size for more details.
- * Not updated directly; only synced in arc_kstat_update.
*/
kstat_named_t arcstat_mfu_ghost_size;
/*
* Number of bytes that *would have been* consumed by ARC
* buffers that are eligible for eviction, of type
* ARC_BUFC_DATA, and linked off the arc_mfu_ghost state.
- * Not updated directly; only synced in arc_kstat_update.
*/
kstat_named_t arcstat_mfu_ghost_evictable_data;
/*
* Number of bytes that *would have been* consumed by ARC
* buffers that are eligible for eviction, of type
* ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
- * Not updated directly; only synced in arc_kstat_update.
*/
kstat_named_t arcstat_mfu_ghost_evictable_metadata;
kstat_named_t arcstat_l2_hits;
@@ -779,7 +758,6 @@ typedef struct arc_stats {
kstat_named_t arcstat_l2_io_error;
kstat_named_t arcstat_l2_lsize;
kstat_named_t arcstat_l2_psize;
- /* Not updated directly; only synced in arc_kstat_update. */
kstat_named_t arcstat_l2_hdr_size;
/*
* Number of L2ARC log blocks written. These are used for restoring the
@@ -860,7 +838,6 @@ typedef struct arc_stats {
kstat_named_t arcstat_tempreserve;
kstat_named_t arcstat_loaned_bytes;
kstat_named_t arcstat_prune;
- /* Not updated directly; only synced in arc_kstat_update. */
kstat_named_t arcstat_meta_used;
kstat_named_t arcstat_meta_limit;
kstat_named_t arcstat_dnode_limit;
@@ -876,6 +853,96 @@ typedef struct arc_stats {
kstat_named_t arcstat_abd_chunk_waste_size;
} arc_stats_t;
+typedef struct arc_sums {
+ wmsum_t arcstat_hits;
+ wmsum_t arcstat_misses;
+ wmsum_t arcstat_demand_data_hits;
+ wmsum_t arcstat_demand_data_misses;
+ wmsum_t arcstat_demand_metadata_hits;
+ wmsum_t arcstat_demand_metadata_misses;
+ wmsum_t arcstat_prefetch_data_hits;
+ wmsum_t arcstat_prefetch_data_misses;
+ wmsum_t arcstat_prefetch_metadata_hits;
+ wmsum_t arcstat_prefetch_metadata_misses;
+ wmsum_t arcstat_mru_hits;
+ wmsum_t arcstat_mru_ghost_hits;
+ wmsum_t arcstat_mfu_hits;
+ wmsum_t arcstat_mfu_ghost_hits;
+ wmsum_t arcstat_deleted;
+ wmsum_t arcstat_mutex_miss;
+ wmsum_t arcstat_access_skip;
+ wmsum_t arcstat_evict_skip;
+ wmsum_t arcstat_evict_not_enough;
+ wmsum_t arcstat_evict_l2_cached;
+ wmsum_t arcstat_evict_l2_eligible;
+ wmsum_t arcstat_evict_l2_eligible_mfu;
+ wmsum_t arcstat_evict_l2_eligible_mru;
+ wmsum_t arcstat_evict_l2_ineligible;
+ wmsum_t arcstat_evict_l2_skip;
+ wmsum_t arcstat_hash_collisions;
+ wmsum_t arcstat_hash_chains;
+ aggsum_t arcstat_size;
+ wmsum_t arcstat_compressed_size;
+ wmsum_t arcstat_uncompressed_size;
+ wmsum_t arcstat_overhead_size;
+ wmsum_t arcstat_hdr_size;
+ wmsum_t arcstat_data_size;
+ wmsum_t arcstat_metadata_size;
+ wmsum_t arcstat_dbuf_size;
+ aggsum_t arcstat_dnode_size;
+ wmsum_t arcstat_bonus_size;
+ wmsum_t arcstat_l2_hits;
+ wmsum_t arcstat_l2_misses;
+ wmsum_t arcstat_l2_prefetch_asize;
+ wmsum_t arcstat_l2_mru_asize;
+ wmsum_t arcstat_l2_mfu_asize;
+ wmsum_t arcstat_l2_bufc_data_asize;
+ wmsum_t arcstat_l2_bufc_metadata_asize;
+ wmsum_t arcstat_l2_feeds;
+ wmsum_t arcstat_l2_rw_clash;
+ wmsum_t arcstat_l2_read_bytes;
+ wmsum_t arcstat_l2_write_bytes;
+ wmsum_t arcstat_l2_writes_sent;
+ wmsum_t arcstat_l2_writes_done;
+ wmsum_t arcstat_l2_writes_error;
+ wmsum_t arcstat_l2_writes_lock_retry;
+ wmsum_t arcstat_l2_evict_lock_retry;
+ wmsum_t arcstat_l2_evict_reading;
+ wmsum_t arcstat_l2_evict_l1cached;
+ wmsum_t arcstat_l2_free_on_write;
+ wmsum_t arcstat_l2_abort_lowmem;
+ wmsum_t arcstat_l2_cksum_bad;
+ wmsum_t arcstat_l2_io_error;
+ wmsum_t arcstat_l2_lsize;
+ wmsum_t arcstat_l2_psize;
+ aggsum_t arcstat_l2_hdr_size;
+ wmsum_t arcstat_l2_log_blk_writes;
+ wmsum_t arcstat_l2_log_blk_asize;
+ wmsum_t arcstat_l2_log_blk_count;
+ wmsum_t arcstat_l2_rebuild_success;
+ wmsum_t arcstat_l2_rebuild_abort_unsupported;
+ wmsum_t arcstat_l2_rebuild_abort_io_errors;
+ wmsum_t arcstat_l2_rebuild_abort_dh_errors;
+ wmsum_t arcstat_l2_rebuild_abort_cksum_lb_errors;
+ wmsum_t arcstat_l2_rebuild_abort_lowmem;
+ wmsum_t arcstat_l2_rebuild_size;
+ wmsum_t arcstat_l2_rebuild_asize;
+ wmsum_t arcstat_l2_rebuild_bufs;
+ wmsum_t arcstat_l2_rebuild_bufs_precached;
+ wmsum_t arcstat_l2_rebuild_log_blks;
+ wmsum_t arcstat_memory_throttle_count;
+ wmsum_t arcstat_memory_direct_count;
+ wmsum_t arcstat_memory_indirect_count;
+ wmsum_t arcstat_prune;
+ aggsum_t arcstat_meta_used;
+ wmsum_t arcstat_async_upgrade_sync;
+ wmsum_t arcstat_demand_hit_predictive_prefetch;
+ wmsum_t arcstat_demand_hit_prescient_prefetch;
+ wmsum_t arcstat_raw_size;
+ wmsum_t arcstat_cached_only_in_progress;
+ wmsum_t arcstat_abd_chunk_waste_size;
+} arc_sums_t;
+
typedef struct arc_evict_waiter {
list_node_t aew_node;
kcondvar_t aew_cv;
@@ -885,7 +952,7 @@ typedef struct arc_evict_waiter {
#define ARCSTAT(stat) (arc_stats.stat.value.ui64)
#define ARCSTAT_INCR(stat, val) \
- atomic_add_64(&arc_stats.stat.value.ui64, (val))
+ wmsum_add(&arc_sums.stat, (val))
#define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1)
#define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1)
@@ -899,6 +966,7 @@ typedef struct arc_evict_waiter {
extern taskq_t *arc_prune_taskq;
extern arc_stats_t arc_stats;
+extern arc_sums_t arc_sums;
extern hrtime_t arc_growtime;
extern boolean_t arc_warm;
extern int arc_grow_retry;
@@ -906,7 +974,6 @@ extern int arc_no_grow_shift;
extern int arc_shrink_shift;
extern kmutex_t arc_prune_mtx;
extern list_t arc_prune_list;
-extern aggsum_t arc_size;
extern arc_state_t *arc_mfu;
extern arc_state_t *arc_mru;
extern uint_t zfs_arc_pc_percent;
diff --git a/sys/contrib/openzfs/include/sys/crypto/api.h b/sys/contrib/openzfs/include/sys/crypto/api.h
index 7c3c465513de..8aecfeaff0f4 100644
--- a/sys/contrib/openzfs/include/sys/crypto/api.h
+++ b/sys/contrib/openzfs/include/sys/crypto/api.h
@@ -58,7 +58,7 @@ typedef struct {
*/
#define CRYPTO_MECH_INVALID ((uint64_t)-1)
-extern crypto_mech_type_t crypto_mech2id(crypto_mech_name_t name);
+extern crypto_mech_type_t crypto_mech2id(char *name);
/*
* Create and destroy context templates.
diff --git a/sys/contrib/openzfs/include/sys/dnode.h b/sys/contrib/openzfs/include/sys/dnode.h
index 3208b60f0e7b..de6492bb7618 100644
--- a/sys/contrib/openzfs/include/sys/dnode.h
+++ b/sys/contrib/openzfs/include/sys/dnode.h
@@ -171,7 +171,7 @@ enum dnode_dirtycontext {
* example, reading 32 dnodes from a 16k dnode block and all of the spill
* blocks could issue 33 separate reads. Now suppose those dnodes have size
* 1024 and therefore don't need spill blocks. Then the worst case number
- * of blocks read is reduced to from 33 to two--one per dnode block.
+ * of blocks read is reduced from 33 to two--one per dnode block.
*
* ZFS-on-Linux systems that make heavy use of extended attributes benefit
* from this feature. In particular, ZFS-on-Linux supports the xattr=sa
@@ -232,8 +232,8 @@ typedef struct dnode_phys {
* Both dn_pad2 and dn_pad3 are protected by the block's MAC. This
* allows us to protect any fields that might be added here in the
* future. In either case, developers will want to check
- * zio_crypt_init_uios_dnode() to ensure the new field is being
- * protected properly.
+ * zio_crypt_init_uios_dnode() and zio_crypt_do_dnode_hmac_updates()
+ * to ensure the new field is being protected and updated properly.
*/
uint64_t dn_pad3[4];
diff --git a/sys/contrib/openzfs/include/sys/zfs_debug.h b/sys/contrib/openzfs/include/sys/zfs_debug.h
index 8b9629fb5e25..7b103510dd07 100644
--- a/sys/contrib/openzfs/include/sys/zfs_debug.h
+++ b/sys/contrib/openzfs/include/sys/zfs_debug.h
@@ -61,7 +61,7 @@ extern int zfs_dbgmsg_enable;
extern void __set_error(const char *file, const char *func, int line, int err);
extern void __zfs_dbgmsg(char *buf);
extern void __dprintf(boolean_t dprint, const char *file, const char *func,
- int line, const char *fmt, ...);
+ int line, const char *fmt, ...) __attribute__((format(printf, 5, 6)));
/*
* Some general principles for using zfs_dbgmsg():
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c b/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c
index 410c34888ab1..5e7d06465d35 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c
@@ -1743,6 +1743,10 @@ zfs_send_resume_impl(libzfs_handle_t *hdl, sendflags_t *flags, int outfd,
tmpflags.compress = B_TRUE;
if (lzc_flags & LZC_SEND_FLAG_EMBED_DATA)
tmpflags.embed_data = B_TRUE;
+ if (lzc_flags & LZC_SEND_FLAG_RAW)
+ tmpflags.raw = B_TRUE;
+ if (lzc_flags & LZC_SEND_FLAG_SAVED)
+ tmpflags.saved = B_TRUE;
error = estimate_size(zhp, fromname, outfd, &tmpflags,
resumeobj, resumeoff, bytes, redact_book, errbuf);
}
diff --git a/sys/contrib/openzfs/lib/libzpool/kernel.c b/sys/contrib/openzfs/lib/libzpool/kernel.c
index cc8e534e7eb5..09812decefcf 100644
--- a/sys/contrib/openzfs/lib/libzpool/kernel.c
+++ b/sys/contrib/openzfs/lib/libzpool/kernel.c
@@ -796,7 +796,7 @@ kernel_init(int mode)
physmem = sysconf(_SC_PHYS_PAGES);
- dprintf("physmem = %llu pages (%.2f GB)\n", physmem,
+ dprintf("physmem = %llu pages (%.2f GB)\n", (u_longlong_t)physmem,
(double)physmem * sysconf(_SC_PAGE_SIZE) / (1ULL << 30));
(void) snprintf(hw_serial, sizeof (hw_serial), "%ld",
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c
index cb37fb362f8c..15d3dcef50e7 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c
@@ -69,6 +69,15 @@ static abd_stats_t abd_stats = {
{ "linear_data_size", KSTAT_DATA_UINT64 },
};
+struct {
+ wmsum_t abdstat_struct_size;
+ wmsum_t abdstat_scatter_cnt;
+ wmsum_t abdstat_scatter_data_size;
+ wmsum_t abdstat_scatter_chunk_waste;
+ wmsum_t abdstat_linear_cnt;
+ wmsum_t abdstat_linear_data_size;
+} abd_sums;
+
/*
* The size of the chunks ABD allocates. Because the sizes allocated from the
* kmem_cache can't change, this tunable can only be modified at boot. Changing
@@ -272,16 +281,46 @@ abd_free_zero_scatter(void)
kmem_free(abd_zero_buf, zfs_abd_chunk_size);
}
+static int
+abd_kstats_update(kstat_t *ksp, int rw)
+{
+ abd_stats_t *as = ksp->ks_data;
+
+ if (rw == KSTAT_WRITE)
+ return (EACCES);
+ as->abdstat_struct_size.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_struct_size);
+ as->abdstat_scatter_cnt.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_scatter_cnt);
+ as->abdstat_scatter_data_size.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_scatter_data_size);
+ as->abdstat_scatter_chunk_waste.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_scatter_chunk_waste);
+ as->abdstat_linear_cnt.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_linear_cnt);
+ as->abdstat_linear_data_size.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_linear_data_size);
+ return (0);
+}
+
void
abd_init(void)
{
abd_chunk_cache = kmem_cache_create("abd_chunk", zfs_abd_chunk_size, 0,
NULL, NULL, NULL, NULL, 0, KMC_NODEBUG);
+ wmsum_init(&abd_sums.abdstat_struct_size, 0);
+ wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);
+ wmsum_init(&abd_sums.abdstat_scatter_data_size, 0);
+ wmsum_init(&abd_sums.abdstat_scatter_chunk_waste, 0);
+ wmsum_init(&abd_sums.abdstat_linear_cnt, 0);
+ wmsum_init(&abd_sums.abdstat_linear_data_size, 0);
+
abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (abd_ksp != NULL) {
abd_ksp->ks_data = &abd_stats;
+ abd_ksp->ks_update = abd_kstats_update;
kstat_install(abd_ksp);
}
@@ -298,6 +337,13 @@ abd_fini(void)
abd_ksp = NULL;
}
+ wmsum_fini(&abd_sums.abdstat_struct_size);
+ wmsum_fini(&abd_sums.abdstat_scatter_cnt);
+ wmsum_fini(&abd_sums.abdstat_scatter_data_size);
+ wmsum_fini(&abd_sums.abdstat_scatter_chunk_waste);
+ wmsum_fini(&abd_sums.abdstat_linear_cnt);
+ wmsum_fini(&abd_sums.abdstat_linear_data_size);
+
kmem_cache_destroy(abd_chunk_cache);
abd_chunk_cache = NULL;
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_debug.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_debug.c
index 7239db80851c..dad342b06fc1 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_debug.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_debug.c
@@ -181,7 +181,7 @@ __set_error(const char *file, const char *func, int line, int err)
* $ echo 512 >/sys/module/zfs/parameters/zfs_flags
*/
if (zfs_flags & ZFS_DEBUG_SET_ERROR)
- __dprintf(B_FALSE, file, func, line, "error %lu", err);
+ __dprintf(B_FALSE, file, func, line, "error %lu", (ulong_t)err);
}
#ifdef _KERNEL
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
index 516d7dd81d18..b05105e0bb22 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
@@ -1066,7 +1066,7 @@ zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
&zfsvfs->z_kstat, zs.zs_num_entries);
dprintf_ds(zfsvfs->z_os->os_dsl_dataset,
"num_entries in unlinked set: %llu",
- zs.zs_num_entries);
+ (u_longlong_t)zs.zs_num_entries);
}
zfs_unlinked_drain(zfsvfs);
@@ -1880,7 +1880,9 @@ zfs_fhtovp(vfs_t *vfsp, fid_t *fidp, int flags, vnode_t **vpp)
gen_mask = -1ULL >> (64 - 8 * i);
- dprintf("getting %llu [%u mask %llx]\n", object, fid_gen, gen_mask);
+ dprintf("getting %llu [%llu mask %llx]\n", (u_longlong_t)object,
+ (u_longlong_t)fid_gen,
+ (u_longlong_t)gen_mask);
if ((err = zfs_zget(zfsvfs, object, &zp))) {
ZFS_EXIT(zfsvfs);
return (err);
@@ -1891,7 +1893,8 @@ zfs_fhtovp(vfs_t *vfsp, fid_t *fidp, int flags, vnode_t **vpp)
if (zp_gen == 0)
zp_gen = 1;
if (zp->z_unlinked || zp_gen != fid_gen) {
- dprintf("znode gen (%u) != fid gen (%u)\n", zp_gen, fid_gen);
+ dprintf("znode gen (%llu) != fid gen (%llu)\n",
+ (u_longlong_t)zp_gen, (u_longlong_t)fid_gen);
vrele(ZTOV(zp));
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c b/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c
index 551a3cc8d1db..af543d6e3f7e 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c
@@ -132,6 +132,20 @@ static abd_stats_t abd_stats = {
{ "scatter_sg_table_retry", KSTAT_DATA_UINT64 },
};
+struct {
+ wmsum_t abdstat_struct_size;
+ wmsum_t abdstat_linear_cnt;
+ wmsum_t abdstat_linear_data_size;
+ wmsum_t abdstat_scatter_cnt;
+ wmsum_t abdstat_scatter_data_size;
+ wmsum_t abdstat_scatter_chunk_waste;
+ wmsum_t abdstat_scatter_orders[MAX_ORDER];
+ wmsum_t abdstat_scatter_page_multi_chunk;
+ wmsum_t abdstat_scatter_page_multi_zone;
+ wmsum_t abdstat_scatter_page_alloc_retry;
+ wmsum_t abdstat_scatter_sg_table_retry;
+} abd_sums;
+
#define abd_for_each_sg(abd, sg, n, i) \
for_each_sg(ABD_SCATTER(abd).abd_sgl, sg, n, i)
@@ -687,6 +701,40 @@ abd_free_zero_scatter(void)
#endif /* _KERNEL */
}
+static int
+abd_kstats_update(kstat_t *ksp, int rw)
+{
+ abd_stats_t *as = ksp->ks_data;
+
+ if (rw == KSTAT_WRITE)
+ return (EACCES);
+ as->abdstat_struct_size.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_struct_size);
+ as->abdstat_linear_cnt.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_linear_cnt);
+ as->abdstat_linear_data_size.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_linear_data_size);
+ as->abdstat_scatter_cnt.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_scatter_cnt);
+ as->abdstat_scatter_data_size.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_scatter_data_size);
+ as->abdstat_scatter_chunk_waste.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_scatter_chunk_waste);
+ for (int i = 0; i < MAX_ORDER; i++) {
+ as->abdstat_scatter_orders[i].value.ui64 =
+ wmsum_value(&abd_sums.abdstat_scatter_orders[i]);
+ }
+ as->abdstat_scatter_page_multi_chunk.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_scatter_page_multi_chunk);
+ as->abdstat_scatter_page_multi_zone.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_scatter_page_multi_zone);
+ as->abdstat_scatter_page_alloc_retry.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_scatter_page_alloc_retry);
+ as->abdstat_scatter_sg_table_retry.value.ui64 =
+ wmsum_value(&abd_sums.abdstat_scatter_sg_table_retry);
+ return (0);
+}
+
void
abd_init(void)
{
@@ -695,6 +743,19 @@ abd_init(void)
abd_cache = kmem_cache_create("abd_t", sizeof (abd_t),
0, NULL, NULL, NULL, NULL, NULL, 0);
+ wmsum_init(&abd_sums.abdstat_struct_size, 0);
+ wmsum_init(&abd_sums.abdstat_linear_cnt, 0);
+ wmsum_init(&abd_sums.abdstat_linear_data_size, 0);
+ wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);
+ wmsum_init(&abd_sums.abdstat_scatter_data_size, 0);
+ wmsum_init(&abd_sums.abdstat_scatter_chunk_waste, 0);
+ for (i = 0; i < MAX_ORDER; i++)
+ wmsum_init(&abd_sums.abdstat_scatter_orders[i], 0);
+ wmsum_init(&abd_sums.abdstat_scatter_page_multi_chunk, 0);
+ wmsum_init(&abd_sums.abdstat_scatter_page_multi_zone, 0);
+ wmsum_init(&abd_sums.abdstat_scatter_page_alloc_retry, 0);
+ wmsum_init(&abd_sums.abdstat_scatter_sg_table_retry, 0);
+
abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (abd_ksp != NULL) {
@@ -705,6 +766,7 @@ abd_init(void)
KSTAT_DATA_UINT64;
}
abd_ksp->ks_data = &abd_stats;
+ abd_ksp->ks_update = abd_kstats_update;
kstat_install(abd_ksp);
}
@@ -721,6 +783,19 @@ abd_fini(void)
abd_ksp = NULL;
}
+ wmsum_fini(&abd_sums.abdstat_struct_size);
+ wmsum_fini(&abd_sums.abdstat_linear_cnt);
+ wmsum_fini(&abd_sums.abdstat_linear_data_size);
+ wmsum_fini(&abd_sums.abdstat_scatter_cnt);
+ wmsum_fini(&abd_sums.abdstat_scatter_data_size);
+ wmsum_fini(&abd_sums.abdstat_scatter_chunk_waste);
+ for (int i = 0; i < MAX_ORDER; i++)
+ wmsum_fini(&abd_sums.abdstat_scatter_orders[i]);
+ wmsum_fini(&abd_sums.abdstat_scatter_page_multi_chunk);
+ wmsum_fini(&abd_sums.abdstat_scatter_page_multi_zone);
+ wmsum_fini(&abd_sums.abdstat_scatter_page_alloc_retry);
+ wmsum_fini(&abd_sums.abdstat_scatter_sg_table_retry);
+
if (abd_cache) {
kmem_cache_destroy(abd_cache);
abd_cache = NULL;
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/arc_os.c b/sys/contrib/openzfs/module/os/linux/zfs/arc_os.c
index 465775a6748e..b03ad8318d1d 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/arc_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/arc_os.c
@@ -135,7 +135,7 @@ arc_available_memory(void)
static uint64_t
arc_evictable_memory(void)
{
- int64_t asize = aggsum_value(&arc_size);
+ int64_t asize = aggsum_value(&arc_sums.arcstat_size);
uint64_t arc_clean =
zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_DATA]) +
zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) +
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_debug.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_debug.c
index 8d7f04097da8..98c9923d5927 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_debug.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_debug.c
@@ -127,7 +127,8 @@ __set_error(const char *file, const char *func, int line, int err)
* $ echo 512 >/sys/module/zfs/parameters/zfs_flags
*/
if (zfs_flags & ZFS_DEBUG_SET_ERROR)
- __dprintf(B_FALSE, file, func, line, "error %lu", err);
+ __dprintf(B_FALSE, file, func, line, "error %lu",
+ (ulong_t)err);
}
void
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zio_crypt.c b/sys/contrib/openzfs/module/os/linux/zfs/zio_crypt.c
index 94406999cb89..52e62f4d1da4 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zio_crypt.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zio_crypt.c
@@ -190,7 +190,7 @@ unsigned long zfs_key_max_salt_uses = ZFS_KEY_MAX_SALT_USES_DEFAULT;
typedef struct blkptr_auth_buf {
uint64_t bab_prop; /* blk_prop - portable mask */
- uint8_t bab_mac[ZIO_DATA_MAC_LEN]; /* MAC from blk_cksum */
+ uint8_t bab_mac[ZIO_DATA_MAC_LEN]; /* MAC from blk_cksum */
uint64_t bab_pad; /* reserved for future use */
} blkptr_auth_buf_t;
@@ -1045,17 +1045,23 @@ zio_crypt_do_dnode_hmac_updates(crypto_context_t ctx, uint64_t version,
boolean_t should_bswap, dnode_phys_t *dnp)
{
int ret, i;
- dnode_phys_t *adnp;
+ dnode_phys_t *adnp, tmp_dncore;
+ size_t dn_core_size = offsetof(dnode_phys_t, dn_blkptr);
boolean_t le_bswap = (should_bswap == ZFS_HOST_BYTEORDER);
crypto_data_t cd;
- uint8_t tmp_dncore[offsetof(dnode_phys_t, dn_blkptr)];
cd.cd_format = CRYPTO_DATA_RAW;
cd.cd_offset = 0;
- /* authenticate the core dnode (masking out non-portable bits) */
- bcopy(dnp, tmp_dncore, sizeof (tmp_dncore));
- adnp = (dnode_phys_t *)tmp_dncore;
+ /*
+ * Authenticate the core dnode (masking out non-portable bits).
+ * We only copy the first 64 bytes we operate on to avoid the overhead
+ * of copying 512-64 unneeded bytes. The compiler seems to be fine
+ * with that.
+ */
+ bcopy(dnp, &tmp_dncore, dn_core_size);
+ adnp = &tmp_dncore;
+
if (le_bswap) {
adnp->dn_datablkszsec = BSWAP_16(adnp->dn_datablkszsec);
adnp->dn_bonuslen = BSWAP_16(adnp->dn_bonuslen);
@@ -1065,7 +1071,7 @@ zio_crypt_do_dnode_hmac_updates(crypto_context_t ctx, uint64_t version,
adnp->dn_flags &= DNODE_CRYPT_PORTABLE_FLAGS_MASK;
adnp->dn_used = 0;
- cd.cd_length = sizeof (tmp_dncore);
+ cd.cd_length = dn_core_size;
cd.cd_raw.iov_base = (char *)adnp;
cd.cd_raw.iov_len = cd.cd_length;
diff --git a/sys/contrib/openzfs/module/zfs/arc.c b/sys/contrib/openzfs/module/zfs/arc.c
index 300acc251fb7..7d892f4c7b9d 100644
--- a/sys/contrib/openzfs/module/zfs/arc.c
+++ b/sys/contrib/openzfs/module/zfs/arc.c
@@ -600,6 +600,8 @@ arc_stats_t arc_stats = {
{ "abd_chunk_waste_size", KSTAT_DATA_UINT64 },
};
+arc_sums_t arc_sums;
+
#define ARCSTAT_MAX(stat, val) { \
uint64_t m; \
while ((val) > (m = arc_stats.stat.value.ui64) && \
@@ -607,9 +609,6 @@ arc_stats_t arc_stats = {
continue; \
}
-#define ARCSTAT_MAXSTAT(stat) \
- ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
-
/*
* We define a macro to allow ARC hits/misses to be easily broken down by
* two separate conditions, giving a total of four different subtypes for
@@ -671,37 +670,8 @@ arc_state_t *arc_mfu;
/* max size for dnodes */
#define arc_dnode_size_limit ARCSTAT(arcstat_dnode_limit)
#define arc_meta_min ARCSTAT(arcstat_meta_min) /* min size for metadata */
-#define arc_meta_max ARCSTAT(arcstat_meta_max) /* max size of metadata */
#define arc_need_free ARCSTAT(arcstat_need_free) /* waiting to be evicted */
-/* size of all b_rabd's in entire arc */
-#define arc_raw_size ARCSTAT(arcstat_raw_size)
-/* compressed size of entire arc */
-#define arc_compressed_size ARCSTAT(arcstat_compressed_size)
-/* uncompressed size of entire arc */
-#define arc_uncompressed_size ARCSTAT(arcstat_uncompressed_size)
-/* number of bytes in the arc from arc_buf_t's */
-#define arc_overhead_size ARCSTAT(arcstat_overhead_size)
-
-/*
- * There are also some ARC variables that we want to export, but that are
- * updated so often that having the canonical representation be the statistic
- * variable causes a performance bottleneck. We want to use aggsum_t's for these
- * instead, but still be able to export the kstat in the same way as before.
- * The solution is to always use the aggsum version, except in the kstat update
- * callback.
- */
-aggsum_t arc_size;
-aggsum_t arc_meta_used;
-wmsum_t astat_data_size;
-wmsum_t astat_metadata_size;
-wmsum_t astat_dbuf_size;
-aggsum_t astat_dnode_size;
-wmsum_t astat_bonus_size;
-wmsum_t astat_hdr_size;
-aggsum_t astat_l2_hdr_size;
-wmsum_t astat_abd_chunk_waste_size;
-
hrtime_t arc_growtime;
list_t arc_prune_list;
kmutex_t arc_prune_mtx;
@@ -819,9 +789,6 @@ uint64_t zfs_crc64_table[256];
*/
#define L2ARC_FEED_TYPES 4
-#define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent)
-#define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done)
-
/* L2ARC Performance Tunables */
unsigned long l2arc_write_max = L2ARC_WRITE_SIZE; /* def max write size */
unsigned long l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra warmup write */
@@ -1085,9 +1052,9 @@ buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp)
ARCSTAT_MAX(arcstat_hash_chain_max, i);
}
-
- ARCSTAT_BUMP(arcstat_hash_elements);
- ARCSTAT_MAXSTAT(arcstat_hash_elements);
+ uint64_t he = atomic_inc_64_nv(
+ &arc_stats.arcstat_hash_elements.value.ui64);
+ ARCSTAT_MAX(arcstat_hash_elements_max, he);
return (NULL);
}
@@ -1111,7 +1078,7 @@ buf_hash_remove(arc_buf_hdr_t *hdr)
arc_hdr_clear_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
/* collect some hash table performance data */
- ARCSTAT_BUMPDOWN(arcstat_hash_elements);
+ atomic_dec_64(&arc_stats.arcstat_hash_elements.value.ui64);
if (buf_hash_table.ht_table[idx] &&
buf_hash_table.ht_table[idx]->b_hash_next == NULL)
@@ -2646,25 +2613,25 @@ arc_space_consume(uint64_t space, arc_space_type_t type)
default:
break;
case ARC_SPACE_DATA:
- wmsum_add(&astat_data_size, space);
+ ARCSTAT_INCR(arcstat_data_size, space);
break;
case ARC_SPACE_META:
- wmsum_add(&astat_metadata_size, space);
+ ARCSTAT_INCR(arcstat_metadata_size, space);
break;
case ARC_SPACE_BONUS:
- wmsum_add(&astat_bonus_size, space);
+ ARCSTAT_INCR(arcstat_bonus_size, space);
break;
case ARC_SPACE_DNODE:
- aggsum_add(&astat_dnode_size, space);
+ aggsum_add(&arc_sums.arcstat_dnode_size, space);
break;
case ARC_SPACE_DBUF:
- wmsum_add(&astat_dbuf_size, space);
+ ARCSTAT_INCR(arcstat_dbuf_size, space);
break;
case ARC_SPACE_HDRS:
- wmsum_add(&astat_hdr_size, space);
+ ARCSTAT_INCR(arcstat_hdr_size, space);
break;
case ARC_SPACE_L2HDRS:
- aggsum_add(&astat_l2_hdr_size, space);
+ aggsum_add(&arc_sums.arcstat_l2_hdr_size, space);
break;
case ARC_SPACE_ABD_CHUNK_WASTE:
/*
@@ -2673,14 +2640,14 @@ arc_space_consume(uint64_t space, arc_space_type_t type)
* scatter ABD's come from the ARC, because other users are
* very short-lived.
*/
- wmsum_add(&astat_abd_chunk_waste_size, space);
+ ARCSTAT_INCR(arcstat_abd_chunk_waste_size, space);
break;
}
if (type != ARC_SPACE_DATA && type != ARC_SPACE_ABD_CHUNK_WASTE)
- aggsum_add(&arc_meta_used, space);
+ aggsum_add(&arc_sums.arcstat_meta_used, space);
- aggsum_add(&arc_size, space);
+ aggsum_add(&arc_sums.arcstat_size, space);
}
void
@@ -2692,45 +2659,41 @@ arc_space_return(uint64_t space, arc_space_type_t type)
default:
break;
case ARC_SPACE_DATA:
- wmsum_add(&astat_data_size, -space);
+ ARCSTAT_INCR(arcstat_data_size, -space);
break;
case ARC_SPACE_META:
- wmsum_add(&astat_metadata_size, -space);
+ ARCSTAT_INCR(arcstat_metadata_size, -space);
break;
case ARC_SPACE_BONUS:
- wmsum_add(&astat_bonus_size, -space);
+ ARCSTAT_INCR(arcstat_bonus_size, -space);
break;
case ARC_SPACE_DNODE:
- aggsum_add(&astat_dnode_size, -space);
+ aggsum_add(&arc_sums.arcstat_dnode_size, -space);
break;
case ARC_SPACE_DBUF:
- wmsum_add(&astat_dbuf_size, -space);
+ ARCSTAT_INCR(arcstat_dbuf_size, -space);
break;
case ARC_SPACE_HDRS:
- wmsum_add(&astat_hdr_size, -space);
+ ARCSTAT_INCR(arcstat_hdr_size, -space);
break;
case ARC_SPACE_L2HDRS:
- aggsum_add(&astat_l2_hdr_size, -space);
+ aggsum_add(&arc_sums.arcstat_l2_hdr_size, -space);
break;
case ARC_SPACE_ABD_CHUNK_WASTE:
- wmsum_add(&astat_abd_chunk_waste_size, -space);
+ ARCSTAT_INCR(arcstat_abd_chunk_waste_size, -space);
break;
}
if (type != ARC_SPACE_DATA && type != ARC_SPACE_ABD_CHUNK_WASTE) {
- ASSERT(aggsum_compare(&arc_meta_used, space) >= 0);
- /*
- * We use the upper bound here rather than the precise value
- * because the arc_meta_max value doesn't need to be
- * precise. It's only consumed by humans via arcstats.
- */
- if (arc_meta_max < aggsum_upper_bound(&arc_meta_used))
- arc_meta_max = aggsum_upper_bound(&arc_meta_used);
- aggsum_add(&arc_meta_used, -space);
+ ASSERT(aggsum_compare(&arc_sums.arcstat_meta_used,
+ space) >= 0);
+ ARCSTAT_MAX(arcstat_meta_max,
+ aggsum_upper_bound(&arc_sums.arcstat_meta_used));
+ aggsum_add(&arc_sums.arcstat_meta_used, -space);
}
- ASSERT(aggsum_compare(&arc_size, space) >= 0);
- aggsum_add(&arc_size, -space);
+ ASSERT(aggsum_compare(&arc_sums.arcstat_size, space) >= 0);
+ aggsum_add(&arc_sums.arcstat_size, -space);
}
/*
@@ -4251,9 +4214,10 @@ arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes,
* Request that 10% of the LRUs be scanned by the superblock
* shrinker.
*/
- if (type == ARC_BUFC_DATA && aggsum_compare(&astat_dnode_size,
- arc_dnode_size_limit) > 0) {
- arc_prune_async((aggsum_upper_bound(&astat_dnode_size) -
+ if (type == ARC_BUFC_DATA && aggsum_compare(
+ &arc_sums.arcstat_dnode_size, arc_dnode_size_limit) > 0) {
+ arc_prune_async((aggsum_upper_bound(
+ &arc_sums.arcstat_dnode_size) -
arc_dnode_size_limit) / sizeof (dnode_t) /
zfs_arc_dnode_reduce_percent);
}
@@ -4483,7 +4447,7 @@ restart:
}
/*
- * Evict metadata buffers from the cache, such that arc_meta_used is
+ * Evict metadata buffers from the cache, such that arcstat_meta_used is
* capped by the arc_meta_limit tunable.
*/
static uint64_t
@@ -4604,7 +4568,7 @@ arc_evict_type(arc_state_t *state)
}
/*
- * Evict buffers from the cache, such that arc_size is capped by arc_c.
+ * Evict buffers from the cache, such that arcstat_size is capped by arc_c.
*/
static uint64_t
arc_evict(void)
@@ -4612,8 +4576,8 @@ arc_evict(void)
uint64_t total_evicted = 0;
uint64_t bytes;
int64_t target;
- uint64_t asize = aggsum_value(&arc_size);
- uint64_t ameta = aggsum_value(&arc_meta_used);
+ uint64_t asize = aggsum_value(&arc_sums.arcstat_size);
+ uint64_t ameta = aggsum_value(&arc_sums.arcstat_meta_used);
/*
* If we're over arc_meta_limit, we want to correct that before
@@ -4673,8 +4637,8 @@ arc_evict(void)
/*
* Re-sum ARC stats after the first round of evictions.
*/
- asize = aggsum_value(&arc_size);
- ameta = aggsum_value(&arc_meta_used);
+ asize = aggsum_value(&arc_sums.arcstat_size);
+ ameta = aggsum_value(&arc_sums.arcstat_meta_used);
/*
@@ -4788,7 +4752,7 @@ arc_flush(spa_t *spa, boolean_t retry)
void
arc_reduce_target_size(int64_t to_free)
{
- uint64_t asize = aggsum_value(&arc_size);
+ uint64_t asize = aggsum_value(&arc_sums.arcstat_size);
/*
* All callers want the ARC to actually evict (at least) this much
@@ -4841,8 +4805,8 @@ arc_kmem_reap_soon(void)
extern kmem_cache_t *zio_data_buf_cache[];
#ifdef _KERNEL
- if ((aggsum_compare(&arc_meta_used, arc_meta_limit) >= 0) &&
- zfs_arc_meta_prune) {
+ if ((aggsum_compare(&arc_sums.arcstat_meta_used,
+ arc_meta_limit) >= 0) && zfs_arc_meta_prune) {
/*
* We are exceeding our meta-data cache limit.
* Prune some entries to release holds on meta-data.
@@ -4945,7 +4909,7 @@ arc_evict_cb(void *arg, zthr_t *zthr)
*/
mutex_enter(&arc_evict_lock);
arc_evict_needed = !zthr_iscancelled(arc_evict_zthr) &&
- evicted > 0 && aggsum_compare(&arc_size, arc_c) > 0;
+ evicted > 0 && aggsum_compare(&arc_sums.arcstat_size, arc_c) > 0;
if (!arc_evict_needed) {
/*
* We're either no longer overflowing, or we
@@ -5158,7 +5122,7 @@ arc_adapt(int bytes, arc_state_t *state)
* cache size, increment the target cache size
*/
ASSERT3U(arc_c, >=, 2ULL << SPA_MAXBLOCKSHIFT);
- if (aggsum_upper_bound(&arc_size) >=
+ if (aggsum_upper_bound(&arc_sums.arcstat_size) >=
arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
atomic_add_64(&arc_c, (int64_t)bytes);
if (arc_c > arc_c_max)
@@ -5191,7 +5155,8 @@ arc_is_overflowing(void)
* in the ARC. In practice, that's in the tens of MB, which is low
* enough to be safe.
*/
- return (aggsum_lower_bound(&arc_size) >= (int64_t)arc_c + overflow);
+ return (aggsum_lower_bound(&arc_sums.arcstat_size) >=
+ (int64_t)arc_c + overflow);
}
static abd_t *
@@ -5360,7 +5325,7 @@ arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag,
* If we are growing the cache, and we are adding anonymous
* data, and we have outgrown arc_p, update arc_p
*/
- if (aggsum_upper_bound(&arc_size) < arc_c &&
+ if (aggsum_upper_bound(&arc_sums.arcstat_size) < arc_c &&
hdr->b_l1hdr.b_state == arc_anon &&
(zfs_refcount_count(&arc_anon->arcs_size) +
zfs_refcount_count(&arc_mru->arcs_size) > arc_p))
@@ -7219,8 +7184,11 @@ arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg)
zfs_refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
"anon_data=%lluK tempreserve=%lluK rarc_c=%lluK\n",
- arc_tempreserve >> 10, meta_esize >> 10,
- data_esize >> 10, reserve >> 10, rarc_c >> 10);
+ (u_longlong_t)arc_tempreserve >> 10,
+ (u_longlong_t)meta_esize >> 10,
+ (u_longlong_t)data_esize >> 10,
+ (u_longlong_t)reserve >> 10,
+ (u_longlong_t)rarc_c >> 10);
#endif
DMU_TX_STAT_BUMP(dmu_tx_dirty_throttle);
return (SET_ERROR(ERESTART));
@@ -7245,55 +7213,219 @@ arc_kstat_update(kstat_t *ksp, int rw)
{
arc_stats_t *as = ksp->ks_data;
- if (rw == KSTAT_WRITE) {
+ if (rw == KSTAT_WRITE)
return (SET_ERROR(EACCES));
- } else {
- arc_kstat_update_state(arc_anon,
- &as->arcstat_anon_size,
- &as->arcstat_anon_evictable_data,
- &as->arcstat_anon_evictable_metadata);
- arc_kstat_update_state(arc_mru,
- &as->arcstat_mru_size,
- &as->arcstat_mru_evictable_data,
- &as->arcstat_mru_evictable_metadata);
- arc_kstat_update_state(arc_mru_ghost,
- &as->arcstat_mru_ghost_size,
- &as->arcstat_mru_ghost_evictable_data,
- &as->arcstat_mru_ghost_evictable_metadata);
- arc_kstat_update_state(arc_mfu,
- &as->arcstat_mfu_size,
- &as->arcstat_mfu_evictable_data,
- &as->arcstat_mfu_evictable_metadata);
- arc_kstat_update_state(arc_mfu_ghost,
- &as->arcstat_mfu_ghost_size,
- &as->arcstat_mfu_ghost_evictable_data,
- &as->arcstat_mfu_ghost_evictable_metadata);
-
- ARCSTAT(arcstat_size) = aggsum_value(&arc_size);
- ARCSTAT(arcstat_meta_used) = aggsum_value(&arc_meta_used);
- ARCSTAT(arcstat_data_size) = wmsum_value(&astat_data_size);
- ARCSTAT(arcstat_metadata_size) =
- wmsum_value(&astat_metadata_size);
- ARCSTAT(arcstat_hdr_size) = wmsum_value(&astat_hdr_size);
- ARCSTAT(arcstat_l2_hdr_size) = aggsum_value(&astat_l2_hdr_size);
- ARCSTAT(arcstat_dbuf_size) = wmsum_value(&astat_dbuf_size);
+
+ as->arcstat_hits.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_hits);
+ as->arcstat_misses.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_misses);
+ as->arcstat_demand_data_hits.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_demand_data_hits);
+ as->arcstat_demand_data_misses.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_demand_data_misses);
+ as->arcstat_demand_metadata_hits.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_demand_metadata_hits);
+ as->arcstat_demand_metadata_misses.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_demand_metadata_misses);
+ as->arcstat_prefetch_data_hits.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_prefetch_data_hits);
+ as->arcstat_prefetch_data_misses.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_prefetch_data_misses);
+ as->arcstat_prefetch_metadata_hits.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_prefetch_metadata_hits);
+ as->arcstat_prefetch_metadata_misses.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_prefetch_metadata_misses);
+ as->arcstat_mru_hits.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_mru_hits);
+ as->arcstat_mru_ghost_hits.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_mru_ghost_hits);
+ as->arcstat_mfu_hits.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_mfu_hits);
+ as->arcstat_mfu_ghost_hits.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_mfu_ghost_hits);
+ as->arcstat_deleted.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_deleted);
+ as->arcstat_mutex_miss.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_mutex_miss);
+ as->arcstat_access_skip.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_access_skip);
+ as->arcstat_evict_skip.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_evict_skip);
+ as->arcstat_evict_not_enough.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_evict_not_enough);
+ as->arcstat_evict_l2_cached.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_evict_l2_cached);
+ as->arcstat_evict_l2_eligible.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_evict_l2_eligible);
+ as->arcstat_evict_l2_eligible_mfu.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_evict_l2_eligible_mfu);
+ as->arcstat_evict_l2_eligible_mru.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_evict_l2_eligible_mru);
+ as->arcstat_evict_l2_ineligible.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_evict_l2_ineligible);
+ as->arcstat_evict_l2_skip.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_evict_l2_skip);
+ as->arcstat_hash_collisions.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_hash_collisions);
+ as->arcstat_hash_chains.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_hash_chains);
+ as->arcstat_size.value.ui64 =
+ aggsum_value(&arc_sums.arcstat_size);
+ as->arcstat_compressed_size.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_compressed_size);
+ as->arcstat_uncompressed_size.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_uncompressed_size);
+ as->arcstat_overhead_size.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_overhead_size);
+ as->arcstat_hdr_size.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_hdr_size);
+ as->arcstat_data_size.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_data_size);
+ as->arcstat_metadata_size.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_metadata_size);
+ as->arcstat_dbuf_size.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_dbuf_size);
#if defined(COMPAT_FREEBSD11)
- ARCSTAT(arcstat_other_size) = wmsum_value(&astat_bonus_size) +
- aggsum_value(&astat_dnode_size) +
- wmsum_value(&astat_dbuf_size);
+ as->arcstat_other_size.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_bonus_size) +
+ aggsum_value(&arc_sums.arcstat_dnode_size) +
+ wmsum_value(&arc_sums.arcstat_dbuf_size);
#endif
- ARCSTAT(arcstat_dnode_size) = aggsum_value(&astat_dnode_size);
- ARCSTAT(arcstat_bonus_size) = wmsum_value(&astat_bonus_size);
- ARCSTAT(arcstat_abd_chunk_waste_size) =
- wmsum_value(&astat_abd_chunk_waste_size);
- as->arcstat_memory_all_bytes.value.ui64 =
- arc_all_memory();
- as->arcstat_memory_free_bytes.value.ui64 =
- arc_free_memory();
- as->arcstat_memory_available_bytes.value.i64 =
- arc_available_memory();
- }
+ arc_kstat_update_state(arc_anon,
+ &as->arcstat_anon_size,
+ &as->arcstat_anon_evictable_data,
+ &as->arcstat_anon_evictable_metadata);
+ arc_kstat_update_state(arc_mru,
+ &as->arcstat_mru_size,
+ &as->arcstat_mru_evictable_data,
+ &as->arcstat_mru_evictable_metadata);
+ arc_kstat_update_state(arc_mru_ghost,
+ &as->arcstat_mru_ghost_size,
+ &as->arcstat_mru_ghost_evictable_data,
+ &as->arcstat_mru_ghost_evictable_metadata);
+ arc_kstat_update_state(arc_mfu,
+ &as->arcstat_mfu_size,
+ &as->arcstat_mfu_evictable_data,
+ &as->arcstat_mfu_evictable_metadata);
+ arc_kstat_update_state(arc_mfu_ghost,
+ &as->arcstat_mfu_ghost_size,
+ &as->arcstat_mfu_ghost_evictable_data,
+ &as->arcstat_mfu_ghost_evictable_metadata);
+
+ as->arcstat_dnode_size.value.ui64 =
+ aggsum_value(&arc_sums.arcstat_dnode_size);
+ as->arcstat_bonus_size.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_bonus_size);
+ as->arcstat_l2_hits.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_hits);
+ as->arcstat_l2_misses.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_misses);
+ as->arcstat_l2_prefetch_asize.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_prefetch_asize);
+ as->arcstat_l2_mru_asize.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_mru_asize);
+ as->arcstat_l2_mfu_asize.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_mfu_asize);
+ as->arcstat_l2_bufc_data_asize.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_bufc_data_asize);
+ as->arcstat_l2_bufc_metadata_asize.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_bufc_metadata_asize);
+ as->arcstat_l2_feeds.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_feeds);
+ as->arcstat_l2_rw_clash.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_rw_clash);
+ as->arcstat_l2_read_bytes.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_read_bytes);
+ as->arcstat_l2_write_bytes.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_write_bytes);
+ as->arcstat_l2_writes_sent.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_writes_sent);
+ as->arcstat_l2_writes_done.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_writes_done);
+ as->arcstat_l2_writes_error.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_writes_error);
+ as->arcstat_l2_writes_lock_retry.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_writes_lock_retry);
+ as->arcstat_l2_evict_lock_retry.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_evict_lock_retry);
+ as->arcstat_l2_evict_reading.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_evict_reading);
+ as->arcstat_l2_evict_l1cached.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_evict_l1cached);
+ as->arcstat_l2_free_on_write.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_free_on_write);
+ as->arcstat_l2_abort_lowmem.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_abort_lowmem);
+ as->arcstat_l2_cksum_bad.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_cksum_bad);
+ as->arcstat_l2_io_error.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_io_error);
+ as->arcstat_l2_lsize.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_lsize);
+ as->arcstat_l2_psize.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_psize);
+ as->arcstat_l2_hdr_size.value.ui64 =
+ aggsum_value(&arc_sums.arcstat_l2_hdr_size);
+ as->arcstat_l2_log_blk_writes.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_log_blk_writes);
+ as->arcstat_l2_log_blk_asize.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_log_blk_asize);
+ as->arcstat_l2_log_blk_count.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_log_blk_count);
+ as->arcstat_l2_rebuild_success.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_rebuild_success);
+ as->arcstat_l2_rebuild_abort_unsupported.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_unsupported);
+ as->arcstat_l2_rebuild_abort_io_errors.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_io_errors);
+ as->arcstat_l2_rebuild_abort_dh_errors.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_dh_errors);
+ as->arcstat_l2_rebuild_abort_cksum_lb_errors.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors);
+ as->arcstat_l2_rebuild_abort_lowmem.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_lowmem);
+ as->arcstat_l2_rebuild_size.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_rebuild_size);
+ as->arcstat_l2_rebuild_asize.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_rebuild_asize);
+ as->arcstat_l2_rebuild_bufs.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_rebuild_bufs);
+ as->arcstat_l2_rebuild_bufs_precached.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_rebuild_bufs_precached);
+ as->arcstat_l2_rebuild_log_blks.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_l2_rebuild_log_blks);
+ as->arcstat_memory_throttle_count.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_memory_throttle_count);
+ as->arcstat_memory_direct_count.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_memory_direct_count);
+ as->arcstat_memory_indirect_count.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_memory_indirect_count);
+
+ as->arcstat_memory_all_bytes.value.ui64 =
+ arc_all_memory();
+ as->arcstat_memory_free_bytes.value.ui64 =
+ arc_free_memory();
+ as->arcstat_memory_available_bytes.value.i64 =
+ arc_available_memory();
+
+ as->arcstat_prune.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_prune);
+ as->arcstat_meta_used.value.ui64 =
+ aggsum_value(&arc_sums.arcstat_meta_used);
+ as->arcstat_async_upgrade_sync.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_async_upgrade_sync);
+ as->arcstat_demand_hit_predictive_prefetch.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_demand_hit_predictive_prefetch);
+ as->arcstat_demand_hit_prescient_prefetch.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_demand_hit_prescient_prefetch);
+ as->arcstat_raw_size.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_raw_size);
+ as->arcstat_cached_only_in_progress.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_cached_only_in_progress);
+ as->arcstat_abd_chunk_waste_size.value.ui64 =
+ wmsum_value(&arc_sums.arcstat_abd_chunk_waste_size);
return (0);
}
@@ -7516,16 +7648,93 @@ arc_state_init(void)
zfs_refcount_create(&arc_mfu_ghost->arcs_size);
zfs_refcount_create(&arc_l2c_only->arcs_size);
- aggsum_init(&arc_meta_used, 0);
- aggsum_init(&arc_size, 0);
- wmsum_init(&astat_data_size, 0);
- wmsum_init(&astat_metadata_size, 0);
- wmsum_init(&astat_hdr_size, 0);
- aggsum_init(&astat_l2_hdr_size, 0);
- wmsum_init(&astat_bonus_size, 0);
- aggsum_init(&astat_dnode_size, 0);
- wmsum_init(&astat_dbuf_size, 0);
- wmsum_init(&astat_abd_chunk_waste_size, 0);
+ wmsum_init(&arc_sums.arcstat_hits, 0);
+ wmsum_init(&arc_sums.arcstat_misses, 0);
+ wmsum_init(&arc_sums.arcstat_demand_data_hits, 0);
+ wmsum_init(&arc_sums.arcstat_demand_data_misses, 0);
+ wmsum_init(&arc_sums.arcstat_demand_metadata_hits, 0);
+ wmsum_init(&arc_sums.arcstat_demand_metadata_misses, 0);
+ wmsum_init(&arc_sums.arcstat_prefetch_data_hits, 0);
+ wmsum_init(&arc_sums.arcstat_prefetch_data_misses, 0);
+ wmsum_init(&arc_sums.arcstat_prefetch_metadata_hits, 0);
+ wmsum_init(&arc_sums.arcstat_prefetch_metadata_misses, 0);
+ wmsum_init(&arc_sums.arcstat_mru_hits, 0);
+ wmsum_init(&arc_sums.arcstat_mru_ghost_hits, 0);
+ wmsum_init(&arc_sums.arcstat_mfu_hits, 0);
+ wmsum_init(&arc_sums.arcstat_mfu_ghost_hits, 0);
+ wmsum_init(&arc_sums.arcstat_deleted, 0);
+ wmsum_init(&arc_sums.arcstat_mutex_miss, 0);
+ wmsum_init(&arc_sums.arcstat_access_skip, 0);
+ wmsum_init(&arc_sums.arcstat_evict_skip, 0);
+ wmsum_init(&arc_sums.arcstat_evict_not_enough, 0);
+ wmsum_init(&arc_sums.arcstat_evict_l2_cached, 0);
+ wmsum_init(&arc_sums.arcstat_evict_l2_eligible, 0);
+ wmsum_init(&arc_sums.arcstat_evict_l2_eligible_mfu, 0);
+ wmsum_init(&arc_sums.arcstat_evict_l2_eligible_mru, 0);
+ wmsum_init(&arc_sums.arcstat_evict_l2_ineligible, 0);
+ wmsum_init(&arc_sums.arcstat_evict_l2_skip, 0);
+ wmsum_init(&arc_sums.arcstat_hash_collisions, 0);
+ wmsum_init(&arc_sums.arcstat_hash_chains, 0);
+ aggsum_init(&arc_sums.arcstat_size, 0);
+ wmsum_init(&arc_sums.arcstat_compressed_size, 0);
+ wmsum_init(&arc_sums.arcstat_uncompressed_size, 0);
+ wmsum_init(&arc_sums.arcstat_overhead_size, 0);
+ wmsum_init(&arc_sums.arcstat_hdr_size, 0);
+ wmsum_init(&arc_sums.arcstat_data_size, 0);
+ wmsum_init(&arc_sums.arcstat_metadata_size, 0);
+ wmsum_init(&arc_sums.arcstat_dbuf_size, 0);
+ aggsum_init(&arc_sums.arcstat_dnode_size, 0);
+ wmsum_init(&arc_sums.arcstat_bonus_size, 0);
+ wmsum_init(&arc_sums.arcstat_l2_hits, 0);
+ wmsum_init(&arc_sums.arcstat_l2_misses, 0);
+ wmsum_init(&arc_sums.arcstat_l2_prefetch_asize, 0);
+ wmsum_init(&arc_sums.arcstat_l2_mru_asize, 0);
+ wmsum_init(&arc_sums.arcstat_l2_mfu_asize, 0);
+ wmsum_init(&arc_sums.arcstat_l2_bufc_data_asize, 0);
+ wmsum_init(&arc_sums.arcstat_l2_bufc_metadata_asize, 0);
+ wmsum_init(&arc_sums.arcstat_l2_feeds, 0);
+ wmsum_init(&arc_sums.arcstat_l2_rw_clash, 0);
+ wmsum_init(&arc_sums.arcstat_l2_read_bytes, 0);
+ wmsum_init(&arc_sums.arcstat_l2_write_bytes, 0);
+ wmsum_init(&arc_sums.arcstat_l2_writes_sent, 0);
+ wmsum_init(&arc_sums.arcstat_l2_writes_done, 0);
+ wmsum_init(&arc_sums.arcstat_l2_writes_error, 0);
+ wmsum_init(&arc_sums.arcstat_l2_writes_lock_retry, 0);
+ wmsum_init(&arc_sums.arcstat_l2_evict_lock_retry, 0);
+ wmsum_init(&arc_sums.arcstat_l2_evict_reading, 0);
+ wmsum_init(&arc_sums.arcstat_l2_evict_l1cached, 0);
+ wmsum_init(&arc_sums.arcstat_l2_free_on_write, 0);
+ wmsum_init(&arc_sums.arcstat_l2_abort_lowmem, 0);
+ wmsum_init(&arc_sums.arcstat_l2_cksum_bad, 0);
+ wmsum_init(&arc_sums.arcstat_l2_io_error, 0);
+ wmsum_init(&arc_sums.arcstat_l2_lsize, 0);
+ wmsum_init(&arc_sums.arcstat_l2_psize, 0);
+ aggsum_init(&arc_sums.arcstat_l2_hdr_size, 0);
+ wmsum_init(&arc_sums.arcstat_l2_log_blk_writes, 0);
+ wmsum_init(&arc_sums.arcstat_l2_log_blk_asize, 0);
+ wmsum_init(&arc_sums.arcstat_l2_log_blk_count, 0);
+ wmsum_init(&arc_sums.arcstat_l2_rebuild_success, 0);
+ wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_unsupported, 0);
+ wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_io_errors, 0);
+ wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_dh_errors, 0);
+ wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors, 0);
+ wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_lowmem, 0);
+ wmsum_init(&arc_sums.arcstat_l2_rebuild_size, 0);
+ wmsum_init(&arc_sums.arcstat_l2_rebuild_asize, 0);
+ wmsum_init(&arc_sums.arcstat_l2_rebuild_bufs, 0);
+ wmsum_init(&arc_sums.arcstat_l2_rebuild_bufs_precached, 0);
+ wmsum_init(&arc_sums.arcstat_l2_rebuild_log_blks, 0);
+ wmsum_init(&arc_sums.arcstat_memory_throttle_count, 0);
+ wmsum_init(&arc_sums.arcstat_memory_direct_count, 0);
+ wmsum_init(&arc_sums.arcstat_memory_indirect_count, 0);
+ wmsum_init(&arc_sums.arcstat_prune, 0);
+ aggsum_init(&arc_sums.arcstat_meta_used, 0);
+ wmsum_init(&arc_sums.arcstat_async_upgrade_sync, 0);
+ wmsum_init(&arc_sums.arcstat_demand_hit_predictive_prefetch, 0);
+ wmsum_init(&arc_sums.arcstat_demand_hit_prescient_prefetch, 0);
+ wmsum_init(&arc_sums.arcstat_raw_size, 0);
+ wmsum_init(&arc_sums.arcstat_cached_only_in_progress, 0);
+ wmsum_init(&arc_sums.arcstat_abd_chunk_waste_size, 0);
arc_anon->arcs_state = ARC_STATE_ANON;
arc_mru->arcs_state = ARC_STATE_MRU;
@@ -7569,16 +7778,93 @@ arc_state_fini(void)
multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]);
- aggsum_fini(&arc_meta_used);
- aggsum_fini(&arc_size);
- wmsum_fini(&astat_data_size);
- wmsum_fini(&astat_metadata_size);
- wmsum_fini(&astat_hdr_size);
- aggsum_fini(&astat_l2_hdr_size);
- wmsum_fini(&astat_bonus_size);
- aggsum_fini(&astat_dnode_size);
- wmsum_fini(&astat_dbuf_size);
- wmsum_fini(&astat_abd_chunk_waste_size);
+ wmsum_fini(&arc_sums.arcstat_hits);
+ wmsum_fini(&arc_sums.arcstat_misses);
+ wmsum_fini(&arc_sums.arcstat_demand_data_hits);
+ wmsum_fini(&arc_sums.arcstat_demand_data_misses);
+ wmsum_fini(&arc_sums.arcstat_demand_metadata_hits);
+ wmsum_fini(&arc_sums.arcstat_demand_metadata_misses);
+ wmsum_fini(&arc_sums.arcstat_prefetch_data_hits);
+ wmsum_fini(&arc_sums.arcstat_prefetch_data_misses);
+ wmsum_fini(&arc_sums.arcstat_prefetch_metadata_hits);
+ wmsum_fini(&arc_sums.arcstat_prefetch_metadata_misses);
+ wmsum_fini(&arc_sums.arcstat_mru_hits);
+ wmsum_fini(&arc_sums.arcstat_mru_ghost_hits);
+ wmsum_fini(&arc_sums.arcstat_mfu_hits);
+ wmsum_fini(&arc_sums.arcstat_mfu_ghost_hits);
+ wmsum_fini(&arc_sums.arcstat_deleted);
+ wmsum_fini(&arc_sums.arcstat_mutex_miss);
+ wmsum_fini(&arc_sums.arcstat_access_skip);
+ wmsum_fini(&arc_sums.arcstat_evict_skip);
+ wmsum_fini(&arc_sums.arcstat_evict_not_enough);
+ wmsum_fini(&arc_sums.arcstat_evict_l2_cached);
+ wmsum_fini(&arc_sums.arcstat_evict_l2_eligible);
+ wmsum_fini(&arc_sums.arcstat_evict_l2_eligible_mfu);
+ wmsum_fini(&arc_sums.arcstat_evict_l2_eligible_mru);
+ wmsum_fini(&arc_sums.arcstat_evict_l2_ineligible);
+ wmsum_fini(&arc_sums.arcstat_evict_l2_skip);
+ wmsum_fini(&arc_sums.arcstat_hash_collisions);
+ wmsum_fini(&arc_sums.arcstat_hash_chains);
+ aggsum_fini(&arc_sums.arcstat_size);
+ wmsum_fini(&arc_sums.arcstat_compressed_size);
+ wmsum_fini(&arc_sums.arcstat_uncompressed_size);
+ wmsum_fini(&arc_sums.arcstat_overhead_size);
+ wmsum_fini(&arc_sums.arcstat_hdr_size);
+ wmsum_fini(&arc_sums.arcstat_data_size);
+ wmsum_fini(&arc_sums.arcstat_metadata_size);
+ wmsum_fini(&arc_sums.arcstat_dbuf_size);
+ aggsum_fini(&arc_sums.arcstat_dnode_size);
+ wmsum_fini(&arc_sums.arcstat_bonus_size);
+ wmsum_fini(&arc_sums.arcstat_l2_hits);
+ wmsum_fini(&arc_sums.arcstat_l2_misses);
+ wmsum_fini(&arc_sums.arcstat_l2_prefetch_asize);
+ wmsum_fini(&arc_sums.arcstat_l2_mru_asize);
+ wmsum_fini(&arc_sums.arcstat_l2_mfu_asize);
+ wmsum_fini(&arc_sums.arcstat_l2_bufc_data_asize);
+ wmsum_fini(&arc_sums.arcstat_l2_bufc_metadata_asize);
+ wmsum_fini(&arc_sums.arcstat_l2_feeds);
+ wmsum_fini(&arc_sums.arcstat_l2_rw_clash);
+ wmsum_fini(&arc_sums.arcstat_l2_read_bytes);
+ wmsum_fini(&arc_sums.arcstat_l2_write_bytes);
+ wmsum_fini(&arc_sums.arcstat_l2_writes_sent);
+ wmsum_fini(&arc_sums.arcstat_l2_writes_done);
+ wmsum_fini(&arc_sums.arcstat_l2_writes_error);
+ wmsum_fini(&arc_sums.arcstat_l2_writes_lock_retry);
+ wmsum_fini(&arc_sums.arcstat_l2_evict_lock_retry);
+ wmsum_fini(&arc_sums.arcstat_l2_evict_reading);
+ wmsum_fini(&arc_sums.arcstat_l2_evict_l1cached);
+ wmsum_fini(&arc_sums.arcstat_l2_free_on_write);
+ wmsum_fini(&arc_sums.arcstat_l2_abort_lowmem);
+ wmsum_fini(&arc_sums.arcstat_l2_cksum_bad);
+ wmsum_fini(&arc_sums.arcstat_l2_io_error);
+ wmsum_fini(&arc_sums.arcstat_l2_lsize);
+ wmsum_fini(&arc_sums.arcstat_l2_psize);
+ aggsum_fini(&arc_sums.arcstat_l2_hdr_size);
+ wmsum_fini(&arc_sums.arcstat_l2_log_blk_writes);
+ wmsum_fini(&arc_sums.arcstat_l2_log_blk_asize);
+ wmsum_fini(&arc_sums.arcstat_l2_log_blk_count);
+ wmsum_fini(&arc_sums.arcstat_l2_rebuild_success);
+ wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_unsupported);
+ wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_io_errors);
+ wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_dh_errors);
+ wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors);
+ wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_lowmem);
+ wmsum_fini(&arc_sums.arcstat_l2_rebuild_size);
+ wmsum_fini(&arc_sums.arcstat_l2_rebuild_asize);
+ wmsum_fini(&arc_sums.arcstat_l2_rebuild_bufs);
+ wmsum_fini(&arc_sums.arcstat_l2_rebuild_bufs_precached);
+ wmsum_fini(&arc_sums.arcstat_l2_rebuild_log_blks);
+ wmsum_fini(&arc_sums.arcstat_memory_throttle_count);
+ wmsum_fini(&arc_sums.arcstat_memory_direct_count);
+ wmsum_fini(&arc_sums.arcstat_memory_indirect_count);
+ wmsum_fini(&arc_sums.arcstat_prune);
+ aggsum_fini(&arc_sums.arcstat_meta_used);
+ wmsum_fini(&arc_sums.arcstat_async_upgrade_sync);
+ wmsum_fini(&arc_sums.arcstat_demand_hit_predictive_prefetch);
+ wmsum_fini(&arc_sums.arcstat_demand_hit_prescient_prefetch);
+ wmsum_fini(&arc_sums.arcstat_raw_size);
+ wmsum_fini(&arc_sums.arcstat_cached_only_in_progress);
+ wmsum_fini(&arc_sums.arcstat_abd_chunk_waste_size);
}
uint64_t
@@ -7628,8 +7914,6 @@ arc_init(void)
/* Set min to 1/2 of arc_c_min */
arc_meta_min = 1ULL << SPA_MAXBLOCKSHIFT;
- /* Initialize maximum observed usage to zero */
- arc_meta_max = 0;
/*
* Set arc_meta_limit to a percent of arc_c_max with a floor of
* arc_meta_min, and a ceiling of arc_c_max.
@@ -8355,7 +8639,7 @@ top:
}
}
- atomic_inc_64(&l2arc_writes_done);
+ ARCSTAT_BUMP(arcstat_l2_writes_done);
list_remove(buflist, head);
ASSERT(!HDR_HAS_L1HDR(head));
kmem_cache_free(hdr_l2only_cache, head);
@@ -9327,7 +9611,7 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
static boolean_t
l2arc_hdr_limit_reached(void)
{
- int64_t s = aggsum_upper_bound(&astat_l2_hdr_size);
+ int64_t s = aggsum_upper_bound(&arc_sums.arcstat_l2_hdr_size);
return (arc_reclaim_needed() || (s > arc_meta_limit * 3 / 4) ||
(s > (arc_warm ? arc_c : arc_c_max) * l2arc_meta_percent / 100));
@@ -9661,8 +9945,6 @@ l2arc_init(void)
{
l2arc_thread_exit = 0;
l2arc_ndev = 0;
- l2arc_writes_sent = 0;
- l2arc_writes_done = 0;
mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
@@ -9976,7 +10258,7 @@ out:
* log as the pool may be in the process of being removed.
*/
zfs_dbgmsg("L2ARC rebuild aborted, restored %llu blocks",
- zfs_refcount_count(&dev->l2ad_lb_count));
+ (u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
} else if (err != 0) {
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"aborted, restored %llu blocks",
@@ -10019,7 +10301,8 @@ l2arc_dev_hdr_read(l2arc_dev_t *dev)
if (err != 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_dh_errors);
zfs_dbgmsg("L2ARC IO error (%d) while reading device header, "
- "vdev guid: %llu", err, dev->l2ad_vdev->vdev_guid);
+ "vdev guid: %llu", err,
+ (u_longlong_t)dev->l2ad_vdev->vdev_guid);
return (err);
}
@@ -10116,8 +10399,9 @@ l2arc_log_blk_read(l2arc_dev_t *dev,
if ((err = zio_wait(this_io)) != 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_io_errors);
zfs_dbgmsg("L2ARC IO error (%d) while reading log block, "
- "offset: %llu, vdev guid: %llu", err, this_lbp->lbp_daddr,
- dev->l2ad_vdev->vdev_guid);
+ "offset: %llu, vdev guid: %llu", err,
+ (u_longlong_t)this_lbp->lbp_daddr,
+ (u_longlong_t)dev->l2ad_vdev->vdev_guid);
goto cleanup;
}
@@ -10131,8 +10415,10 @@ l2arc_log_blk_read(l2arc_dev_t *dev,
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_cksum_lb_errors);
zfs_dbgmsg("L2ARC log block cksum failed, offset: %llu, "
"vdev guid: %llu, l2ad_hand: %llu, l2ad_evict: %llu",
- this_lbp->lbp_daddr, dev->l2ad_vdev->vdev_guid,
- dev->l2ad_hand, dev->l2ad_evict);
+ (u_longlong_t)this_lbp->lbp_daddr,
+ (u_longlong_t)dev->l2ad_vdev->vdev_guid,
+ (u_longlong_t)dev->l2ad_hand,
+ (u_longlong_t)dev->l2ad_evict);
err = SET_ERROR(ECKSUM);
goto cleanup;
}
@@ -10386,7 +10672,8 @@ l2arc_dev_hdr_update(l2arc_dev_t *dev)
if (err != 0) {
zfs_dbgmsg("L2ARC IO error (%d) while writing device header, "
- "vdev guid: %llu", err, dev->l2ad_vdev->vdev_guid);
+ "vdev guid: %llu", err,
+ (u_longlong_t)dev->l2ad_vdev->vdev_guid);
}
}
diff --git a/sys/contrib/openzfs/module/zfs/dbuf.c b/sys/contrib/openzfs/module/zfs/dbuf.c
index 368cdfe397a2..8e55a613bbf2 100644
--- a/sys/contrib/openzfs/module/zfs/dbuf.c
+++ b/sys/contrib/openzfs/module/zfs/dbuf.c
@@ -52,6 +52,7 @@
#include <sys/vdev.h>
#include <cityhash.h>
#include <sys/spa_impl.h>
+#include <sys/wmsum.h>
kstat_t *dbuf_ksp;
@@ -135,8 +136,22 @@ dbuf_stats_t dbuf_stats = {
{ "metadata_cache_overflow", KSTAT_DATA_UINT64 }
};
+struct {
+ wmsum_t cache_count;
+ wmsum_t cache_total_evicts;
+ wmsum_t cache_levels[DN_MAX_LEVELS];
+ wmsum_t cache_levels_bytes[DN_MAX_LEVELS];
+ wmsum_t hash_hits;
+ wmsum_t hash_misses;
+ wmsum_t hash_collisions;
+ wmsum_t hash_chains;
+ wmsum_t hash_insert_race;
+ wmsum_t metadata_cache_count;
+ wmsum_t metadata_cache_overflow;
+} dbuf_sums;
+
#define DBUF_STAT_INCR(stat, val) \
- atomic_add_64(&dbuf_stats.stat.value.ui64, (val));
+ wmsum_add(&dbuf_sums.stat, val);
#define DBUF_STAT_DECR(stat, val) \
DBUF_STAT_INCR(stat, -(val));
#define DBUF_STAT_BUMP(stat) \
@@ -297,8 +312,6 @@ dbuf_dest(void *vdb, void *unused)
*/
static dbuf_hash_table_t dbuf_hash_table;
-static uint64_t dbuf_hash_count;
-
/*
* We use Cityhash for this. It's fast, and has good hash properties without
* requiring any large static buffers.
@@ -409,8 +422,8 @@ dbuf_hash_insert(dmu_buf_impl_t *db)
db->db_hash_next = h->hash_table[idx];
h->hash_table[idx] = db;
mutex_exit(DBUF_HASH_MUTEX(h, idx));
- atomic_inc_64(&dbuf_hash_count);
- DBUF_STAT_MAX(hash_elements_max, dbuf_hash_count);
+ uint64_t he = atomic_inc_64_nv(&dbuf_stats.hash_elements.value.ui64);
+ DBUF_STAT_MAX(hash_elements_max, he);
return (NULL);
}
@@ -483,7 +496,7 @@ dbuf_hash_remove(dmu_buf_impl_t *db)
h->hash_table[idx]->db_hash_next == NULL)
DBUF_STAT_BUMPDOWN(hash_chains);
mutex_exit(DBUF_HASH_MUTEX(h, idx));
- atomic_dec_64(&dbuf_hash_count);
+ atomic_dec_64(&dbuf_stats.hash_elements.value.ui64);
}
typedef enum {
@@ -767,19 +780,40 @@ dbuf_kstat_update(kstat_t *ksp, int rw)
{
dbuf_stats_t *ds = ksp->ks_data;
- if (rw == KSTAT_WRITE) {
+ if (rw == KSTAT_WRITE)
return (SET_ERROR(EACCES));
- } else {
- ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
- &dbuf_caches[DB_DBUF_METADATA_CACHE].size);
- ds->cache_size_bytes.value.ui64 =
- zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
- ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
- ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
- ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
- ds->hash_elements.value.ui64 = dbuf_hash_count;
- }
+ ds->cache_count.value.ui64 =
+ wmsum_value(&dbuf_sums.cache_count);
+ ds->cache_size_bytes.value.ui64 =
+ zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
+ ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
+ ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
+ ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
+ ds->cache_total_evicts.value.ui64 =
+ wmsum_value(&dbuf_sums.cache_total_evicts);
+ for (int i = 0; i < DN_MAX_LEVELS; i++) {
+ ds->cache_levels[i].value.ui64 =
+ wmsum_value(&dbuf_sums.cache_levels[i]);
+ ds->cache_levels_bytes[i].value.ui64 =
+ wmsum_value(&dbuf_sums.cache_levels_bytes[i]);
+ }
+ ds->hash_hits.value.ui64 =
+ wmsum_value(&dbuf_sums.hash_hits);
+ ds->hash_misses.value.ui64 =
+ wmsum_value(&dbuf_sums.hash_misses);
+ ds->hash_collisions.value.ui64 =
+ wmsum_value(&dbuf_sums.hash_collisions);
+ ds->hash_chains.value.ui64 =
+ wmsum_value(&dbuf_sums.hash_chains);
+ ds->hash_insert_race.value.ui64 =
+ wmsum_value(&dbuf_sums.hash_insert_race);
+ ds->metadata_cache_count.value.ui64 =
+ wmsum_value(&dbuf_sums.metadata_cache_count);
+ ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
+ &dbuf_caches[DB_DBUF_METADATA_CACHE].size);
+ ds->metadata_cache_overflow.value.ui64 =
+ wmsum_value(&dbuf_sums.metadata_cache_overflow);
return (0);
}
@@ -846,6 +880,20 @@ retry:
dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
NULL, 0, &p0, TS_RUN, minclsyspri);
+ wmsum_init(&dbuf_sums.cache_count, 0);
+ wmsum_init(&dbuf_sums.cache_total_evicts, 0);
+ for (i = 0; i < DN_MAX_LEVELS; i++) {
+ wmsum_init(&dbuf_sums.cache_levels[i], 0);
+ wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
+ }
+ wmsum_init(&dbuf_sums.hash_hits, 0);
+ wmsum_init(&dbuf_sums.hash_misses, 0);
+ wmsum_init(&dbuf_sums.hash_collisions, 0);
+ wmsum_init(&dbuf_sums.hash_chains, 0);
+ wmsum_init(&dbuf_sums.hash_insert_race, 0);
+ wmsum_init(&dbuf_sums.metadata_cache_count, 0);
+ wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);
+
dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
@@ -908,6 +956,20 @@ dbuf_fini(void)
kstat_delete(dbuf_ksp);
dbuf_ksp = NULL;
}
+
+ wmsum_fini(&dbuf_sums.cache_count);
+ wmsum_fini(&dbuf_sums.cache_total_evicts);
+ for (i = 0; i < DN_MAX_LEVELS; i++) {
+ wmsum_fini(&dbuf_sums.cache_levels[i]);
+ wmsum_fini(&dbuf_sums.cache_levels_bytes[i]);
+ }
+ wmsum_fini(&dbuf_sums.hash_hits);
+ wmsum_fini(&dbuf_sums.hash_misses);
+ wmsum_fini(&dbuf_sums.hash_collisions);
+ wmsum_fini(&dbuf_sums.hash_chains);
+ wmsum_fini(&dbuf_sums.hash_insert_race);
+ wmsum_fini(&dbuf_sums.metadata_cache_count);
+ wmsum_fini(&dbuf_sums.metadata_cache_overflow);
}
/*
@@ -1091,42 +1153,6 @@ dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
}
static arc_buf_t *
-dbuf_alloc_arcbuf_from_arcbuf(dmu_buf_impl_t *db, arc_buf_t *data)
-{
- objset_t *os = db->db_objset;
- spa_t *spa = os->os_spa;
- arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
- enum zio_compress compress_type;
- uint8_t complevel;
- int psize, lsize;
-
- psize = arc_buf_size(data);
- lsize = arc_buf_lsize(data);
- compress_type = arc_get_compression(data);
- complevel = arc_get_complevel(data);
-
- if (arc_is_encrypted(data)) {
- boolean_t byteorder;
- uint8_t salt[ZIO_DATA_SALT_LEN];
- uint8_t iv[ZIO_DATA_IV_LEN];
- uint8_t mac[ZIO_DATA_MAC_LEN];
- dnode_t *dn = DB_DNODE(db);
-
- arc_get_raw_params(data, &byteorder, salt, iv, mac);
- data = arc_alloc_raw_buf(spa, db, dmu_objset_id(os),
- byteorder, salt, iv, mac, dn->dn_type, psize, lsize,
- compress_type, complevel);
- } else if (compress_type != ZIO_COMPRESS_OFF) {
- ASSERT3U(type, ==, ARC_BUFC_DATA);
- data = arc_alloc_compressed_buf(spa, db,
- psize, lsize, compress_type, complevel);
- } else {
- data = arc_alloc_buf(spa, db, type, psize);
- }
- return (data);
-}
-
-static arc_buf_t *
dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
{
spa_t *spa = db->db_objset->os_spa;
@@ -1575,9 +1601,35 @@ dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
arc_space_consume(bonuslen, ARC_SPACE_BONUS);
bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen);
} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
- arc_buf_t *buf = dbuf_alloc_arcbuf_from_arcbuf(db, db->db_buf);
- dr->dt.dl.dr_data = buf;
- bcopy(db->db.db_data, buf->b_data, arc_buf_size(buf));
+ dnode_t *dn = DB_DNODE(db);
+ int size = arc_buf_size(db->db_buf);
+ arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
+ spa_t *spa = db->db_objset->os_spa;
+ enum zio_compress compress_type =
+ arc_get_compression(db->db_buf);
+ uint8_t complevel = arc_get_complevel(db->db_buf);
+
+ if (arc_is_encrypted(db->db_buf)) {
+ boolean_t byteorder;
+ uint8_t salt[ZIO_DATA_SALT_LEN];
+ uint8_t iv[ZIO_DATA_IV_LEN];
+ uint8_t mac[ZIO_DATA_MAC_LEN];
+
+ arc_get_raw_params(db->db_buf, &byteorder, salt,
+ iv, mac);
+ dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
+ dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
+ mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
+ compress_type, complevel);
+ } else if (compress_type != ZIO_COMPRESS_OFF) {
+ ASSERT3U(type, ==, ARC_BUFC_DATA);
+ dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
+ size, arc_buf_lsize(db->db_buf), compress_type,
+ complevel);
+ } else {
+ dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
+ }
+ bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
} else {
db->db_buf = NULL;
dbuf_clear_data(db);
@@ -1798,7 +1850,8 @@ dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
if (end_blkid > dn->dn_maxblkid &&
!(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
end_blkid = dn->dn_maxblkid;
- dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid);
+ dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid,
+ (u_longlong_t)end_blkid);
db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
db_search->db_level = 0;
@@ -3378,10 +3431,30 @@ noinline static void
dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
{
dbuf_dirty_record_t *dr = db->db_data_pending;
- arc_buf_t *newdata, *data = dr->dt.dl.dr_data;
+ arc_buf_t *data = dr->dt.dl.dr_data;
+ enum zio_compress compress_type = arc_get_compression(data);
+ uint8_t complevel = arc_get_complevel(data);
+
+ if (arc_is_encrypted(data)) {
+ boolean_t byteorder;
+ uint8_t salt[ZIO_DATA_SALT_LEN];
+ uint8_t iv[ZIO_DATA_IV_LEN];
+ uint8_t mac[ZIO_DATA_MAC_LEN];
+
+ arc_get_raw_params(data, &byteorder, salt, iv, mac);
+ dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
+ dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
+ dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
+ compress_type, complevel));
+ } else if (compress_type != ZIO_COMPRESS_OFF) {
+ dbuf_set_data(db, arc_alloc_compressed_buf(
+ dn->dn_objset->os_spa, db, arc_buf_size(data),
+ arc_buf_lsize(data), compress_type, complevel));
+ } else {
+ dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
+ DBUF_GET_BUFC_TYPE(db), db->db.db_size));
+ }
- newdata = dbuf_alloc_arcbuf_from_arcbuf(db, data);
- dbuf_set_data(db, newdata);
rw_enter(&db->db_rwlock, RW_WRITER);
bcopy(data->b_data, db->db.db_data, arc_buf_size(data));
rw_exit(&db->db_rwlock);
@@ -3710,9 +3783,11 @@ dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting)
db->db_caching_status = dcs;
multilist_insert(&dbuf_caches[dcs].cache, db);
+ uint64_t db_size = db->db.db_size;
size = zfs_refcount_add_many(
- &dbuf_caches[dcs].size,
- db->db.db_size, db);
+ &dbuf_caches[dcs].size, db_size, db);
+ uint8_t db_level = db->db_level;
+ mutex_exit(&db->db_mtx);
if (dcs == DB_DBUF_METADATA_CACHE) {
DBUF_STAT_BUMP(metadata_cache_count);
@@ -3720,16 +3795,14 @@ dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting)
metadata_cache_size_bytes_max,
size);
} else {
- DBUF_STAT_BUMP(
- cache_levels[db->db_level]);
DBUF_STAT_BUMP(cache_count);
- DBUF_STAT_INCR(
- cache_levels_bytes[db->db_level],
- db->db.db_size);
DBUF_STAT_MAX(cache_size_bytes_max,
size);
+ DBUF_STAT_BUMP(cache_levels[db_level]);
+ DBUF_STAT_INCR(
+ cache_levels_bytes[db_level],
+ db_size);
}
- mutex_exit(&db->db_mtx);
if (dcs == DB_DBUF_CACHE && !evicting)
dbuf_evict_notify(size);
@@ -4303,8 +4376,31 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
* objects only modified in the syncing context (e.g.
* DNONE_DNODE blocks).
*/
- *datap = dbuf_alloc_arcbuf_from_arcbuf(db, db->db_buf);
- bcopy(db->db.db_data, (*datap)->b_data, arc_buf_size(*datap));
+ int psize = arc_buf_size(*datap);
+ int lsize = arc_buf_lsize(*datap);
+ arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
+ enum zio_compress compress_type = arc_get_compression(*datap);
+ uint8_t complevel = arc_get_complevel(*datap);
+
+ if (arc_is_encrypted(*datap)) {
+ boolean_t byteorder;
+ uint8_t salt[ZIO_DATA_SALT_LEN];
+ uint8_t iv[ZIO_DATA_IV_LEN];
+ uint8_t mac[ZIO_DATA_MAC_LEN];
+
+ arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
+ *datap = arc_alloc_raw_buf(os->os_spa, db,
+ dmu_objset_id(os), byteorder, salt, iv, mac,
+ dn->dn_type, psize, lsize, compress_type,
+ complevel);
+ } else if (compress_type != ZIO_COMPRESS_OFF) {
+ ASSERT3U(type, ==, ARC_BUFC_DATA);
+ *datap = arc_alloc_compressed_buf(os->os_spa, db,
+ psize, lsize, compress_type, complevel);
+ } else {
+ *datap = arc_alloc_buf(os->os_spa, db, type, psize);
+ }
+ bcopy(db->db.db_data, (*datap)->b_data, psize);
}
db->db_data_pending = dr;
diff --git a/sys/contrib/openzfs/module/zfs/dmu_objset.c b/sys/contrib/openzfs/module/zfs/dmu_objset.c
index 8c244dc4c317..22deee7f3dc9 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_objset.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_objset.c
@@ -1616,7 +1616,7 @@ dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
blkptr_t *blkptr_copy = kmem_alloc(sizeof (*os->os_rootbp), KM_SLEEP);
*blkptr_copy = *os->os_rootbp;
- dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);
+ dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", (u_longlong_t)tx->tx_txg);
ASSERT(dmu_tx_is_syncing(tx));
/* XXX the write_done callback should really give us the tx... */
diff --git a/sys/contrib/openzfs/module/zfs/dmu_recv.c b/sys/contrib/openzfs/module/zfs/dmu_recv.c
index a713e1329027..0ec46bdb4f47 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_recv.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_recv.c
@@ -2588,8 +2588,8 @@ dprintf_drr(struct receive_record_arg *rrd, int err)
dprintf("drr_type = OBJECT obj = %llu type = %u "
"bonustype = %u blksz = %u bonuslen = %u cksumtype = %u "
"compress = %u dn_slots = %u err = %d\n",
- drro->drr_object, drro->drr_type, drro->drr_bonustype,
- drro->drr_blksz, drro->drr_bonuslen,
+ (u_longlong_t)drro->drr_object, drro->drr_type,
+ drro->drr_bonustype, drro->drr_blksz, drro->drr_bonuslen,
drro->drr_checksumtype, drro->drr_compress,
drro->drr_dn_slots, err);
break;
@@ -2600,7 +2600,8 @@ dprintf_drr(struct receive_record_arg *rrd, int err)
&rrd->header.drr_u.drr_freeobjects;
dprintf("drr_type = FREEOBJECTS firstobj = %llu "
"numobjs = %llu err = %d\n",
- drrfo->drr_firstobj, drrfo->drr_numobjs, err);
+ (u_longlong_t)drrfo->drr_firstobj,
+ (u_longlong_t)drrfo->drr_numobjs, err);
break;
}
case DRR_WRITE:
@@ -2609,10 +2610,12 @@ dprintf_drr(struct receive_record_arg *rrd, int err)
dprintf("drr_type = WRITE obj = %llu type = %u offset = %llu "
"lsize = %llu cksumtype = %u flags = %u "
"compress = %u psize = %llu err = %d\n",
- drrw->drr_object, drrw->drr_type, drrw->drr_offset,
- drrw->drr_logical_size, drrw->drr_checksumtype,
- drrw->drr_flags, drrw->drr_compressiontype,
- drrw->drr_compressed_size, err);
+ (u_longlong_t)drrw->drr_object, drrw->drr_type,
+ (u_longlong_t)drrw->drr_offset,
+ (u_longlong_t)drrw->drr_logical_size,
+ drrw->drr_checksumtype, drrw->drr_flags,
+ drrw->drr_compressiontype,
+ (u_longlong_t)drrw->drr_compressed_size, err);
break;
}
case DRR_WRITE_BYREF:
@@ -2623,11 +2626,14 @@ dprintf_drr(struct receive_record_arg *rrd, int err)
"length = %llu toguid = %llx refguid = %llx "
"refobject = %llu refoffset = %llu cksumtype = %u "
"flags = %u err = %d\n",
- drrwbr->drr_object, drrwbr->drr_offset,
- drrwbr->drr_length, drrwbr->drr_toguid,
- drrwbr->drr_refguid, drrwbr->drr_refobject,
- drrwbr->drr_refoffset, drrwbr->drr_checksumtype,
- drrwbr->drr_flags, err);
+ (u_longlong_t)drrwbr->drr_object,
+ (u_longlong_t)drrwbr->drr_offset,
+ (u_longlong_t)drrwbr->drr_length,
+ (u_longlong_t)drrwbr->drr_toguid,
+ (u_longlong_t)drrwbr->drr_refguid,
+ (u_longlong_t)drrwbr->drr_refobject,
+ (u_longlong_t)drrwbr->drr_refoffset,
+ drrwbr->drr_checksumtype, drrwbr->drr_flags, err);
break;
}
case DRR_WRITE_EMBEDDED:
@@ -2637,7 +2643,9 @@ dprintf_drr(struct receive_record_arg *rrd, int err)
dprintf("drr_type = WRITE_EMBEDDED obj = %llu offset = %llu "
"length = %llu compress = %u etype = %u lsize = %u "
"psize = %u err = %d\n",
- drrwe->drr_object, drrwe->drr_offset, drrwe->drr_length,
+ (u_longlong_t)drrwe->drr_object,
+ (u_longlong_t)drrwe->drr_offset,
+ (u_longlong_t)drrwe->drr_length,
drrwe->drr_compression, drrwe->drr_etype,
drrwe->drr_lsize, drrwe->drr_psize, err);
break;
@@ -2647,7 +2655,9 @@ dprintf_drr(struct receive_record_arg *rrd, int err)
struct drr_free *drrf = &rrd->header.drr_u.drr_free;
dprintf("drr_type = FREE obj = %llu offset = %llu "
"length = %lld err = %d\n",
- drrf->drr_object, drrf->drr_offset, drrf->drr_length,
+ (u_longlong_t)drrf->drr_object,
+ (u_longlong_t)drrf->drr_offset,
+ (longlong_t)drrf->drr_length,
err);
break;
}
@@ -2655,7 +2665,8 @@ dprintf_drr(struct receive_record_arg *rrd, int err)
{
struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
dprintf("drr_type = SPILL obj = %llu length = %llu "
- "err = %d\n", drrs->drr_object, drrs->drr_length, err);
+ "err = %d\n", (u_longlong_t)drrs->drr_object,
+ (u_longlong_t)drrs->drr_length, err);
break;
}
case DRR_OBJECT_RANGE:
@@ -2664,7 +2675,8 @@ dprintf_drr(struct receive_record_arg *rrd, int err)
&rrd->header.drr_u.drr_object_range;
dprintf("drr_type = OBJECT_RANGE firstobj = %llu "
"numslots = %llu flags = %u err = %d\n",
- drror->drr_firstobj, drror->drr_numslots,
+ (u_longlong_t)drror->drr_firstobj,
+ (u_longlong_t)drror->drr_numslots,
drror->drr_flags, err);
break;
}
diff --git a/sys/contrib/openzfs/module/zfs/dmu_tx.c b/sys/contrib/openzfs/module/zfs/dmu_tx.c
index 73667915df0f..0beb983f992f 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_tx.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_tx.c
@@ -613,7 +613,8 @@ dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
/* XXX txh_arg2 better not be zero... */
dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
- txh->txh_type, beginblk, endblk);
+ txh->txh_type, (u_longlong_t)beginblk,
+ (u_longlong_t)endblk);
switch (txh->txh_type) {
case THT_WRITE:
diff --git a/sys/contrib/openzfs/module/zfs/dmu_zfetch.c b/sys/contrib/openzfs/module/zfs/dmu_zfetch.c
index 3d7407016d2c..4a323fa990fe 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_zfetch.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_zfetch.c
@@ -34,6 +34,7 @@
#include <sys/dmu.h>
#include <sys/dbuf.h>
#include <sys/kstat.h>
+#include <sys/wmsum.h>
/*
* This tunable disables predictive prefetch. Note that it leaves "prescient"
@@ -69,27 +70,54 @@ static zfetch_stats_t zfetch_stats = {
{ "io_issued", KSTAT_DATA_UINT64 },
};
-#define ZFETCHSTAT_BUMP(stat) \
- atomic_inc_64(&zfetch_stats.stat.value.ui64)
+struct {
+ wmsum_t zfetchstat_hits;
+ wmsum_t zfetchstat_misses;
+ wmsum_t zfetchstat_max_streams;
+ wmsum_t zfetchstat_io_issued;
+} zfetch_sums;
+
+#define ZFETCHSTAT_BUMP(stat) \
+ wmsum_add(&zfetch_sums.stat, 1)
#define ZFETCHSTAT_ADD(stat, val) \
- atomic_add_64(&zfetch_stats.stat.value.ui64, val)
-#define ZFETCHSTAT_SET(stat, val) \
- zfetch_stats.stat.value.ui64 = val
-#define ZFETCHSTAT_GET(stat) \
- zfetch_stats.stat.value.ui64
+ wmsum_add(&zfetch_sums.stat, val)
kstat_t *zfetch_ksp;
+static int
+zfetch_kstats_update(kstat_t *ksp, int rw)
+{
+ zfetch_stats_t *zs = ksp->ks_data;
+
+ if (rw == KSTAT_WRITE)
+ return (EACCES);
+ zs->zfetchstat_hits.value.ui64 =
+ wmsum_value(&zfetch_sums.zfetchstat_hits);
+ zs->zfetchstat_misses.value.ui64 =
+ wmsum_value(&zfetch_sums.zfetchstat_misses);
+ zs->zfetchstat_max_streams.value.ui64 =
+ wmsum_value(&zfetch_sums.zfetchstat_max_streams);
+ zs->zfetchstat_io_issued.value.ui64 =
+ wmsum_value(&zfetch_sums.zfetchstat_io_issued);
+ return (0);
+}
+
void
zfetch_init(void)
{
+ wmsum_init(&zfetch_sums.zfetchstat_hits, 0);
+ wmsum_init(&zfetch_sums.zfetchstat_misses, 0);
+ wmsum_init(&zfetch_sums.zfetchstat_max_streams, 0);
+ wmsum_init(&zfetch_sums.zfetchstat_io_issued, 0);
+
zfetch_ksp = kstat_create("zfs", 0, "zfetchstats", "misc",
KSTAT_TYPE_NAMED, sizeof (zfetch_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (zfetch_ksp != NULL) {
zfetch_ksp->ks_data = &zfetch_stats;
+ zfetch_ksp->ks_update = zfetch_kstats_update;
kstat_install(zfetch_ksp);
}
}
@@ -101,6 +129,11 @@ zfetch_fini(void)
kstat_delete(zfetch_ksp);
zfetch_ksp = NULL;
}
+
+ wmsum_fini(&zfetch_sums.zfetchstat_hits);
+ wmsum_fini(&zfetch_sums.zfetchstat_misses);
+ wmsum_fini(&zfetch_sums.zfetchstat_max_streams);
+ wmsum_fini(&zfetch_sums.zfetchstat_io_issued);
}
/*
diff --git a/sys/contrib/openzfs/module/zfs/dnode.c b/sys/contrib/openzfs/module/zfs/dnode.c
index 8434e72aa4f8..b1813a8951d5 100644
--- a/sys/contrib/openzfs/module/zfs/dnode.c
+++ b/sys/contrib/openzfs/module/zfs/dnode.c
@@ -592,7 +592,8 @@ dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
ibs = MIN(MAX(ibs, DN_MIN_INDBLKSHIFT), DN_MAX_INDBLKSHIFT);
dprintf("os=%p obj=%llu txg=%llu blocksize=%d ibs=%d dn_slots=%d\n",
- dn->dn_objset, dn->dn_object, tx->tx_txg, blocksize, ibs, dn_slots);
+ dn->dn_objset, (u_longlong_t)dn->dn_object,
+ (u_longlong_t)tx->tx_txg, blocksize, ibs, dn_slots);
DNODE_STAT_BUMP(dnode_allocate);
ASSERT(dn->dn_type == DMU_OT_NONE);
@@ -1690,7 +1691,7 @@ dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
ASSERT0(dn->dn_next_bonustype[txg & TXG_MASK]);
dprintf_ds(os->os_dsl_dataset, "obj=%llu txg=%llu\n",
- dn->dn_object, txg);
+ (u_longlong_t)dn->dn_object, (u_longlong_t)txg);
multilist_sublist_insert_head(mls, dn);
@@ -2253,7 +2254,8 @@ done:
range_tree_add(dn->dn_free_ranges[txgoff], blkid, nblks);
}
dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
- blkid, nblks, tx->tx_txg);
+ (u_longlong_t)blkid, (u_longlong_t)nblks,
+ (u_longlong_t)tx->tx_txg);
mutex_exit(&dn->dn_mtx);
dbuf_free_range(dn, blkid, blkid + nblks - 1, tx);
diff --git a/sys/contrib/openzfs/module/zfs/dnode_sync.c b/sys/contrib/openzfs/module/zfs/dnode_sync.c
index 66e48a1e17d4..dd37e3af7ed5 100644
--- a/sys/contrib/openzfs/module/zfs/dnode_sync.c
+++ b/sys/contrib/openzfs/module/zfs/dnode_sync.c
@@ -59,7 +59,7 @@ dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
dn->dn_phys->dn_nlevels = new_level;
dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
- dn->dn_object, dn->dn_phys->dn_nlevels);
+ (u_longlong_t)dn->dn_object, dn->dn_phys->dn_nlevels);
/*
* Lock ordering requires that we hold the children's db_mutexes (by
@@ -136,7 +136,8 @@ free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
uint64_t bytesfreed = 0;
- dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);
+ dprintf("ds=%p obj=%llx num=%d\n", ds, (u_longlong_t)dn->dn_object,
+ num);
for (int i = 0; i < num; i++, bp++) {
if (BP_IS_HOLE(bp))
diff --git a/sys/contrib/openzfs/module/zfs/dsl_dataset.c b/sys/contrib/openzfs/module/zfs/dsl_dataset.c
index 9b9bb42287d5..1c03216ef6d5 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_dataset.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_dataset.c
@@ -282,7 +282,7 @@ dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
if (bp->blk_birth > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
int64_t delta;
- dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
+ dprintf_bp(bp, "freeing ds=%llu", (u_longlong_t)ds->ds_object);
dsl_free(tx->tx_pool, tx->tx_txg, bp);
mutex_enter(&ds->ds_lock);
@@ -721,7 +721,7 @@ dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
dsl_dataset_phys(ds)->ds_fsid_guid,
(long long)ds->ds_fsid_guid,
spa_name(dp->dp_spa),
- dsobj);
+ (u_longlong_t)dsobj);
}
}
}
diff --git a/sys/contrib/openzfs/module/zfs/dsl_destroy.c b/sys/contrib/openzfs/module/zfs/dsl_destroy.c
index 837d78987e75..a2748197f29d 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_destroy.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_destroy.c
@@ -654,7 +654,7 @@ dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
char *errorstr = NULL;
(void) nvlist_lookup_string(result, ZCP_RET_ERROR, &errorstr);
if (errorstr != NULL) {
- zfs_dbgmsg(errorstr);
+ zfs_dbgmsg("%s", errorstr);
}
fnvlist_free(wrapper);
fnvlist_free(result);
diff --git a/sys/contrib/openzfs/module/zfs/dsl_dir.c b/sys/contrib/openzfs/module/zfs/dsl_dir.c
index 90dd787023be..df2c3d8f0637 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_dir.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_dir.c
@@ -488,7 +488,7 @@ dsl_dir_hold(dsl_pool_t *dp, const char *name, void *tag,
if (next[0] == '@')
break;
dprintf("looking up %s in obj%lld\n",
- buf, dsl_dir_phys(dd)->dd_child_dir_zapobj);
+ buf, (longlong_t)dsl_dir_phys(dd)->dd_child_dir_zapobj);
err = zap_lookup(dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_child_dir_zapobj,
@@ -1156,8 +1156,8 @@ dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
mutex_enter(&dd->dd_lock);
ASSERT0(dd->dd_tempreserved[tx->tx_txg & TXG_MASK]);
- dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg,
- dd->dd_space_towrite[tx->tx_txg & TXG_MASK] / 1024);
+ dprintf_dd(dd, "txg=%llu towrite=%lluK\n", (u_longlong_t)tx->tx_txg,
+ (u_longlong_t)dd->dd_space_towrite[tx->tx_txg & TXG_MASK] / 1024);
dd->dd_space_towrite[tx->tx_txg & TXG_MASK] = 0;
mutex_exit(&dd->dd_lock);
@@ -1344,8 +1344,9 @@ top_of_function:
retval = ERESTART;
dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
"quota=%lluK tr=%lluK err=%d\n",
- used_on_disk>>10, est_inflight>>10,
- quota>>10, asize>>10, retval);
+ (u_longlong_t)used_on_disk>>10,
+ (u_longlong_t)est_inflight>>10,
+ (u_longlong_t)quota>>10, (u_longlong_t)asize>>10, retval);
mutex_exit(&dd->dd_lock);
DMU_TX_STAT_BUMP(dmu_tx_quota);
return (SET_ERROR(retval));
diff --git a/sys/contrib/openzfs/module/zfs/metaslab.c b/sys/contrib/openzfs/module/zfs/metaslab.c
index e588765b3382..0ddad5b026d8 100644
--- a/sys/contrib/openzfs/module/zfs/metaslab.c
+++ b/sys/contrib/openzfs/module/zfs/metaslab.c
@@ -293,7 +293,7 @@ unsigned long zfs_metaslab_max_size_cache_sec = 3600; /* 1 hour */
* a metaslab would take it over this percentage, the oldest selected metaslab
* is automatically unloaded.
*/
-int zfs_metaslab_mem_limit = 75;
+int zfs_metaslab_mem_limit = 25;
/*
* Force the per-metaslab range trees to use 64-bit integers to store
@@ -2437,18 +2437,20 @@ metaslab_load_impl(metaslab_t *msp)
"loading_time %lld ms, ms_max_size %llu, "
"max size error %lld, "
"old_weight %llx, new_weight %llx",
- spa_syncing_txg(spa), spa_name(spa),
- msp->ms_group->mg_vd->vdev_id, msp->ms_id,
- space_map_length(msp->ms_sm),
- range_tree_space(msp->ms_unflushed_allocs),
- range_tree_space(msp->ms_unflushed_frees),
- range_tree_space(msp->ms_freed),
- range_tree_space(msp->ms_defer[0]),
- range_tree_space(msp->ms_defer[1]),
+ (u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
+ (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
+ (u_longlong_t)msp->ms_id,
+ (u_longlong_t)space_map_length(msp->ms_sm),
+ (u_longlong_t)range_tree_space(msp->ms_unflushed_allocs),
+ (u_longlong_t)range_tree_space(msp->ms_unflushed_frees),
+ (u_longlong_t)range_tree_space(msp->ms_freed),
+ (u_longlong_t)range_tree_space(msp->ms_defer[0]),
+ (u_longlong_t)range_tree_space(msp->ms_defer[1]),
(longlong_t)((load_start - msp->ms_unload_time) / 1000000),
(longlong_t)((load_end - load_start) / 1000000),
- msp->ms_max_size, msp->ms_max_size - max_size,
- weight, msp->ms_weight);
+ (u_longlong_t)msp->ms_max_size,
+ (u_longlong_t)msp->ms_max_size - max_size,
+ (u_longlong_t)weight, (u_longlong_t)msp->ms_weight);
metaslab_verify_space(msp, spa_syncing_txg(spa));
mutex_exit(&msp->ms_sync_lock);
@@ -2545,14 +2547,17 @@ metaslab_unload(metaslab_t *msp)
"ms_id %llu, weight %llx, "
"selected txg %llu (%llu ms ago), alloc_txg %llu, "
"loaded %llu ms ago, max_size %llu",
- spa_syncing_txg(spa), spa_name(spa),
- msp->ms_group->mg_vd->vdev_id, msp->ms_id,
- msp->ms_weight,
- msp->ms_selected_txg,
- (msp->ms_unload_time - msp->ms_selected_time) / 1000 / 1000,
- msp->ms_alloc_txg,
- (msp->ms_unload_time - msp->ms_load_time) / 1000 / 1000,
- msp->ms_max_size);
+ (u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
+ (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
+ (u_longlong_t)msp->ms_id,
+ (u_longlong_t)msp->ms_weight,
+ (u_longlong_t)msp->ms_selected_txg,
+ (u_longlong_t)(msp->ms_unload_time -
+ msp->ms_selected_time) / 1000 / 1000,
+ (u_longlong_t)msp->ms_alloc_txg,
+ (u_longlong_t)(msp->ms_unload_time -
+ msp->ms_load_time) / 1000 / 1000,
+ (u_longlong_t)msp->ms_max_size);
}
/*
@@ -2914,8 +2919,9 @@ metaslab_set_fragmentation(metaslab_t *msp, boolean_t nodirty)
msp->ms_condense_wanted = B_TRUE;
vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
zfs_dbgmsg("txg %llu, requesting force condense: "
- "ms_id %llu, vdev_id %llu", txg, msp->ms_id,
- vd->vdev_id);
+ "ms_id %llu, vdev_id %llu", (u_longlong_t)txg,
+ (u_longlong_t)msp->ms_id,
+ (u_longlong_t)vd->vdev_id);
}
msp->ms_fragmentation = ZFS_FRAG_INVALID;
return;
@@ -3635,10 +3641,11 @@ metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
ASSERT(range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */
zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
- "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
- msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
- spa->spa_name, space_map_length(msp->ms_sm),
- range_tree_numsegs(msp->ms_allocatable),
+ "spa %s, smp size %llu, segments %llu, forcing condense=%s",
+ (u_longlong_t)txg, (u_longlong_t)msp->ms_id, msp,
+ (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
+ spa->spa_name, (u_longlong_t)space_map_length(msp->ms_sm),
+ (u_longlong_t)range_tree_numsegs(msp->ms_allocatable),
msp->ms_condense_wanted ? "TRUE" : "FALSE");
msp->ms_condense_wanted = B_FALSE;
@@ -3883,11 +3890,13 @@ metaslab_flush(metaslab_t *msp, dmu_tx_t *tx)
if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, "
"ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, "
- "appended %llu bytes", dmu_tx_get_txg(tx), spa_name(spa),
- msp->ms_group->mg_vd->vdev_id, msp->ms_id,
- range_tree_space(msp->ms_unflushed_allocs),
- range_tree_space(msp->ms_unflushed_frees),
- (sm_len_after - sm_len_before));
+ "appended %llu bytes", (u_longlong_t)dmu_tx_get_txg(tx),
+ spa_name(spa),
+ (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
+ (u_longlong_t)msp->ms_id,
+ (u_longlong_t)range_tree_space(msp->ms_unflushed_allocs),
+ (u_longlong_t)range_tree_space(msp->ms_unflushed_frees),
+ (u_longlong_t)(sm_len_after - sm_len_before));
}
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
diff --git a/sys/contrib/openzfs/module/zfs/mmp.c b/sys/contrib/openzfs/module/zfs/mmp.c
index d05c9db24c20..d9ed457a7107 100644
--- a/sys/contrib/openzfs/module/zfs/mmp.c
+++ b/sys/contrib/openzfs/module/zfs/mmp.c
@@ -485,8 +485,9 @@ mmp_write_uberblock(spa_t *spa)
if (mmp->mmp_skip_error != 0) {
mmp->mmp_skip_error = 0;
zfs_dbgmsg("MMP write after skipping due to unavailable "
- "leaves, pool '%s' gethrtime %llu leaf %#llu",
- spa_name(spa), gethrtime(), vd->vdev_guid);
+ "leaves, pool '%s' gethrtime %llu leaf %llu",
+ spa_name(spa), (u_longlong_t)gethrtime(),
+ (u_longlong_t)vd->vdev_guid);
}
if (mmp->mmp_zio_root == NULL)
@@ -617,10 +618,11 @@ mmp_thread(void *arg)
"mmp_interval %llu last_mmp_fail_intervals %u "
"mmp_fail_intervals %u mmp_fail_ns %llu "
"skip_wait %d leaves %d next_time %llu",
- spa_name(spa), gethrtime(), last_mmp_interval,
- mmp_interval, last_mmp_fail_intervals,
- mmp_fail_intervals, mmp_fail_ns, skip_wait, leaves,
- next_time);
+ spa_name(spa), (u_longlong_t)gethrtime(),
+ (u_longlong_t)last_mmp_interval,
+ (u_longlong_t)mmp_interval, last_mmp_fail_intervals,
+ mmp_fail_intervals, (u_longlong_t)mmp_fail_ns,
+ skip_wait, leaves, (u_longlong_t)next_time);
}
/*
@@ -633,8 +635,9 @@ mmp_thread(void *arg)
zfs_dbgmsg("MMP state change pool '%s': gethrtime %llu "
"last_spa_multihost %u multihost %u "
"last_spa_suspended %u suspended %u",
- spa_name(spa), last_spa_multihost, multihost,
- last_spa_suspended, suspended);
+ spa_name(spa), (u_longlong_t)gethrtime(),
+ last_spa_multihost, multihost, last_spa_suspended,
+ suspended);
mutex_enter(&mmp->mmp_io_lock);
mmp->mmp_last_write = gethrtime();
mmp->mmp_delay = mmp_interval;
diff --git a/sys/contrib/openzfs/module/zfs/range_tree.c b/sys/contrib/openzfs/module/zfs/range_tree.c
index 5219fd079b73..595918e5a742 100644
--- a/sys/contrib/openzfs/module/zfs/range_tree.c
+++ b/sys/contrib/openzfs/module/zfs/range_tree.c
@@ -116,7 +116,8 @@ range_tree_stat_verify(range_tree_t *rt)
for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
if (hist[i] != rt->rt_histogram[i]) {
zfs_dbgmsg("i=%d, hist=%px, hist=%llu, rt_hist=%llu",
- i, hist, hist[i], rt->rt_histogram[i]);
+ i, hist, (u_longlong_t)hist[i],
+ (u_longlong_t)rt->rt_histogram[i]);
}
VERIFY3U(hist[i], ==, rt->rt_histogram[i]);
}
diff --git a/sys/contrib/openzfs/module/zfs/sa.c b/sys/contrib/openzfs/module/zfs/sa.c
index 5af0aaa7d0aa..2604a7513ecf 100644
--- a/sys/contrib/openzfs/module/zfs/sa.c
+++ b/sys/contrib/openzfs/module/zfs/sa.c
@@ -1292,7 +1292,7 @@ sa_build_index(sa_handle_t *hdl, sa_buf_type_t buftype)
mutex_exit(&sa->sa_lock);
zfs_dbgmsg("Buffer Header: %x != SA_MAGIC:%x "
"object=%#llx\n", sa_hdr_phys->sa_magic, SA_MAGIC,
- db->db.db_object);
+ (u_longlong_t)db->db.db_object);
return (SET_ERROR(EIO));
}
sa_byteswap(hdl, buftype);
diff --git a/sys/contrib/openzfs/module/zfs/spa.c b/sys/contrib/openzfs/module/zfs/spa.c
index 26995575adaa..bacd04fc0e39 100644
--- a/sys/contrib/openzfs/module/zfs/spa.c
+++ b/sys/contrib/openzfs/module/zfs/spa.c
@@ -2578,8 +2578,9 @@ spa_livelist_delete_cb(void *arg, zthr_t *z)
.to_free = &to_free
};
zfs_dbgmsg("deleting sublist (id %llu) from"
- " livelist %llu, %d remaining",
- dle->dle_bpobj.bpo_object, ll_obj, count - 1);
+ " livelist %llu, %lld remaining",
+ (u_longlong_t)dle->dle_bpobj.bpo_object,
+ (u_longlong_t)ll_obj, (longlong_t)count - 1);
VERIFY0(dsl_sync_task(spa_name(spa), NULL,
sublist_delete_sync, &sync_arg, 0,
ZFS_SPACE_CHECK_DESTROY));
@@ -2596,7 +2597,8 @@ spa_livelist_delete_cb(void *arg, zthr_t *z)
.ll_obj = ll_obj,
.zap_obj = zap_obj
};
- zfs_dbgmsg("deletion of livelist %llu completed", ll_obj);
+ zfs_dbgmsg("deletion of livelist %llu completed",
+ (u_longlong_t)ll_obj);
VERIFY0(dsl_sync_task(spa_name(spa), NULL, livelist_delete_sync,
&sync_arg, 0, ZFS_SPACE_CHECK_DESTROY));
}
@@ -2696,10 +2698,12 @@ spa_livelist_condense_sync(void *arg, dmu_tx_t *tx)
dsl_dataset_name(ds, dsname);
zfs_dbgmsg("txg %llu condensing livelist of %s (id %llu), bpobj %llu "
"(%llu blkptrs) and bpobj %llu (%llu blkptrs) -> bpobj %llu "
- "(%llu blkptrs)", tx->tx_txg, dsname, ds->ds_object, first_obj,
- cur_first_size, next_obj, cur_next_size,
- first->dle_bpobj.bpo_object,
- first->dle_bpobj.bpo_phys->bpo_num_blkptrs);
+ "(%llu blkptrs)", (u_longlong_t)tx->tx_txg, dsname,
+ (u_longlong_t)ds->ds_object, (u_longlong_t)first_obj,
+ (u_longlong_t)cur_first_size, (u_longlong_t)next_obj,
+ (u_longlong_t)cur_next_size,
+ (u_longlong_t)first->dle_bpobj.bpo_object,
+ (u_longlong_t)first->dle_bpobj.bpo_phys->bpo_num_blkptrs);
out:
dmu_buf_rele(ds->ds_dbuf, spa);
spa->spa_to_condense.ds = NULL;
@@ -3091,8 +3095,10 @@ spa_activity_check_duration(spa_t *spa, uberblock_t *ub)
zfs_dbgmsg("fail_intvals>0 import_delay=%llu ub_mmp "
"mmp_fails=%llu ub_mmp mmp_interval=%llu "
- "import_intervals=%u", import_delay, MMP_FAIL_INT(ub),
- MMP_INTERVAL(ub), import_intervals);
+ "import_intervals=%llu", (u_longlong_t)import_delay,
+ (u_longlong_t)MMP_FAIL_INT(ub),
+ (u_longlong_t)MMP_INTERVAL(ub),
+ (u_longlong_t)import_intervals);
} else if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
MMP_FAIL_INT(ub) == 0) {
@@ -3103,8 +3109,10 @@ spa_activity_check_duration(spa_t *spa, uberblock_t *ub)
zfs_dbgmsg("fail_intvals=0 import_delay=%llu ub_mmp "
"mmp_interval=%llu ub_mmp_delay=%llu "
- "import_intervals=%u", import_delay, MMP_INTERVAL(ub),
- ub->ub_mmp_delay, import_intervals);
+ "import_intervals=%llu", (u_longlong_t)import_delay,
+ (u_longlong_t)MMP_INTERVAL(ub),
+ (u_longlong_t)ub->ub_mmp_delay,
+ (u_longlong_t)import_intervals);
} else if (MMP_VALID(ub)) {
/*
@@ -3115,15 +3123,18 @@ spa_activity_check_duration(spa_t *spa, uberblock_t *ub)
ub->ub_mmp_delay) * import_intervals);
zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu "
- "import_intervals=%u leaves=%u", import_delay,
- ub->ub_mmp_delay, import_intervals,
+ "import_intervals=%llu leaves=%u",
+ (u_longlong_t)import_delay,
+ (u_longlong_t)ub->ub_mmp_delay,
+ (u_longlong_t)import_intervals,
vdev_count_leaves(spa));
} else {
/* Using local tunings is the only reasonable option */
zfs_dbgmsg("pool last imported on non-MMP aware "
"host using import_delay=%llu multihost_interval=%llu "
- "import_intervals=%u", import_delay, multihost_interval,
- import_intervals);
+ "import_intervals=%llu", (u_longlong_t)import_delay,
+ (u_longlong_t)multihost_interval,
+ (u_longlong_t)import_intervals);
}
return (import_delay);
@@ -3191,8 +3202,11 @@ spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config)
"txg %llu ub_txg %llu "
"timestamp %llu ub_timestamp %llu "
"mmp_config %#llx ub_mmp_config %#llx",
- txg, ub->ub_txg, timestamp, ub->ub_timestamp,
- mmp_config, ub->ub_mmp_config);
+ (u_longlong_t)txg, (u_longlong_t)ub->ub_txg,
+ (u_longlong_t)timestamp,
+ (u_longlong_t)ub->ub_timestamp,
+ (u_longlong_t)mmp_config,
+ (u_longlong_t)ub->ub_mmp_config);
error = SET_ERROR(EREMOTEIO);
break;
@@ -8716,12 +8730,16 @@ spa_sync_props(void *arg, dmu_tx_t *tx)
spa->spa_comment = spa_strdup(strval);
/*
* We need to dirty the configuration on all the vdevs
- * so that their labels get updated. It's unnecessary
- * to do this for pool creation since the vdev's
- * configuration has already been dirtied.
+ * so that their labels get updated. We also need to
+ * update the cache file to keep it in sync with the
+ * MOS version. It's unnecessary to do this for pool
+ * creation since the vdev's configuration has already
+ * been dirtied.
*/
- if (tx->tx_txg != TXG_INITIAL)
+ if (tx->tx_txg != TXG_INITIAL) {
vdev_config_dirty(spa->spa_root_vdev);
+ spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
+ }
spa_history_log_internal(spa, "set", tx,
"%s=%s", nvpair_name(elem), strval);
break;
@@ -8733,8 +8751,11 @@ spa_sync_props(void *arg, dmu_tx_t *tx)
/*
* Dirty the configuration on vdevs as above.
*/
- if (tx->tx_txg != TXG_INITIAL)
+ if (tx->tx_txg != TXG_INITIAL) {
vdev_config_dirty(spa->spa_root_vdev);
+ spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
+ }
+
spa_history_log_internal(spa, "set", tx,
"%s=%s", nvpair_name(elem), strval);
break;
diff --git a/sys/contrib/openzfs/module/zfs/spa_checkpoint.c b/sys/contrib/openzfs/module/zfs/spa_checkpoint.c
index 5fb614467273..09f62996853d 100644
--- a/sys/contrib/openzfs/module/zfs/spa_checkpoint.c
+++ b/sys/contrib/openzfs/module/zfs/spa_checkpoint.c
@@ -337,17 +337,18 @@ spa_checkpoint_discard_thread_sync(void *arg, dmu_tx_t *tx)
spa_checkpoint_accounting_verify(vd->vdev_spa);
#endif
- zfs_dbgmsg("discarding checkpoint: txg %llu, vdev id %d, "
+ zfs_dbgmsg("discarding checkpoint: txg %llu, vdev id %lld, "
"deleted %llu words - %llu words are left",
- tx->tx_txg, vd->vdev_id, (words_before - words_after),
- words_after);
+ (u_longlong_t)tx->tx_txg, (longlong_t)vd->vdev_id,
+ (u_longlong_t)(words_before - words_after),
+ (u_longlong_t)words_after);
if (error != EINTR) {
if (error != 0) {
- zfs_panic_recover("zfs: error %d was returned "
+ zfs_panic_recover("zfs: error %lld was returned "
"while incrementally destroying the checkpoint "
- "space map of vdev %llu\n",
- error, vd->vdev_id);
+ "space map of vdev %u\n",
+ (longlong_t)error, vd->vdev_id);
}
ASSERT0(words_after);
ASSERT0(space_map_allocated(vd->vdev_checkpoint_sm));
diff --git a/sys/contrib/openzfs/module/zfs/spa_history.c b/sys/contrib/openzfs/module/zfs/spa_history.c
index 0482e0f6c39d..dae06e46c316 100644
--- a/sys/contrib/openzfs/module/zfs/spa_history.c
+++ b/sys/contrib/openzfs/module/zfs/spa_history.c
@@ -296,14 +296,17 @@ spa_history_log_sync(void *arg, dmu_tx_t *tx)
} else if (nvlist_exists(nvl, ZPOOL_HIST_INT_NAME)) {
if (nvlist_exists(nvl, ZPOOL_HIST_DSNAME)) {
zfs_dbgmsg("txg %lld %s %s (id %llu) %s",
- fnvlist_lookup_uint64(nvl, ZPOOL_HIST_TXG),
+ (longlong_t)fnvlist_lookup_uint64(nvl,
+ ZPOOL_HIST_TXG),
fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME),
fnvlist_lookup_string(nvl, ZPOOL_HIST_DSNAME),
- fnvlist_lookup_uint64(nvl, ZPOOL_HIST_DSID),
+ (u_longlong_t)fnvlist_lookup_uint64(nvl,
+ ZPOOL_HIST_DSID),
fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_STR));
} else {
zfs_dbgmsg("txg %lld %s %s",
- fnvlist_lookup_uint64(nvl, ZPOOL_HIST_TXG),
+ (longlong_t)fnvlist_lookup_uint64(nvl,
+ ZPOOL_HIST_TXG),
fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME),
fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_STR));
}
diff --git a/sys/contrib/openzfs/module/zfs/spa_misc.c b/sys/contrib/openzfs/module/zfs/spa_misc.c
index 1a2e5abc5335..65b0988d675b 100644
--- a/sys/contrib/openzfs/module/zfs/spa_misc.c
+++ b/sys/contrib/openzfs/module/zfs/spa_misc.c
@@ -615,7 +615,7 @@ spa_deadman(void *arg)
zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
(gethrtime() - spa->spa_sync_starttime) / NANOSEC,
- ++spa->spa_deadman_calls);
+ (u_longlong_t)++spa->spa_deadman_calls);
if (zfs_deadman_enabled)
vdev_deadman(spa->spa_root_vdev, FTAG);
diff --git a/sys/contrib/openzfs/module/zfs/space_map.c b/sys/contrib/openzfs/module/zfs/space_map.c
index 3db7d199199c..138e6c75ed9b 100644
--- a/sys/contrib/openzfs/module/zfs/space_map.c
+++ b/sys/contrib/openzfs/module/zfs/space_map.c
@@ -877,9 +877,11 @@ space_map_truncate(space_map_t *sm, int blocksize, dmu_tx_t *tx)
doi.doi_data_block_size != blocksize ||
doi.doi_metadata_block_size != 1 << space_map_ibs) {
zfs_dbgmsg("txg %llu, spa %s, sm %px, reallocating "
- "object[%llu]: old bonus %u, old blocksz %u",
- dmu_tx_get_txg(tx), spa_name(spa), sm, sm->sm_object,
- doi.doi_bonus_size, doi.doi_data_block_size);
+ "object[%llu]: old bonus %llu, old blocksz %u",
+ (u_longlong_t)dmu_tx_get_txg(tx), spa_name(spa), sm,
+ (u_longlong_t)sm->sm_object,
+ (u_longlong_t)doi.doi_bonus_size,
+ doi.doi_data_block_size);
space_map_free(sm, tx);
dmu_buf_rele(sm->sm_dbuf, sm);
diff --git a/sys/contrib/openzfs/module/zfs/txg.c b/sys/contrib/openzfs/module/zfs/txg.c
index 497e19dd58eb..c55b1d8f9601 100644
--- a/sys/contrib/openzfs/module/zfs/txg.c
+++ b/sys/contrib/openzfs/module/zfs/txg.c
@@ -554,7 +554,8 @@ txg_sync_thread(void *arg)
!txg_has_quiesced_to_sync(dp) &&
dp->dp_dirty_total < dirty_min_bytes) {
dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
- tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
+ (u_longlong_t)tx->tx_synced_txg,
+ (u_longlong_t)tx->tx_sync_txg_waiting, dp);
txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
delta = ddi_get_lbolt() - start;
timer = (delta > timeout ? 0 : timeout - delta);
@@ -587,7 +588,8 @@ txg_sync_thread(void *arg)
cv_broadcast(&tx->tx_quiesce_more_cv);
dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
- txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
+ (u_longlong_t)txg, (u_longlong_t)tx->tx_quiesce_txg_waiting,
+ (u_longlong_t)tx->tx_sync_txg_waiting);
mutex_exit(&tx->tx_sync_lock);
txg_stat_t *ts = spa_txg_history_init_io(spa, txg, dp);
@@ -638,8 +640,9 @@ txg_quiesce_thread(void *arg)
txg = tx->tx_open_txg;
dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
- txg, tx->tx_quiesce_txg_waiting,
- tx->tx_sync_txg_waiting);
+ (u_longlong_t)txg,
+ (u_longlong_t)tx->tx_quiesce_txg_waiting,
+ (u_longlong_t)tx->tx_sync_txg_waiting);
tx->tx_quiescing_txg = txg;
mutex_exit(&tx->tx_sync_lock);
@@ -649,7 +652,8 @@ txg_quiesce_thread(void *arg)
/*
* Hand this txg off to the sync thread.
*/
- dprintf("quiesce done, handing off txg %llu\n", txg);
+ dprintf("quiesce done, handing off txg %llu\n",
+ (u_longlong_t)txg);
tx->tx_quiescing_txg = 0;
tx->tx_quiesced_txg = txg;
DTRACE_PROBE2(txg__quiesced, dsl_pool_t *, dp, uint64_t, txg);
@@ -705,11 +709,13 @@ txg_wait_synced_impl(dsl_pool_t *dp, uint64_t txg, boolean_t wait_sig)
if (tx->tx_sync_txg_waiting < txg)
tx->tx_sync_txg_waiting = txg;
dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
- txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
+ (u_longlong_t)txg, (u_longlong_t)tx->tx_quiesce_txg_waiting,
+ (u_longlong_t)tx->tx_sync_txg_waiting);
while (tx->tx_synced_txg < txg) {
dprintf("broadcasting sync more "
"tx_synced=%llu waiting=%llu dp=%px\n",
- tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
+ (u_longlong_t)tx->tx_synced_txg,
+ (u_longlong_t)tx->tx_sync_txg_waiting, dp);
cv_broadcast(&tx->tx_sync_more_cv);
if (wait_sig) {
/*
@@ -764,7 +770,8 @@ txg_wait_open(dsl_pool_t *dp, uint64_t txg, boolean_t should_quiesce)
if (tx->tx_quiesce_txg_waiting < txg && should_quiesce)
tx->tx_quiesce_txg_waiting = txg;
dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
- txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
+ (u_longlong_t)txg, (u_longlong_t)tx->tx_quiesce_txg_waiting,
+ (u_longlong_t)tx->tx_sync_txg_waiting);
while (tx->tx_open_txg < txg) {
cv_broadcast(&tx->tx_quiesce_more_cv);
/*
diff --git a/sys/contrib/openzfs/module/zfs/vdev.c b/sys/contrib/openzfs/module/zfs/vdev.c
index 5e14d71f1946..4e316d8135ee 100644
--- a/sys/contrib/openzfs/module/zfs/vdev.c
+++ b/sys/contrib/openzfs/module/zfs/vdev.c
@@ -165,7 +165,8 @@ vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
char state[20];
if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) {
- zfs_dbgmsg("%*svdev %u: %s", indent, "", vd->vdev_id,
+ zfs_dbgmsg("%*svdev %llu: %s", indent, "",
+ (u_longlong_t)vd->vdev_id,
vd->vdev_ops->vdev_op_type);
return;
}
@@ -5208,7 +5209,7 @@ vdev_deadman(vdev_t *vd, char *tag)
zio_t *fio;
uint64_t delta;
- zfs_dbgmsg("slow vdev: %s has %d active IOs",
+ zfs_dbgmsg("slow vdev: %s has %lu active IOs",
vd->vdev_path, avl_numnodes(&vq->vq_active_tree));
/*
diff --git a/sys/contrib/openzfs/module/zfs/vdev_indirect.c b/sys/contrib/openzfs/module/zfs/vdev_indirect.c
index 6362683ae93d..1b05ff03a0c5 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_indirect.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_indirect.c
@@ -529,8 +529,9 @@ spa_condense_indirect_complete_sync(void *arg, dmu_tx_t *tx)
zfs_dbgmsg("finished condense of vdev %llu in txg %llu: "
"new mapping object %llu has %llu entries "
"(was %llu entries)",
- vd->vdev_id, dmu_tx_get_txg(tx), vic->vic_mapping_object,
- new_count, old_count);
+ (u_longlong_t)vd->vdev_id, (u_longlong_t)dmu_tx_get_txg(tx),
+ (u_longlong_t)vic->vic_mapping_object,
+ (u_longlong_t)new_count, (u_longlong_t)old_count);
vdev_config_dirty(spa->spa_root_vdev);
}
@@ -796,7 +797,7 @@ spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx)
zfs_dbgmsg("starting condense of vdev %llu in txg %llu: "
"posm=%llu nm=%llu",
- vd->vdev_id, dmu_tx_get_txg(tx),
+ (u_longlong_t)vd->vdev_id, (u_longlong_t)dmu_tx_get_txg(tx),
(u_longlong_t)scip->scip_prev_obsolete_sm_object,
(u_longlong_t)scip->scip_next_mapping_object);
diff --git a/sys/contrib/openzfs/module/zfs/vdev_removal.c b/sys/contrib/openzfs/module/zfs/vdev_removal.c
index a758fe4fb343..8b0c76ff0a14 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_removal.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_removal.c
@@ -345,8 +345,9 @@ vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx)
vdev_config_dirty(vd);
zfs_dbgmsg("starting removal thread for vdev %llu (%px) in txg %llu "
- "im_obj=%llu", vd->vdev_id, vd, dmu_tx_get_txg(tx),
- vic->vic_mapping_object);
+ "im_obj=%llu", (u_longlong_t)vd->vdev_id, vd,
+ (u_longlong_t)dmu_tx_get_txg(tx),
+ (u_longlong_t)vic->vic_mapping_object);
spa_history_log_internal(spa, "vdev remove started", tx,
"%s vdev %llu %s", spa_name(spa), (u_longlong_t)vd->vdev_id,
@@ -474,7 +475,8 @@ spa_restart_removal(spa_t *spa)
if (!spa_writeable(spa))
return;
- zfs_dbgmsg("restarting removal of %llu", svr->svr_vdev_id);
+ zfs_dbgmsg("restarting removal of %llu",
+ (u_longlong_t)svr->svr_vdev_id);
svr->svr_thread = thread_create(NULL, 0, spa_vdev_remove_thread, spa,
0, &p0, TS_RUN, minclsyspri);
}
@@ -1196,7 +1198,7 @@ vdev_remove_complete(spa_t *spa)
ESC_ZFS_VDEV_REMOVE_DEV);
zfs_dbgmsg("finishing device removal for vdev %llu in txg %llu",
- vd->vdev_id, txg);
+ (u_longlong_t)vd->vdev_id, (u_longlong_t)txg);
/*
* Discard allocation state.
@@ -1490,8 +1492,9 @@ spa_vdev_remove_thread(void *arg)
vca.vca_msp = msp;
zfs_dbgmsg("copying %llu segments for metaslab %llu",
- zfs_btree_numnodes(&svr->svr_allocd_segs->rt_root),
- msp->ms_id);
+ (u_longlong_t)zfs_btree_numnodes(
+ &svr->svr_allocd_segs->rt_root),
+ (u_longlong_t)msp->ms_id);
while (!svr->svr_thread_exit &&
!range_tree_is_empty(svr->svr_allocd_segs)) {
@@ -1592,8 +1595,8 @@ spa_vdev_remove_thread(void *arg)
vca.vca_write_error_bytes > 0)) {
zfs_dbgmsg("canceling removal due to IO errors: "
"[read_error_bytes=%llu] [write_error_bytes=%llu]",
- vca.vca_read_error_bytes,
- vca.vca_write_error_bytes);
+ (u_longlong_t)vca.vca_read_error_bytes,
+ (u_longlong_t)vca.vca_write_error_bytes);
spa_vdev_remove_cancel_impl(spa);
}
} else {
@@ -1765,7 +1768,7 @@ spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
vdev_config_dirty(vd);
zfs_dbgmsg("canceled device removal for vdev %llu in %llu",
- vd->vdev_id, dmu_tx_get_txg(tx));
+ (u_longlong_t)vd->vdev_id, (u_longlong_t)dmu_tx_get_txg(tx));
spa_history_log_internal(spa, "vdev remove canceled", tx,
"%s vdev %llu %s", spa_name(spa),
(u_longlong_t)vd->vdev_id,
diff --git a/sys/contrib/openzfs/module/zfs/zap.c b/sys/contrib/openzfs/module/zfs/zap.c
index c0c280c52076..6f03beef3bdb 100644
--- a/sys/contrib/openzfs/module/zfs/zap.c
+++ b/sys/contrib/openzfs/module/zfs/zap.c
@@ -221,7 +221,8 @@ zap_table_grow(zap_t *zap, zap_table_phys_t *tbl,
tbl->zt_blks_copied++;
dprintf("copied block %llu of %llu\n",
- tbl->zt_blks_copied, tbl->zt_numblks);
+ (u_longlong_t)tbl->zt_blks_copied,
+ (u_longlong_t)tbl->zt_numblks);
if (tbl->zt_blks_copied == tbl->zt_numblks) {
(void) dmu_free_range(zap->zap_objset, zap->zap_object,
@@ -234,7 +235,7 @@ zap_table_grow(zap_t *zap, zap_table_phys_t *tbl,
tbl->zt_blks_copied = 0;
dprintf("finished; numblocks now %llu (%uk entries)\n",
- tbl->zt_numblks, 1<<(tbl->zt_shift-10));
+ (u_longlong_t)tbl->zt_numblks, 1<<(tbl->zt_shift-10));
}
return (0);
@@ -249,7 +250,8 @@ zap_table_store(zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, uint64_t val,
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
ASSERT(tbl->zt_blk != 0);
- dprintf("storing %llx at index %llx\n", val, idx);
+ dprintf("storing %llx at index %llx\n", (u_longlong_t)val,
+ (u_longlong_t)idx);
uint64_t blk = idx >> (bs-3);
uint64_t off = idx & ((1<<(bs-3))-1);
diff --git a/sys/contrib/openzfs/module/zfs/zap_micro.c b/sys/contrib/openzfs/module/zfs/zap_micro.c
index 5d9bc2076068..b4611685b204 100644
--- a/sys/contrib/openzfs/module/zfs/zap_micro.c
+++ b/sys/contrib/openzfs/module/zfs/zap_micro.c
@@ -563,7 +563,7 @@ zap_lockdir_impl(dmu_buf_t *db, void *tag, dmu_tx_t *tx,
uint64_t newsz = db->db_size + SPA_MINBLOCKSIZE;
if (newsz > MZAP_MAX_BLKSZ) {
dprintf("upgrading obj %llu: num_entries=%u\n",
- obj, zap->zap_m.zap_num_entries);
+ (u_longlong_t)obj, zap->zap_m.zap_num_entries);
*zapp = zap;
int err = mzap_upgrade(zapp, tag, tx, 0);
if (err != 0)
@@ -656,7 +656,7 @@ mzap_upgrade(zap_t **zapp, void *tag, dmu_tx_t *tx, zap_flags_t flags)
}
dprintf("upgrading obj=%llu with %u chunks\n",
- zap->zap_object, nchunks);
+ (u_longlong_t)zap->zap_object, nchunks);
/* XXX destroy the avl later, so we can use the stored hash value */
mze_destroy(zap);
@@ -667,7 +667,7 @@ mzap_upgrade(zap_t **zapp, void *tag, dmu_tx_t *tx, zap_flags_t flags)
if (mze->mze_name[0] == 0)
continue;
dprintf("adding %s=%llu\n",
- mze->mze_name, mze->mze_value);
+ mze->mze_name, (u_longlong_t)mze->mze_value);
zap_name_t *zn = zap_name_alloc(zap, mze->mze_name, 0);
/* If we fail here, we would end up losing entries */
VERIFY0(fzap_add_cd(zn, 8, 1, &mze->mze_value, mze->mze_cd,
@@ -1339,7 +1339,8 @@ zap_update(objset_t *os, uint64_t zapobj, const char *name,
} else if (integer_size != 8 || num_integers != 1 ||
strlen(name) >= MZAP_NAME_LEN) {
dprintf("upgrading obj %llu: intsz=%u numint=%llu name=%s\n",
- zapobj, integer_size, num_integers, name);
+ (u_longlong_t)zapobj, integer_size,
+ (u_longlong_t)num_integers, name);
err = mzap_upgrade(&zn->zn_zap, FTAG, tx, 0);
if (err == 0) {
err = fzap_update(zn, integer_size, num_integers,
diff --git a/sys/contrib/openzfs/module/zfs/zcp.c b/sys/contrib/openzfs/module/zfs/zcp.c
index 1ad53eae1eef..f724b44baf1d 100644
--- a/sys/contrib/openzfs/module/zfs/zcp.c
+++ b/sys/contrib/openzfs/module/zfs/zcp.c
@@ -654,7 +654,8 @@ zcp_debug(lua_State *state)
dbgstring = lua_tostring(state, 1);
- zfs_dbgmsg("txg %lld ZCP: %s", ri->zri_tx->tx_txg, dbgstring);
+ zfs_dbgmsg("txg %lld ZCP: %s", (longlong_t)ri->zri_tx->tx_txg,
+ dbgstring);
return (0);
}
diff --git a/sys/contrib/openzfs/module/zfs/zil.c b/sys/contrib/openzfs/module/zfs/zil.c
index d9c3042084e3..7f11c3913c71 100644
--- a/sys/contrib/openzfs/module/zfs/zil.c
+++ b/sys/contrib/openzfs/module/zfs/zil.c
@@ -1960,7 +1960,7 @@ zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
* This should be rare.
*/
zfs_dbgmsg("zil_itx_assign: missed itx cleanup for "
- "txg %llu", itxg->itxg_txg);
+ "txg %llu", (u_longlong_t)itxg->itxg_txg);
clean = itxg->itxg_itxs;
}
itxg->itxg_txg = txg;
@@ -3285,7 +3285,8 @@ zil_close(zilog_t *zilog)
txg_wait_synced(zilog->zl_dmu_pool, txg);
if (zilog_is_dirty(zilog))
- zfs_dbgmsg("zil (%px) is dirty, txg %llu", zilog, txg);
+ zfs_dbgmsg("zil (%px) is dirty, txg %llu", zilog,
+ (u_longlong_t)txg);
if (txg < spa_freeze_txg(zilog->zl_spa))
VERIFY(!zilog_is_dirty(zilog));
diff --git a/sys/contrib/openzfs/module/zfs/zio.c b/sys/contrib/openzfs/module/zfs/zio.c
index 66ac545c7981..e33d36dab5f9 100644
--- a/sys/contrib/openzfs/module/zfs/zio.c
+++ b/sys/contrib/openzfs/module/zfs/zio.c
@@ -2014,18 +2014,24 @@ zio_deadman_impl(zio_t *pio, int ziodepth)
zfs_dbgmsg("slow zio[%d]: zio=%px timestamp=%llu "
"delta=%llu queued=%llu io=%llu "
- "path=%s last=%llu "
- "type=%d priority=%d flags=0x%x "
- "stage=0x%x pipeline=0x%x pipeline-trace=0x%x "
- "objset=%llu object=%llu level=%llu blkid=%llu "
- "offset=%llu size=%llu error=%d",
+ "path=%s "
+ "last=%llu type=%d "
+ "priority=%d flags=0x%x stage=0x%x "
+ "pipeline=0x%x pipeline-trace=0x%x "
+ "objset=%llu object=%llu "
+ "level=%llu blkid=%llu "
+ "offset=%llu size=%llu "
+ "error=%d",
ziodepth, pio, pio->io_timestamp,
- delta, pio->io_delta, pio->io_delay,
- vd ? vd->vdev_path : "NULL", vq ? vq->vq_io_complete_ts : 0,
- pio->io_type, pio->io_priority, pio->io_flags,
- pio->io_stage, pio->io_pipeline, pio->io_pipeline_trace,
- zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
- pio->io_offset, pio->io_size, pio->io_error);
+ (u_longlong_t)delta, pio->io_delta, pio->io_delay,
+ vd ? vd->vdev_path : "NULL",
+ vq ? vq->vq_io_complete_ts : 0, pio->io_type,
+ pio->io_priority, pio->io_flags, pio->io_stage,
+ pio->io_pipeline, pio->io_pipeline_trace,
+ (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
+ (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid,
+ (u_longlong_t)pio->io_offset, (u_longlong_t)pio->io_size,
+ pio->io_error);
(void) zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN,
pio->io_spa, vd, zb, pio, 0);
@@ -3533,7 +3539,8 @@ zio_dva_allocate(zio_t *zio)
if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
zfs_dbgmsg("%s: metaslab allocation failure, "
"trying normal class: zio %px, size %llu, error %d",
- spa_name(spa), zio, zio->io_size, error);
+ spa_name(spa), zio, (u_longlong_t)zio->io_size,
+ error);
}
error = metaslab_alloc(spa, mc, zio->io_size, bp,
@@ -3545,7 +3552,8 @@ zio_dva_allocate(zio_t *zio)
if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
zfs_dbgmsg("%s: metaslab allocation failure, "
"trying ganging: zio %px, size %llu, error %d",
- spa_name(spa), zio, zio->io_size, error);
+ spa_name(spa), zio, (u_longlong_t)zio->io_size,
+ error);
}
return (zio_write_gang_block(zio, mc));
}
@@ -3554,7 +3562,8 @@ zio_dva_allocate(zio_t *zio)
(zfs_flags & ZFS_DEBUG_METASLAB_ALLOC)) {
zfs_dbgmsg("%s: metaslab allocation failure: zio %px, "
"size %llu, error %d",
- spa_name(spa), zio, zio->io_size, error);
+ spa_name(spa), zio, (u_longlong_t)zio->io_size,
+ error);
}
zio->io_error = error;
}
@@ -3680,7 +3689,8 @@ zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
}
} else {
zfs_dbgmsg("%s: zil block allocation failure: "
- "size %llu, error %d", spa_name(spa), size, error);
+ "size %llu, error %d", spa_name(spa), (u_longlong_t)size,
+ error);
}
return (error);
diff --git a/sys/contrib/openzfs/tests/runfiles/common.run b/sys/contrib/openzfs/tests/runfiles/common.run
index dd25a55edfd4..5f5e10d133e8 100644
--- a/sys/contrib/openzfs/tests/runfiles/common.run
+++ b/sys/contrib/openzfs/tests/runfiles/common.run
@@ -350,7 +350,8 @@ tests = ['zpool_create_001_pos', 'zpool_create_002_pos',
'zpool_create_features_003_pos', 'zpool_create_features_004_neg',
'zpool_create_features_005_pos', 'zpool_create_features_006_pos',
'zpool_create_features_007_pos', 'zpool_create_features_008_pos',
- 'create-o_ashift', 'zpool_create_tempname', 'zpool_create_dryrun_output']
+ 'zpool_create_features_009_pos', 'create-o_ashift',
+ 'zpool_create_tempname', 'zpool_create_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_create']
[tests/functional/cli_root/zpool_destroy]
diff --git a/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in b/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
index a3e9f2a82e69..27c865ed5c7a 100755
--- a/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
+++ b/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
@@ -261,6 +261,8 @@ if sys.platform.startswith('freebsd'):
maybe.update({
'cli_root/zfs_copies/zfs_copies_002_pos': ['FAIL', known_reason],
'cli_root/zfs_inherit/zfs_inherit_001_neg': ['FAIL', known_reason],
+ 'cli_root/zfs_receive/receive-o-x_props_override':
+ ['FAIL', known_reason],
'cli_root/zfs_share/zfs_share_011_pos': ['FAIL', known_reason],
'cli_root/zfs_share/zfs_share_concurrent_shares':
['FAIL', known_reason],
@@ -282,6 +284,7 @@ elif sys.platform.startswith('linux'):
'alloc_class/alloc_class_009_pos': ['FAIL', known_reason],
'alloc_class/alloc_class_010_pos': ['FAIL', known_reason],
'alloc_class/alloc_class_011_neg': ['FAIL', known_reason],
+ 'alloc_class/alloc_class_012_pos': ['FAIL', known_reason],
'alloc_class/alloc_class_013_pos': ['FAIL', '11888'],
'cli_root/zfs_rename/zfs_rename_002_pos': ['FAIL', known_reason],
'cli_root/zpool_expand/zpool_expand_001_pos': ['FAIL', known_reason],
diff --git a/sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg b/sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg
index 0db9724eead0..1ec73f25bae7 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg
+++ b/sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg
@@ -188,7 +188,6 @@ export ZFS_FILES='zdb
zed
zgenhostid
zstream
- zstreamdump
zfs_ids_to_path
zpool_influxdb'
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_create/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_create/Makefile.am
index 5e9e83f0db91..5ffaae5b152c 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_create/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_create/Makefile.am
@@ -39,6 +39,7 @@ dist_pkgdata_SCRIPTS = \
zpool_create_features_006_pos.ksh \
zpool_create_features_007_pos.ksh \
zpool_create_features_008_pos.ksh \
+ zpool_create_features_009_pos.ksh \
create-o_ashift.ksh \
zpool_create_tempname.ksh \
zpool_create_dryrun_output.ksh
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_features_009_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_features_009_pos.ksh
new file mode 100755
index 000000000000..052c18dcee2b
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_features_009_pos.ksh
@@ -0,0 +1,92 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2021 Lawrence Livermore National Security, LLC.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+#
+# DESCRIPTION:
+# Verify '-o compatibility' property is updated in both the
+# pool config MOS object and the cache file.
+#
+# STRATEGY:
+# 1. Create a pool with '-o compatibility=legacy', then verify
+# the property exists in the MOS config and cache file.
+# 2. Create a pool (compatibility defaults to off), set the
+#    'compatibility=legacy' property, then verify it in the MOS config
+#
+
+verify_runnable "global"
+
+function cleanup
+{
+ datasetexists $TESTPOOL && log_must zpool destroy $TESTPOOL
+ rm -f $CACHE_FILE
+}
+
+function check_config
+{
+	typeset propval=$1
+
+	poolval="$(zpool get -H -o value compatibility $TESTPOOL)"
+	if [ "$poolval" != "$propval" ]; then
+		log_fail "compatibility property set incorrectly $poolval"
+	fi
+
+	if ! zdb -C -U $CACHE_FILE | grep "compatibility: '$propval'"; then
+		log_fail "compatibility property missing in cache file"
+	fi
+
+	if ! zdb -C -U $CACHE_FILE $TESTPOOL | grep "compatibility: '$propval'"; then
+		log_fail "compatibility property missing from MOS object"
+	fi
+}
+
+log_onexit cleanup
+
+log_assert "verify '-o compatibility' in MOS object and cache file"
+
+CACHE_FILE=$TEST_BASE_DIR/cachefile.$$
+
+# 1. Create a pool with '-o compatibility=legacy', then verify
+# the property exists in the MOS config and cache file.
+log_must zpool create -f -o cachefile=$CACHE_FILE -o compatibility=legacy $TESTPOOL $DISKS
+log_must check_config legacy
+log_must zpool export -F $TESTPOOL
+log_must zpool import -c $CACHE_FILE $TESTPOOL
+log_must check_config legacy
+log_must zpool destroy -f $TESTPOOL
+
+# 2. Create a pool (compatibility defaults to off), set the
+#    'compatibility=legacy' property, then verify the property exists
+log_must zpool create -f -o cachefile=$CACHE_FILE $TESTPOOL $DISKS
+log_must zpool set compatibility=legacy $TESTPOOL
+log_must check_config legacy
+log_must zpool export -F $TESTPOOL
+log_must zpool import -c $CACHE_FILE $TESTPOOL
+log_must check_config legacy
+log_must zpool destroy -f $TESTPOOL
+
+log_pass "verify '-o compatibility' in MOS object and cache file"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/redacted_send/redacted_embedded.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/redacted_send/redacted_embedded.ksh
index 94937a2f79ab..1c5b503a9be5 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/redacted_send/redacted_embedded.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/redacted_send/redacted_embedded.ksh
@@ -65,7 +65,7 @@ for recsize in 512 1024 2048 4096 8192 16384; do
grep -q "EMBEDDED" $tmpdir/recv.zdb || \
log_fail "Obj $recv_obj not embedded in $recvfs"
- cat $stream | zstreamdump -v | log_must grep -q \
+ cat $stream | zstream dump -v | log_must grep -q \
"WRITE_EMBEDDED object = $send_obj offset = 0"
done
@@ -96,7 +96,7 @@ for recsize in 1024 4096 16384; do
grep -q "EMBEDDED" $tmpdir/recv.zdb || \
log_fail "Obj $recv_obj not embedded in $recvfs"
- cat $stream | zstreamdump -v | log_must grep -q \
+ cat $stream | zstream dump -v | log_must grep -q \
"WRITE_EMBEDDED object = $send_obj offset = 0"
done
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/rsend.kshlib b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/rsend.kshlib
index 26755e87d0a5..d06bd39b4d49 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/rsend.kshlib
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/rsend.kshlib
@@ -584,13 +584,13 @@ function mess_send_file
# The random offset might truncate the send stream to be
# smaller than the DRR_BEGIN record. If this happens, then
# the receiving system won't have enough info to create the
- # partial dataset at all. We use zstreamdump to check for
+ # partial dataset at all. We use zstream dump to check for
# this and retry in this case.
- nr_begins=$(head -c $offset $file | zstreamdump | \
+ nr_begins=$(head -c $offset $file | zstream dump | \
grep DRR_BEGIN | awk '{ print $5 }')
while [ "$nr_begins" -eq 0 ]; do
offset=$(($RANDOM * $RANDOM % $filesize))
- nr_begins=$(head -c $offset $file | zstreamdump | \
+ nr_begins=$(head -c $offset $file | zstream dump | \
grep DRR_BEGIN | awk '{ print $5 }')
done
@@ -741,7 +741,7 @@ function stream_has_features
shift
[[ -f $file ]] || log_fail "Couldn't find file: $file"
- typeset flags=$(cat $file | zstreamdump | \
+ typeset flags=$(cat $file | zstream dump | \
awk '/features =/ {features = $3} END {print features}')
typeset -A feature
feature[dedup]="1"
@@ -774,7 +774,7 @@ function stream_has_features
# comparing. This function does not currently handle incremental streams
# that remove data.
#
-# $1 The zstreamdump output file
+# $1 The zstream dump output file
# $2 The dataset to compare against
# This can be a source of a send or recv target (fs, not snapshot)
# $3 The percentage below which verification is deemed a failure
@@ -791,7 +791,7 @@ function verify_stream_size
[[ -f $stream ]] || log_fail "No such file: $stream"
datasetexists $ds || log_fail "No such dataset: $ds"
- typeset stream_size=$(cat $stream | zstreamdump | sed -n \
+ typeset stream_size=$(cat $stream | zstream dump | sed -n \
's/ Total payload size = \(.*\) (0x.*)/\1/p')
typeset inc_size=0
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send-c_embedded_blocks.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send-c_embedded_blocks.ksh
index 70f79b3173b7..3dce217d8955 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send-c_embedded_blocks.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send-c_embedded_blocks.ksh
@@ -63,17 +63,17 @@ for recsize in "${recsize_prop_vals[@]}"; do
fi
done
-# Generate the streams and zstreamdump output.
+# Generate the streams and zstream dump output.
log_must zfs snapshot $sendfs@now
log_must eval "zfs send -c $sendfs@now >$stream"
-log_must eval "zstreamdump -v <$stream >$dump"
+log_must eval "zstream dump -v <$stream >$dump"
log_must eval "zfs recv -d $recvfs <$stream"
cmp_ds_cont $sendfs $recvfs
verify_stream_size $stream $sendfs
log_mustnot stream_has_features $stream embed_data
log_must eval "zfs send -c -e $sendfs@now >$stream2"
-log_must eval "zstreamdump -v <$stream2 >$dump2"
+log_must eval "zstream dump -v <$stream2 >$dump2"
log_must eval "zfs recv -d $recvfs2 <$stream2"
cmp_ds_cont $sendfs $recvfs2
verify_stream_size $stream2 $sendfs
@@ -101,9 +101,9 @@ for recsize in "${recsize_prop_vals[@]}"; do
log_fail "Obj $recv2_obj not embedded in $recvfs2"
grep -q "WRITE_EMBEDDED object = $send_obj offset = 0" $dump && \
- log_fail "Obj $obj embedded in zstreamdump output"
+ log_fail "Obj $obj embedded in zstream dump output"
grep -q "WRITE_EMBEDDED object = $send_obj offset = 0" $dump2 || \
- log_fail "Obj $obj not embedded in zstreamdump output"
+ log_fail "Obj $obj not embedded in zstream dump output"
done
log_pass "Compressed streams can contain embedded blocks."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send-c_zstreamdump.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send-c_zstreamdump.ksh
index b4dc00cec4e7..5b9939c6a64c 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send-c_zstreamdump.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send-c_zstreamdump.ksh
@@ -21,12 +21,12 @@
#
# Description:
-# Verify compression features show up in zstreamdump
+# Verify compression features show up in zstream dump
#
# Strategy:
# 1. Create a full compressed send stream
-# 2. Verify zstreamdump shows this stream has the relevant features
-# 3. Verify zstreamdump's accounting of logical and compressed size is correct
+# 2. Verify zstream dump shows this stream has the relevant features
+# 3. Verify zstream dump's accounting of logical and compressed size is correct
# 4. Verify the toname from a resume token
# 5. Verify it fails with corrupted resume token
# 6. Verify it fails with missing resume token
@@ -34,7 +34,7 @@
verify_runnable "both"
-log_assert "Verify zstreamdump correctly interprets compressed send streams."
+log_assert "Verify zstream dump correctly interprets compressed send streams."
log_onexit cleanup_pool $POOL2
typeset sendfs=$POOL2/fs
@@ -49,7 +49,7 @@ log_must zfs snapshot $sendfs@full
log_must eval "zfs send -c $sendfs@full >$BACKDIR/full"
log_must stream_has_features $BACKDIR/full lz4 compressed
-cat $BACKDIR/full | zstreamdump -v > $BACKDIR/dump.out
+cat $BACKDIR/full | zstream dump -v > $BACKDIR/dump.out
lsize=$(awk '/^WRITE [^0]/ {lsize += $24} END {printf("%d", lsize)}' \
$BACKDIR/dump.out)
@@ -72,4 +72,4 @@ bad_resume_token="1-1162e8285b-100789c6360"
log_mustnot eval "zstream token $bad_resume_token 2>&1"
log_mustnot eval "zstream token 2>&1"
-log_pass "zstreamdump correctly interprets compressed send streams."
+log_pass "zstream dump correctly interprets compressed send streams."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send-cpL_varied_recsize.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send-cpL_varied_recsize.ksh
index e1ac00c79c96..e2810651a60e 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send-cpL_varied_recsize.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send-cpL_varied_recsize.ksh
@@ -134,7 +134,7 @@ function check
[[ -f $stream ]] && log_must rm $stream
log_must eval "zfs send $flags $send_snap >$stream"
$verify eval "zfs recv $recv_ds <$stream"
- typeset stream_size=$(cat $stream | zstreamdump | sed -n \
+ typeset stream_size=$(cat $stream | zstream dump | sed -n \
's/ Total write size = \(.*\) (0x.*)/\1/p')
#
diff --git a/sys/modules/zfs/zfs_config.h b/sys/modules/zfs/zfs_config.h
index 59041ba82b69..0080a6c775e9 100644
--- a/sys/modules/zfs/zfs_config.h
+++ b/sys/modules/zfs/zfs_config.h
@@ -734,7 +734,7 @@
/* #undef ZFS_IS_GPL_COMPATIBLE */
/* Define the project alias string. */
-#define ZFS_META_ALIAS "zfs-2.1.0-FreeBSD_g9a865b7fb"
+#define ZFS_META_ALIAS "zfs-2.1.0-FreeBSD_gaee26af27"
/* Define the project author. */
#define ZFS_META_AUTHOR "OpenZFS"
@@ -764,7 +764,7 @@
#define ZFS_META_NAME "zfs"
/* Define the project release. */
-#define ZFS_META_RELEASE "FreeBSD_g9a865b7fb"
+#define ZFS_META_RELEASE "FreeBSD_gaee26af27"
/* Define the project version. */
#define ZFS_META_VERSION "2.1.0"