Diffstat (limited to 'sys/contrib/openzfs/module')
-rw-r--r--  sys/contrib/openzfs/module/avl/avl.c | 16
-rw-r--r--  sys/contrib/openzfs/module/icp/core/kcf_sched.c | 2
-rw-r--r--  sys/contrib/openzfs/module/icp/io/aes.c | 10
-rw-r--r--  sys/contrib/openzfs/module/nvpair/nvpair.c | 6
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/spl/spl_kmem.c | 2
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/spl/spl_sysevent.c | 2
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c | 4
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/dmu_os.c | 8
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c | 2
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c | 4
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c | 4
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_dir.c | 4
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c | 11
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c | 90
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode_os.c | 18
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c | 20
-rw-r--r--  sys/contrib/openzfs/module/os/linux/spl/spl-condvar.c | 8
-rw-r--r--  sys/contrib/openzfs/module/os/linux/spl/spl-generic.c | 2
-rw-r--r--  sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c | 24
-rw-r--r--  sys/contrib/openzfs/module/os/linux/spl/spl-kstat.c | 2
-rw-r--r--  sys/contrib/openzfs/module/os/linux/spl/spl-thread.c | 2
-rw-r--r--  sys/contrib/openzfs/module/os/linux/spl/spl-tsd.c | 2
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/abd_os.c | 6
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c | 6
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c | 6
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c | 8
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zfs_dir.c | 24
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zfs_sysfs.c | 2
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c | 27
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops_os.c | 132
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zfs_znode_os.c | 58
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c | 63
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zpl_inode.c | 2
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c | 4
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c | 4
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c | 57
-rw-r--r--  sys/contrib/openzfs/module/zfs/abd.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/arc.c | 68
-rw-r--r--  sys/contrib/openzfs/module/zfs/bpobj.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/btree.c | 8
-rw-r--r--  sys/contrib/openzfs/module/zfs/dataset_kstats.c | 1
-rw-r--r--  sys/contrib/openzfs/module/zfs/dbuf.c | 74
-rw-r--r--  sys/contrib/openzfs/module/zfs/ddt.c | 8
-rw-r--r--  sys/contrib/openzfs/module/zfs/ddt_log.c | 8
-rw-r--r--  sys/contrib/openzfs/module/zfs/dmu.c | 12
-rw-r--r--  sys/contrib/openzfs/module/zfs/dmu_direct.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/dmu_object.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/dmu_objset.c | 4
-rw-r--r--  sys/contrib/openzfs/module/zfs/dmu_recv.c | 12
-rw-r--r--  sys/contrib/openzfs/module/zfs/dmu_redact.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/dmu_send.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/dmu_tx.c | 18
-rw-r--r--  sys/contrib/openzfs/module/zfs/dnode.c | 22
-rw-r--r--  sys/contrib/openzfs/module/zfs/dnode_sync.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_bookmark.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_crypt.c | 4
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_dataset.c | 10
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_deadlist.c | 5
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_deleg.c | 20
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_destroy.c | 8
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_dir.c | 4
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_pool.c | 10
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_prop.c | 31
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_scan.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_userhold.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/fm.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/metaslab.c | 34
-rw-r--r--  sys/contrib/openzfs/module/zfs/mmp.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/range_tree.c | 4
-rw-r--r--  sys/contrib/openzfs/module/zfs/rrwlock.c | 8
-rw-r--r--  sys/contrib/openzfs/module/zfs/sa.c | 24
-rw-r--r--  sys/contrib/openzfs/module/zfs/spa.c | 66
-rw-r--r--  sys/contrib/openzfs/module/zfs/spa_misc.c | 19
-rw-r--r--  sys/contrib/openzfs/module/zfs/spa_stats.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/space_map.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/space_reftree.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev.c | 46
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_draid.c | 16
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_indirect.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_initialize.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_label.c | 14
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_queue.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_raidz.c | 8
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_rebuild.c | 4
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_removal.c | 22
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_trim.c | 8
-rw-r--r--  sys/contrib/openzfs/module/zfs/zap.c | 8
-rw-r--r--  sys/contrib/openzfs/module/zfs/zap_micro.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/zcp.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/zfeature.c | 4
-rw-r--r--  sys/contrib/openzfs/module/zfs/zfs_fuid.c | 44
-rw-r--r--  sys/contrib/openzfs/module/zfs/zfs_ioctl.c | 119
-rw-r--r--  sys/contrib/openzfs/module/zfs/zfs_log.c | 4
-rw-r--r--  sys/contrib/openzfs/module/zfs/zfs_quota.c | 4
-rw-r--r--  sys/contrib/openzfs/module/zfs/zfs_rlock.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/zfs_sa.c | 15
-rw-r--r--  sys/contrib/openzfs/module/zfs/zfs_vnops.c | 27
-rw-r--r--  sys/contrib/openzfs/module/zfs/zil.c | 461
-rw-r--r--  sys/contrib/openzfs/module/zfs/zio.c | 62
-rw-r--r--  sys/contrib/openzfs/module/zfs/zio_checksum.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/zio_compress.c | 15
-rw-r--r--  sys/contrib/openzfs/module/zfs/zio_inject.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/zrlock.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/zthr.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/zvol.c | 117
105 files changed, 1331 insertions(+), 877 deletions(-)
diff --git a/sys/contrib/openzfs/module/avl/avl.c b/sys/contrib/openzfs/module/avl/avl.c
index b6c1c02bc3f2..67cbcd3adeec 100644
--- a/sys/contrib/openzfs/module/avl/avl.c
+++ b/sys/contrib/openzfs/module/avl/avl.c
@@ -225,7 +225,7 @@ avl_nearest(avl_tree_t *tree, avl_index_t where, int direction)
size_t off = tree->avl_offset;
if (node == NULL) {
- ASSERT(tree->avl_root == NULL);
+ ASSERT0P(tree->avl_root);
return (NULL);
}
data = AVL_NODE2DATA(node, off);
@@ -478,7 +478,7 @@ avl_insert(avl_tree_t *tree, void *new_data, avl_index_t where)
size_t off = tree->avl_offset;
#ifdef _LP64
- ASSERT(((uintptr_t)new_data & 0x7) == 0);
+ ASSERT0(((uintptr_t)new_data & 0x7));
#endif
node = AVL_DATA2NODE(new_data, off);
@@ -495,10 +495,10 @@ avl_insert(avl_tree_t *tree, void *new_data, avl_index_t where)
AVL_SETBALANCE(node, 0);
AVL_SETPARENT(node, parent);
if (parent != NULL) {
- ASSERT(parent->avl_child[which_child] == NULL);
+ ASSERT0P(parent->avl_child[which_child]);
parent->avl_child[which_child] = node;
} else {
- ASSERT(tree->avl_root == NULL);
+ ASSERT0P(tree->avl_root);
tree->avl_root = node;
}
/*
@@ -608,7 +608,7 @@ avl_insert_here(
ASSERT(diff > 0 ? child == 1 : child == 0);
#endif
}
- ASSERT(node->avl_child[child] == NULL);
+ ASSERT0P(node->avl_child[child]);
avl_insert(tree, new_data, AVL_MKINDEX(node, child));
}
@@ -881,7 +881,7 @@ avl_create(avl_tree_t *tree, int (*compar) (const void *, const void *),
ASSERT(size > 0);
ASSERT(size >= offset + sizeof (avl_node_t));
#ifdef _LP64
- ASSERT((offset & 0x7) == 0);
+ ASSERT0((offset & 0x7));
#endif
tree->avl_compar = compar;
@@ -897,8 +897,8 @@ void
avl_destroy(avl_tree_t *tree)
{
ASSERT(tree);
- ASSERT(tree->avl_numnodes == 0);
- ASSERT(tree->avl_root == NULL);
+ ASSERT0(tree->avl_numnodes);
+ ASSERT0P(tree->avl_root);
}
diff --git a/sys/contrib/openzfs/module/icp/core/kcf_sched.c b/sys/contrib/openzfs/module/icp/core/kcf_sched.c
index 759f0d81d521..75e1052a4ed4 100644
--- a/sys/contrib/openzfs/module/icp/core/kcf_sched.c
+++ b/sys/contrib/openzfs/module/icp/core/kcf_sched.c
@@ -124,7 +124,7 @@ kcf_context_cache_destructor(void *buf, void *cdrarg)
(void) cdrarg;
kcf_context_t *kctx = (kcf_context_t *)buf;
- ASSERT(kctx->kc_refcnt == 0);
+ ASSERT0(kctx->kc_refcnt);
}
void
diff --git a/sys/contrib/openzfs/module/icp/io/aes.c b/sys/contrib/openzfs/module/icp/io/aes.c
index ba703efa71fc..ca586eaf97ef 100644
--- a/sys/contrib/openzfs/module/icp/io/aes.c
+++ b/sys/contrib/openzfs/module/icp/io/aes.c
@@ -236,16 +236,16 @@ aes_encrypt_atomic(crypto_mechanism_t *mechanism,
aes_xor_block);
if (ret != CRYPTO_SUCCESS)
goto out;
- ASSERT(aes_ctx.ac_remainder_len == 0);
+ ASSERT0(aes_ctx.ac_remainder_len);
} else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE) {
ret = gcm_encrypt_final((gcm_ctx_t *)&aes_ctx,
ciphertext, AES_BLOCK_LEN, aes_encrypt_block,
aes_copy_block, aes_xor_block);
if (ret != CRYPTO_SUCCESS)
goto out;
- ASSERT(aes_ctx.ac_remainder_len == 0);
+ ASSERT0(aes_ctx.ac_remainder_len);
} else {
- ASSERT(aes_ctx.ac_remainder_len == 0);
+ ASSERT0(aes_ctx.ac_remainder_len);
}
if (plaintext != ciphertext) {
@@ -337,7 +337,7 @@ aes_decrypt_atomic(crypto_mechanism_t *mechanism,
ret = ccm_decrypt_final((ccm_ctx_t *)&aes_ctx,
plaintext, AES_BLOCK_LEN, aes_encrypt_block,
aes_copy_block, aes_xor_block);
- ASSERT(aes_ctx.ac_remainder_len == 0);
+ ASSERT0(aes_ctx.ac_remainder_len);
if ((ret == CRYPTO_SUCCESS) &&
(ciphertext != plaintext)) {
plaintext->cd_length =
@@ -349,7 +349,7 @@ aes_decrypt_atomic(crypto_mechanism_t *mechanism,
ret = gcm_decrypt_final((gcm_ctx_t *)&aes_ctx,
plaintext, AES_BLOCK_LEN, aes_encrypt_block,
aes_xor_block);
- ASSERT(aes_ctx.ac_remainder_len == 0);
+ ASSERT0(aes_ctx.ac_remainder_len);
if ((ret == CRYPTO_SUCCESS) &&
(ciphertext != plaintext)) {
plaintext->cd_length =
diff --git a/sys/contrib/openzfs/module/nvpair/nvpair.c b/sys/contrib/openzfs/module/nvpair/nvpair.c
index 811cfc87d7a4..eb8c14b4a783 100644
--- a/sys/contrib/openzfs/module/nvpair/nvpair.c
+++ b/sys/contrib/openzfs/module/nvpair/nvpair.c
@@ -265,7 +265,7 @@ nv_priv_alloc_embedded(nvpriv_t *priv)
static int
nvt_tab_alloc(nvpriv_t *priv, uint64_t buckets)
{
- ASSERT3P(priv->nvp_hashtable, ==, NULL);
+ ASSERT0P(priv->nvp_hashtable);
ASSERT0(priv->nvp_nbuckets);
ASSERT0(priv->nvp_nentries);
@@ -334,7 +334,7 @@ nvt_lookup_name_type(const nvlist_t *nvl, const char *name, data_type_t type)
i_nvp_t **tab = priv->nvp_hashtable;
if (tab == NULL) {
- ASSERT3P(priv->nvp_list, ==, NULL);
+ ASSERT0P(priv->nvp_list);
ASSERT0(priv->nvp_nbuckets);
ASSERT0(priv->nvp_nentries);
return (NULL);
@@ -540,7 +540,7 @@ nvt_add_nvpair(nvlist_t *nvl, nvpair_t *nvp)
/* insert link at the beginning of the bucket */
i_nvp_t *new_entry = NVPAIR2I_NVP(nvp);
- ASSERT3P(new_entry->nvi_hashtable_next, ==, NULL);
+ ASSERT0P(new_entry->nvi_hashtable_next);
new_entry->nvi_hashtable_next = bucket;
// cppcheck-suppress nullPointerRedundantCheck
tab[index] = new_entry;
diff --git a/sys/contrib/openzfs/module/os/freebsd/spl/spl_kmem.c b/sys/contrib/openzfs/module/os/freebsd/spl/spl_kmem.c
index 6d198fad5203..ae6e36d988c2 100644
--- a/sys/contrib/openzfs/module/os/freebsd/spl/spl_kmem.c
+++ b/sys/contrib/openzfs/module/os/freebsd/spl/spl_kmem.c
@@ -160,7 +160,7 @@ kmem_cache_create(const char *name, size_t bufsize, size_t align,
{
kmem_cache_t *cache;
- ASSERT3P(vmp, ==, NULL);
+ ASSERT0P(vmp);
cache = kmem_alloc(sizeof (*cache), KM_SLEEP);
strlcpy(cache->kc_name, name, sizeof (cache->kc_name));
diff --git a/sys/contrib/openzfs/module/os/freebsd/spl/spl_sysevent.c b/sys/contrib/openzfs/module/os/freebsd/spl/spl_sysevent.c
index 9da633c2b1be..3c2d39b20c09 100644
--- a/sys/contrib/openzfs/module/os/freebsd/spl/spl_sysevent.c
+++ b/sys/contrib/openzfs/module/os/freebsd/spl/spl_sysevent.c
@@ -256,7 +256,7 @@ sysevent_worker(void *arg __unused)
* free `ze`, so just inline the free() here -- events have already
* been drained.
*/
- VERIFY3P(ze->ze_zevent, ==, NULL);
+ VERIFY0P(ze->ze_zevent);
kmem_free(ze, sizeof (zfs_zevent_t));
kthread_exit();
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c
index fbf67f6a14a8..4bf487cdc469 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c
@@ -507,7 +507,7 @@ abd_iter_at_end(struct abd_iter *aiter)
void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
- ASSERT3P(aiter->iter_mapaddr, ==, NULL);
+ ASSERT0P(aiter->iter_mapaddr);
ASSERT0(aiter->iter_mapsize);
/* There's nothing left to advance to, so do nothing */
@@ -526,7 +526,7 @@ abd_iter_map(struct abd_iter *aiter)
{
void *paddr;
- ASSERT3P(aiter->iter_mapaddr, ==, NULL);
+ ASSERT0P(aiter->iter_mapaddr);
ASSERT0(aiter->iter_mapsize);
/* There's nothing left to iterate over, so do nothing */
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/dmu_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/dmu_os.c
index 364bbfc60abd..26cc7981bfcd 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/dmu_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/dmu_os.c
@@ -156,7 +156,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
if (dbp[0]->db_offset != 0 || numbufs > 1) {
for (i = 0; i < numbufs; i++) {
ASSERT(ISP2(dbp[i]->db_size));
- ASSERT3U((dbp[i]->db_offset % dbp[i]->db_size), ==, 0);
+ ASSERT0((dbp[i]->db_offset % dbp[i]->db_size));
ASSERT3U(dbp[i]->db_size, ==, dbp[0]->db_size);
}
}
@@ -175,7 +175,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
vm_page_sunbusy(m);
break;
}
- ASSERT3U(m->dirty, ==, 0);
+ ASSERT0(m->dirty);
ASSERT(!pmap_page_is_write_mapped(m));
ASSERT3U(db->db_size, >, PAGE_SIZE);
@@ -201,7 +201,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
if (m != bogus_page) {
vm_page_assert_xbusied(m);
ASSERT(vm_page_none_valid(m));
- ASSERT3U(m->dirty, ==, 0);
+ ASSERT0(m->dirty);
ASSERT(!pmap_page_is_write_mapped(m));
va = zfs_map_page(m, &sf);
}
@@ -295,7 +295,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
vm_page_sunbusy(m);
break;
}
- ASSERT3U(m->dirty, ==, 0);
+ ASSERT0(m->dirty);
ASSERT(!pmap_page_is_write_mapped(m));
ASSERT3U(db->db_size, >, PAGE_SIZE);
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c b/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c
index c8ab7cc7cf8e..bbd1dafc69be 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c
@@ -1236,7 +1236,7 @@ vdev_geom_io_done(zio_t *zio)
struct bio *bp = zio->io_bio;
if (zio->io_type != ZIO_TYPE_READ && zio->io_type != ZIO_TYPE_WRITE) {
- ASSERT3P(bp, ==, NULL);
+ ASSERT0P(bp);
return;
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c
index 5c5adc6cc12b..b15a3e6e38c0 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c
@@ -1632,7 +1632,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
if (zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_ELOCKED(ZTOV(dzp), __func__);
} else
- ASSERT3P(dzp->z_vnode, ==, NULL);
+ ASSERT0P(dzp->z_vnode);
memset(acl_ids, 0, sizeof (zfs_acl_ids_t));
acl_ids->z_mode = MAKEIMODE(vap->va_type, vap->va_mode);
@@ -2014,7 +2014,7 @@ top:
error = zfs_aclset_common(zp, aclp, cr, tx);
ASSERT0(error);
- ASSERT3P(zp->z_acl_cached, ==, NULL);
+ ASSERT0P(zp->z_acl_cached);
zp->z_acl_cached = aclp;
if (fuid_dirtied)
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
index 8d0ff9b25e30..61d0bb26d1e5 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
@@ -357,7 +357,7 @@ zfsctl_create(zfsvfs_t *zfsvfs)
vnode_t *rvp;
uint64_t crtime[2];
- ASSERT3P(zfsvfs->z_ctldir, ==, NULL);
+ ASSERT0P(zfsvfs->z_ctldir);
snapdir = sfs_alloc_node(sizeof (*snapdir), "snapshot", ZFSCTL_INO_ROOT,
ZFSCTL_INO_SNAPDIR);
@@ -1367,7 +1367,7 @@ zfsctl_snapshot_unmount(const char *snapname, int flags __unused)
int err = getzfsvfs(snapname, &zfsvfs);
if (err != 0) {
- ASSERT3P(zfsvfs, ==, NULL);
+ ASSERT0P(zfsvfs);
return (0);
}
vfsp = zfsvfs->z_vfs;
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_dir.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_dir.c
index 191df832d726..75ba2ea0cb9e 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_dir.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_dir.c
@@ -273,7 +273,7 @@ zfs_unlinked_add(znode_t *zp, dmu_tx_t *tx)
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
ASSERT(zp->z_unlinked);
- ASSERT3U(zp->z_links, ==, 0);
+ ASSERT0(zp->z_links);
VERIFY0(zap_add_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx));
@@ -437,7 +437,7 @@ zfs_rmnode(znode_t *zp)
uint64_t count;
int error;
- ASSERT3U(zp->z_links, ==, 0);
+ ASSERT0(zp->z_links);
if (zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
index 0456552ed07e..79b784288911 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
@@ -455,8 +455,13 @@ zfs_sync(vfs_t *vfsp, int waitfor)
return (0);
}
- if (zfsvfs->z_log != NULL)
- zil_commit(zfsvfs->z_log, 0);
+ if (zfsvfs->z_log != NULL) {
+ error = zil_commit(zfsvfs->z_log, 0);
+ if (error != 0) {
+ zfs_exit(zfsvfs, FTAG);
+ return (error);
+ }
+ }
zfs_exit(zfsvfs, FTAG);
} else {
@@ -1091,7 +1096,7 @@ zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
if (mounting) {
boolean_t readonly;
- ASSERT3P(zfsvfs->z_kstat.dk_kstats, ==, NULL);
+ ASSERT0P(zfsvfs->z_kstat.dk_kstats);
error = dataset_kstats_create(&zfsvfs->z_kstat, zfsvfs->z_os);
if (error)
return (error);
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
index c4270d8b5d5c..1813c411b013 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
@@ -1101,7 +1101,7 @@ zfs_create(znode_t *dzp, const char *name, vattr_t *vap, int excl, int mode,
zfs_exit(zfsvfs, FTAG);
return (error);
}
- ASSERT3P(zp, ==, NULL);
+ ASSERT0P(zp);
/*
* Create a new file object and update the directory
@@ -1193,8 +1193,8 @@ out:
*zpp = zp;
}
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -1323,9 +1323,8 @@ out:
if (xzp)
vrele(ZTOV(xzp));
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
-
+ if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -1482,7 +1481,7 @@ zfs_mkdir(znode_t *dzp, const char *dirname, vattr_t *vap, znode_t **zpp,
zfs_exit(zfsvfs, FTAG);
return (error);
}
- ASSERT3P(zp, ==, NULL);
+ ASSERT0P(zp);
if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr,
mnt_ns))) {
@@ -1556,8 +1555,8 @@ out:
getnewvnode_drop_reserve();
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -1637,8 +1636,8 @@ zfs_rmdir_(vnode_t *dvp, vnode_t *vp, const char *name, cred_t *cr)
if (zfsvfs->z_use_namecache)
cache_vop_rmdir(dvp, vp);
out:
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -3009,8 +3008,8 @@ out:
}
out2:
- if (os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (err == 0 && os->os_sync == ZFS_SYNC_ALWAYS)
+ err = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (err);
@@ -3539,7 +3538,7 @@ out_seq:
out:
if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ error = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -3731,7 +3730,7 @@ zfs_symlink(znode_t *dzp, const char *name, vattr_t *vap,
*zpp = zp;
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ error = zil_commit(zilog, 0);
}
zfs_exit(zfsvfs, FTAG);
@@ -3921,8 +3920,8 @@ zfs_link(znode_t *tdzp, znode_t *szp, const char *name, cred_t *cr,
vnevent_link(ZTOV(szp), ct);
}
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -4313,7 +4312,7 @@ typedef struct {
} putpage_commit_arg_t;
static void
-zfs_putpage_commit_cb(void *arg)
+zfs_putpage_commit_cb(void *arg, int err)
{
putpage_commit_arg_t *pca = arg;
vm_object_t object = pca->pca_pages[0]->object;
@@ -4322,7 +4321,17 @@ zfs_putpage_commit_cb(void *arg)
for (uint_t i = 0; i < pca->pca_npages; i++) {
vm_page_t pp = pca->pca_pages[i];
- vm_page_undirty(pp);
+
+ if (err == 0) {
+ /*
+ * Writeback succeeded, so undirty the page. If it
+ * fails, we leave it in the same state it was. That's
+ * most likely dirty, so it will get tried again some
+ * other time.
+ */
+ vm_page_undirty(pp);
+ }
+
vm_page_sunbusy(pp);
}
@@ -4510,8 +4519,13 @@ zfs_putpages(struct vnode *vp, vm_page_t *ma, size_t len, int flags,
out:
zfs_rangelock_exit(lr);
- if (commit)
- zil_commit(zfsvfs->z_log, zp->z_id);
+ if (commit) {
+ err = zil_commit(zfsvfs->z_log, zp->z_id);
+ if (err != 0) {
+ zfs_exit(zfsvfs, FTAG);
+ return (err);
+ }
+ }
dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, len);
@@ -5223,8 +5237,32 @@ struct vop_fsync_args {
static int
zfs_freebsd_fsync(struct vop_fsync_args *ap)
{
+ vnode_t *vp = ap->a_vp;
+ int err = 0;
+
+ /*
+ * Push any dirty mmap()'d data out to the DMU and ZIL, ready for
+ * zil_commit() to be called in zfs_fsync().
+ */
+ if (vm_object_mightbedirty(vp->v_object)) {
+ zfs_vmobject_wlock(vp->v_object);
+ if (!vm_object_page_clean(vp->v_object, 0, 0, 0))
+ err = SET_ERROR(EIO);
+ zfs_vmobject_wunlock(vp->v_object);
+ if (err) {
+ /*
+ * Unclear what state things are in. zfs_putpages()
+ * will ensure the pages remain dirty if they haven't
+ * been written down to the DMU, but because there may
+ * be nothing logged, we can't assume that zfs_sync()
+ * -> zil_commit() will give us a useful error. It's
+ * safest if we just error out here.
+ */
+ return (err);
+ }
+ }
- return (zfs_fsync(VTOZ(ap->a_vp), 0, ap->a_td->td_ucred));
+ return (zfs_fsync(VTOZ(vp), 0, ap->a_td->td_ucred));
}
#ifndef _SYS_SYSPROTO_H_
@@ -6773,9 +6811,11 @@ zfs_deallocate(struct vop_deallocate_args *ap)
if (error == 0) {
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS ||
(ap->a_ioflag & IO_SYNC) != 0)
- zil_commit(zilog, zp->z_id);
- *ap->a_offset = off + len;
- *ap->a_len = 0;
+ error = zil_commit(zilog, zp->z_id);
+ if (error == 0) {
+ *ap->a_offset = off + len;
+ *ap->a_len = 0;
+ }
}
zfs_exit(zfsvfs, FTAG);
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode_os.c
index 775f54a65f7d..7cd0a153577c 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode_os.c
@@ -161,15 +161,15 @@ zfs_znode_cache_destructor(void *buf, void *arg)
znode_t *zp = buf;
ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
- ASSERT3P(zp->z_vnode, ==, NULL);
+ ASSERT0P(zp->z_vnode);
ASSERT(!list_link_active(&zp->z_link_node));
mutex_destroy(&zp->z_lock);
mutex_destroy(&zp->z_acl_lock);
rw_destroy(&zp->z_xattr_lock);
zfs_rangelock_fini(&zp->z_rangelock);
- ASSERT3P(zp->z_acl_cached, ==, NULL);
- ASSERT3P(zp->z_xattr_cached, ==, NULL);
+ ASSERT0P(zp->z_acl_cached);
+ ASSERT0P(zp->z_xattr_cached);
}
@@ -195,7 +195,7 @@ zfs_znode_init(void)
/*
* Initialize zcache
*/
- ASSERT3P(znode_uma_zone, ==, NULL);
+ ASSERT0P(znode_uma_zone);
znode_uma_zone = uma_zcreate("zfs_znode_cache",
sizeof (znode_t), zfs_znode_cache_constructor_smr,
zfs_znode_cache_destructor_smr, NULL, NULL, 0, 0);
@@ -224,7 +224,7 @@ zfs_znode_init(void)
/*
* Initialize zcache
*/
- ASSERT3P(znode_cache, ==, NULL);
+ ASSERT0P(znode_cache);
znode_cache = kmem_cache_create("zfs_znode_cache",
sizeof (znode_t), 0, zfs_znode_cache_constructor,
zfs_znode_cache_destructor, NULL, NULL, NULL, KMC_RECLAIMABLE);
@@ -353,8 +353,8 @@ zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs) || (zfsvfs == zp->z_zfsvfs));
ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zfsvfs, zp->z_id)));
- ASSERT3P(zp->z_sa_hdl, ==, NULL);
- ASSERT3P(zp->z_acl_cached, ==, NULL);
+ ASSERT0P(zp->z_sa_hdl);
+ ASSERT0P(zp->z_acl_cached);
if (sa_hdl == NULL) {
VERIFY0(sa_handle_get_from_db(zfsvfs->z_os, db, zp,
SA_HDL_SHARED, &zp->z_sa_hdl));
@@ -1127,7 +1127,7 @@ zfs_rezget(znode_t *zp)
}
rw_exit(&zp->z_xattr_lock);
- ASSERT3P(zp->z_sa_hdl, ==, NULL);
+ ASSERT0P(zp->z_sa_hdl);
err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
if (err) {
ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
@@ -1298,7 +1298,7 @@ zfs_znode_free(znode_t *zp)
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
char *symlink;
- ASSERT3P(zp->z_sa_hdl, ==, NULL);
+ ASSERT0P(zp->z_sa_hdl);
zp->z_vnode = NULL;
mutex_enter(&zfsvfs->z_znodes_lock);
POINTER_INVALIDATE(&zp->z_zfsvfs);
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
index 72a7c4ea082a..265dfd55fc4d 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
@@ -727,9 +727,9 @@ unlock:
break;
}
- if (commit) {
+ if (error == 0 && commit) {
commit:
- zil_commit(zv->zv_zilog, ZVOL_OBJ);
+ error = zil_commit(zv->zv_zilog, ZVOL_OBJ);
}
resume:
rw_exit(&zv->zv_suspend_lock);
@@ -906,8 +906,8 @@ zvol_cdev_write(struct cdev *dev, struct uio *uio_s, int ioflag)
zfs_rangelock_exit(lr);
int64_t nwritten = start_resid - zfs_uio_resid(&uio);
dataset_kstats_update_write_kstats(&zv->zv_kstat, nwritten);
- if (commit)
- zil_commit(zv->zv_zilog, ZVOL_OBJ);
+ if (error == 0 && commit)
+ error = zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
return (error);
@@ -1117,7 +1117,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
case DIOCGFLUSH:
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
if (zv->zv_zilog != NULL)
- zil_commit(zv->zv_zilog, ZVOL_OBJ);
+ error = zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
break;
case DIOCGDELETE:
@@ -1152,7 +1152,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
}
zfs_rangelock_exit(lr);
if (sync)
- zil_commit(zv->zv_zilog, ZVOL_OBJ);
+ error = zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
break;
case DIOCGSTRIPESIZE:
@@ -1415,7 +1415,7 @@ zvol_os_free(zvol_state_t *zv)
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp __maybe_unused = zsg->zsg_provider;
- ASSERT3P(pp->private, ==, NULL);
+ ASSERT0P(pp->private);
g_topology_lock();
zvol_geom_destroy(zv);
@@ -1425,7 +1425,7 @@ zvol_os_free(zvol_state_t *zv)
struct cdev *dev = zsd->zsd_cdev;
if (dev != NULL) {
- ASSERT3P(dev->si_drv2, ==, NULL);
+ ASSERT0P(dev->si_drv2);
destroy_dev(dev);
knlist_clear(&zsd->zsd_selinfo.si_note, 0);
knlist_destroy(&zsd->zsd_selinfo.si_note);
@@ -1493,11 +1493,11 @@ zvol_os_create_minor(const char *name)
zv->zv_objset = os;
- ASSERT3P(zv->zv_kstat.dk_kstats, ==, NULL);
+ ASSERT0P(zv->zv_kstat.dk_kstats);
error = dataset_kstats_create(&zv->zv_kstat, zv->zv_objset);
if (error)
goto out_dmu_objset_disown;
- ASSERT3P(zv->zv_zilog, ==, NULL);
+ ASSERT0P(zv->zv_zilog);
zv->zv_zilog = zil_open(os, zvol_get_data, &zv->zv_kstat.dk_zil_sums);
if (spa_writeable(dmu_objset_spa(os))) {
if (zil_replay_disable)
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-condvar.c b/sys/contrib/openzfs/module/os/linux/spl/spl-condvar.c
index ce9c9e39e60c..aac5f2ebbfd2 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-condvar.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-condvar.c
@@ -66,9 +66,9 @@ void
__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
ASSERT(cvp);
- ASSERT(name == NULL);
+ ASSERT0P(name);
ASSERT(type == CV_DEFAULT);
- ASSERT(arg == NULL);
+ ASSERT0P(arg);
cvp->cv_magic = CV_MAGIC;
init_waitqueue_head(&cvp->cv_event);
@@ -83,7 +83,7 @@ static int
cv_destroy_wakeup(kcondvar_t *cvp)
{
if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
- ASSERT(cvp->cv_mutex == NULL);
+ ASSERT0P(cvp->cv_mutex);
ASSERT(!waitqueue_active(&cvp->cv_event));
return (1);
}
@@ -104,7 +104,7 @@ __cv_destroy(kcondvar_t *cvp)
while (cv_destroy_wakeup(cvp) == 0)
wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);
- ASSERT3P(cvp->cv_mutex, ==, NULL);
+ ASSERT0P(cvp->cv_mutex);
ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-generic.c b/sys/contrib/openzfs/module/os/linux/spl/spl-generic.c
index f37699b4347e..89ca4a648b2f 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-generic.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-generic.c
@@ -709,7 +709,7 @@ zone_get_hostid(void *zone)
{
uint32_t hostid;
- ASSERT3P(zone, ==, NULL);
+ ASSERT0P(zone);
if (spl_hostid != 0)
return ((uint32_t)(spl_hostid & HW_HOSTID_MASK));
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c b/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c
index fab80289b278..22e4ed169d03 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-kmem-cache.c
@@ -296,7 +296,7 @@ spl_slab_free(spl_kmem_slab_t *sks,
spl_kmem_cache_t *skc;
ASSERT(sks->sks_magic == SKS_MAGIC);
- ASSERT(sks->sks_ref == 0);
+ ASSERT0(sks->sks_ref);
skc = sks->sks_cache;
ASSERT(skc->skc_magic == SKC_MAGIC);
@@ -598,7 +598,7 @@ static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
ASSERT(skm->skm_magic == SKM_MAGIC);
- ASSERT(skm->skm_avail == 0);
+ ASSERT0(skm->skm_avail);
kfree(skm);
}
@@ -610,7 +610,7 @@ spl_magazine_create(spl_kmem_cache_t *skc)
{
int i = 0;
- ASSERT((skc->skc_flags & KMC_SLAB) == 0);
+ ASSERT0((skc->skc_flags & KMC_SLAB));
skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) *
num_possible_cpus(), kmem_flags_convert(KM_SLEEP));
@@ -640,7 +640,7 @@ spl_magazine_destroy(spl_kmem_cache_t *skc)
spl_kmem_magazine_t *skm;
int i = 0;
- ASSERT((skc->skc_flags & KMC_SLAB) == 0);
+ ASSERT0((skc->skc_flags & KMC_SLAB));
for_each_possible_cpu(i) {
skm = skc->skc_mag[i];
@@ -679,8 +679,8 @@ spl_kmem_cache_create(const char *name, size_t size, size_t align,
/*
* Unsupported flags
*/
- ASSERT(vmp == NULL);
- ASSERT(reclaim == NULL);
+ ASSERT0P(vmp);
+ ASSERT0P(reclaim);
might_sleep();
@@ -863,11 +863,11 @@ spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
* Validate there are no objects in use and free all the
* spl_kmem_slab_t, spl_kmem_obj_t, and object buffers.
*/
- ASSERT3U(skc->skc_slab_alloc, ==, 0);
- ASSERT3U(skc->skc_obj_alloc, ==, 0);
- ASSERT3U(skc->skc_slab_total, ==, 0);
- ASSERT3U(skc->skc_obj_total, ==, 0);
- ASSERT3U(skc->skc_obj_emergency, ==, 0);
+ ASSERT0(skc->skc_slab_alloc);
+ ASSERT0(skc->skc_obj_alloc);
+ ASSERT0(skc->skc_slab_total);
+ ASSERT0(skc->skc_obj_total);
+ ASSERT0(skc->skc_obj_emergency);
ASSERT(list_empty(&skc->skc_complete_list));
ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0);
@@ -986,7 +986,7 @@ spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
ASSERT0(flags & ~KM_PUBLIC_MASK);
ASSERT(skc->skc_magic == SKC_MAGIC);
- ASSERT((skc->skc_flags & KMC_SLAB) == 0);
+ ASSERT0((skc->skc_flags & KMC_SLAB));
*obj = NULL;
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-kstat.c b/sys/contrib/openzfs/module/os/linux/spl/spl-kstat.c
index 48f70b00c96b..02c5b42bc4a0 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-kstat.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-kstat.c
@@ -541,7 +541,7 @@ __kstat_create(const char *ks_module, int ks_instance, const char *ks_name,
kstat_t *ksp;
ASSERT(ks_module);
- ASSERT(ks_instance == 0);
+ ASSERT0(ks_instance);
ASSERT(ks_name);
if ((ks_type == KSTAT_TYPE_INTR) || (ks_type == KSTAT_TYPE_IO))
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-thread.c b/sys/contrib/openzfs/module/os/linux/spl/spl-thread.c
index f42f455222de..8f5c73b13df5 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-thread.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-thread.c
@@ -80,7 +80,7 @@ __thread_create(caddr_t stk, size_t stksize, thread_func_t func,
/* Option pp is simply ignored */
/* Variable stack size unsupported */
- ASSERT(stk == NULL);
+ ASSERT0P(stk);
tp = kmem_alloc(sizeof (thread_priv_t), KM_PUSHPAGE);
if (tp == NULL)
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-tsd.c b/sys/contrib/openzfs/module/os/linux/spl/spl-tsd.c
index 34a61bef7d4f..2e8cedf0dc87 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-tsd.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-tsd.c
@@ -161,7 +161,7 @@ tsd_hash_add(tsd_hash_table_t *table, uint_t key, pid_t pid, void *value)
ulong_t hash;
int rc = 0;
- ASSERT3P(tsd_hash_search(table, key, pid), ==, NULL);
+ ASSERT0P(tsd_hash_search(table, key, pid));
/* New entry allocate structure, set value, and add to hash */
entry = kmem_alloc(sizeof (tsd_hash_entry_t), KM_PUSHPAGE);
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c b/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c
index 248c9b7a6d3b..8a8316f63c48 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/abd_os.c
@@ -863,9 +863,9 @@ abd_iter_advance(struct abd_iter *aiter, size_t amount)
* Ensure that last chunk is not in use. abd_iterate_*() must clear
* this state (directly or abd_iter_unmap()) before advancing.
*/
- ASSERT3P(aiter->iter_mapaddr, ==, NULL);
+ ASSERT0P(aiter->iter_mapaddr);
ASSERT0(aiter->iter_mapsize);
- ASSERT3P(aiter->iter_page, ==, NULL);
+ ASSERT0P(aiter->iter_page);
ASSERT0(aiter->iter_page_doff);
ASSERT0(aiter->iter_page_dsize);
@@ -897,7 +897,7 @@ abd_iter_map(struct abd_iter *aiter)
void *paddr;
size_t offset = 0;
- ASSERT3P(aiter->iter_mapaddr, ==, NULL);
+ ASSERT0P(aiter->iter_mapaddr);
ASSERT0(aiter->iter_mapsize);
/* There's nothing left to iterate over, so do nothing */
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c b/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c
index 154ca22d9513..830fad7fe793 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c
@@ -552,7 +552,7 @@ vdev_bio_associate_blkg(struct bio *bio)
#endif
ASSERT3P(q, !=, NULL);
- ASSERT3P(bio->bi_blkg, ==, NULL);
+ ASSERT0P(bio->bi_blkg);
if (q->root_blkg && vdev_blkg_tryget(q->root_blkg))
bio->bi_blkg = q->root_blkg;
@@ -574,7 +574,7 @@ vdev_bio_set_dev(struct bio *bio, struct block_device *bdev)
bio->bi_bdev = bdev;
ASSERT3P(q, !=, NULL);
- ASSERT3P(bio->bi_blkg, ==, NULL);
+ ASSERT0P(bio->bi_blkg);
if (q->root_blkg && vdev_blkg_tryget(q->root_blkg))
bio->bi_blkg = q->root_blkg;
@@ -806,7 +806,7 @@ vbio_completion(struct bio *bio)
* here; instead we stash vbio on the zio and take care of it in the
* done callback.
*/
- ASSERT3P(zio->io_bio, ==, NULL);
+ ASSERT0P(zio->io_bio);
zio->io_bio = vbio;
zio_delay_interrupt(zio);
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c
index 1b169122f25b..daa4b5776837 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c
@@ -1900,7 +1900,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
if (!(flag & IS_ROOT_NODE) &&
(dzp->z_pflags & ZFS_INHERIT_ACE) &&
!(dzp->z_pflags & ZFS_XATTR)) {
- VERIFY(0 == zfs_acl_node_read(dzp, B_TRUE,
+ VERIFY0(zfs_acl_node_read(dzp, B_TRUE,
&paclp, B_FALSE));
acl_ids->z_aclp = zfs_acl_inherit(zfsvfs,
vap->va_mode, paclp, acl_ids->z_mode, &need_chmod);
@@ -2204,8 +2204,8 @@ top:
}
error = zfs_aclset_common(zp, aclp, cr, tx);
- ASSERT(error == 0);
- ASSERT(zp->z_acl_cached == NULL);
+ ASSERT0(error);
+ ASSERT0P(zp->z_acl_cached);
zp->z_acl_cached = aclp;
if (fuid_dirtied)
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c
index 6552a933ce0a..fb4de50480a3 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c
@@ -494,9 +494,9 @@ zfsctl_inode_alloc(zfsvfs_t *zfsvfs, uint64_t id,
if (!creation)
now = current_time(ip);
zp = ITOZ(ip);
- ASSERT3P(zp->z_dirlocks, ==, NULL);
- ASSERT3P(zp->z_acl_cached, ==, NULL);
- ASSERT3P(zp->z_xattr_cached, ==, NULL);
+ ASSERT0P(zp->z_dirlocks);
+ ASSERT0P(zp->z_acl_cached);
+ ASSERT0P(zp->z_xattr_cached);
zp->z_id = id;
zp->z_unlinked = B_FALSE;
zp->z_atime_dirty = B_FALSE;
@@ -590,7 +590,7 @@ zfsctl_inode_lookup(zfsvfs_t *zfsvfs, uint64_t id,
int
zfsctl_create(zfsvfs_t *zfsvfs)
{
- ASSERT(zfsvfs->z_ctldir == NULL);
+ ASSERT0P(zfsvfs->z_ctldir);
zfsvfs->z_ctldir = zfsctl_inode_alloc(zfsvfs, ZFSCTL_INO_ROOT,
&zpl_fops_root, &zpl_ops_root, 0);
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_dir.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_dir.c
index 2f935bb3fc8c..e8de536606e2 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_dir.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_dir.c
@@ -463,7 +463,7 @@ zfs_unlinked_add(znode_t *zp, dmu_tx_t *tx)
zfsvfs_t *zfsvfs = ZTOZSB(zp);
ASSERT(zp->z_unlinked);
- ASSERT(ZTOI(zp)->i_nlink == 0);
+ ASSERT0(ZTOI(zp)->i_nlink);
VERIFY3U(0, ==,
zap_add_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx));
@@ -662,8 +662,8 @@ zfs_rmnode(znode_t *zp)
uint64_t links;
int error;
- ASSERT(ZTOI(zp)->i_nlink == 0);
- ASSERT(atomic_read(&ZTOI(zp)->i_count) == 0);
+ ASSERT0(ZTOI(zp)->i_nlink);
+ ASSERT0(atomic_read(&ZTOI(zp)->i_count));
/*
* If this is an attribute directory, purge its contents.
@@ -710,7 +710,7 @@ zfs_rmnode(znode_t *zp)
&xattr_obj, sizeof (xattr_obj));
if (error == 0 && xattr_obj) {
error = zfs_zget(zfsvfs, xattr_obj, &xzp);
- ASSERT(error == 0);
+ ASSERT0(error);
}
acl_obj = zfs_external_acl(zp);
@@ -744,12 +744,12 @@ zfs_rmnode(znode_t *zp)
}
if (xzp) {
- ASSERT(error == 0);
+ ASSERT0(error);
mutex_enter(&xzp->z_lock);
xzp->z_unlinked = B_TRUE; /* mark xzp for deletion */
clear_nlink(ZTOI(xzp)); /* no more links to it */
links = 0;
- VERIFY(0 == sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
+ VERIFY0(sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
&links, sizeof (links), tx));
mutex_exit(&xzp->z_lock);
zfs_unlinked_add(xzp, tx);
@@ -872,7 +872,7 @@ zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
ctime);
}
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
mutex_exit(&zp->z_lock);
@@ -894,7 +894,7 @@ zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
&dzp->z_pflags, sizeof (dzp->z_pflags));
zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime);
error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
mutex_exit(&dzp->z_lock);
return (0);
@@ -986,7 +986,7 @@ zfs_drop_nlink_locked(znode_t *zp, dmu_tx_t *tx, boolean_t *unlinkedp)
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
NULL, &links, sizeof (links));
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
- ASSERT3U(error, ==, 0);
+ ASSERT0(error);
if (unlinkedp != NULL)
*unlinkedp = unlinked;
@@ -1058,7 +1058,7 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
/* The only error is !zfs_dirempty() and we checked earlier. */
error = zfs_drop_nlink_locked(zp, tx, &unlinked);
- ASSERT3U(error, ==, 0);
+ ASSERT0(error);
mutex_exit(&zp->z_lock);
} else {
error = zfs_dropname(dl, zp, dzp, tx, flag);
@@ -1083,7 +1083,7 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
NULL, &dzp->z_pflags, sizeof (dzp->z_pflags));
zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime);
error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
mutex_exit(&dzp->z_lock);
if (unlinkedp != NULL)
@@ -1167,7 +1167,7 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, znode_t **xzpp, cred_t *cr)
ASSERT(error == 0 && parent == zp->z_id);
#endif
- VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xzp->z_id,
+ VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xzp->z_id,
sizeof (xzp->z_id), tx));
if (!zp->z_unlinked)
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_sysfs.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_sysfs.c
index 1c187d7b9cab..895d80b2d79e 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_sysfs.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_sysfs.c
@@ -223,7 +223,7 @@ zfs_kobj_add(zfs_mod_kobj_t *zkobj, struct kobject *parent, const char *name)
{
/* zko_default_group.attrs must be NULL terminated */
ASSERT(zkobj->zko_default_group.attrs != NULL);
- ASSERT(zkobj->zko_default_group.attrs[zkobj->zko_attr_count] == NULL);
+ ASSERT0P(zkobj->zko_default_group.attrs[zkobj->zko_attr_count]);
kobject_init(&zkobj->zko_kobj, &zkobj->zko_kobj_type);
return (kobject_add(&zkobj->zko_kobj, parent, name));
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
index 396faef8f646..cd606e667bff 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
@@ -279,19 +279,14 @@ zfs_sync(struct super_block *sb, int wait, cred_t *cr)
return (err);
/*
- * If the pool is suspended, just return an error. This is to help
- * with shutting down with pools suspended, as we don't want to block
- * in that case.
+ * Sync any pending writes, but do not block if the pool is suspended.
+ * This is to help with shutting down with pools suspended, as we don't
+ * want to block in that case.
*/
- if (spa_suspended(zfsvfs->z_os->os_spa)) {
- zfs_exit(zfsvfs, FTAG);
- return (SET_ERROR(EIO));
- }
-
- zil_commit(zfsvfs->z_log, 0);
+ err = zil_commit_flags(zfsvfs->z_log, 0, ZIL_COMMIT_NOW);
zfs_exit(zfsvfs, FTAG);
- return (0);
+ return (err);
}
static void
@@ -883,7 +878,7 @@ zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
* operations out since we closed the ZIL.
*/
if (mounting) {
- ASSERT3P(zfsvfs->z_kstat.dk_kstats, ==, NULL);
+ ASSERT0P(zfsvfs->z_kstat.dk_kstats);
error = dataset_kstats_create(&zfsvfs->z_kstat, zfsvfs->z_os);
if (error)
return (error);
@@ -1676,7 +1671,7 @@ zfs_umount(struct super_block *sb)
if (zfsvfs->z_arc_prune != NULL)
arc_remove_prune_callback(zfsvfs->z_arc_prune);
- VERIFY(zfsvfs_teardown(zfsvfs, B_TRUE) == 0);
+ VERIFY0(zfsvfs_teardown(zfsvfs, B_TRUE));
os = zfsvfs->z_os;
/*
@@ -1802,8 +1797,8 @@ zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
ASSERT(*ipp != NULL);
if (object == ZFSCTL_INO_SNAPDIR) {
- VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp,
- 0, kcred, NULL, NULL) == 0);
+ VERIFY0(zfsctl_root_lookup(*ipp, "snapshot", ipp,
+ 0, kcred, NULL, NULL));
} else {
/*
* Must have an existing ref, so igrab()
@@ -1905,7 +1900,7 @@ zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
goto bail;
ds->ds_dir->dd_activity_cancelled = B_FALSE;
- VERIFY(zfsvfs_setup(zfsvfs, B_FALSE) == 0);
+ VERIFY0(zfsvfs_setup(zfsvfs, B_FALSE));
zfs_set_fuid_feature(zfsvfs);
zfsvfs->z_rollback_time = jiffies;
@@ -2078,7 +2073,7 @@ zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
ASSERT0(error);
- VERIFY(0 == sa_set_sa_object(os, sa_obj));
+ VERIFY0(sa_set_sa_object(os, sa_obj));
sa_register_update_callback(os, zfs_sa_upgrade);
}
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops_os.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops_os.c
index 6a2fc5ad7935..6106726651a3 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops_os.c
@@ -841,8 +841,8 @@ out:
*zpp = zp;
}
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -1203,8 +1203,8 @@ out:
zfs_zrele_async(xzp);
}
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -1392,14 +1392,15 @@ out:
zfs_dirent_unlock(dl);
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
-
if (error != 0) {
zrele(zp);
} else {
zfs_znode_update_vfs(dzp);
zfs_znode_update_vfs(zp);
+
+ if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
+
}
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -1528,8 +1529,8 @@ out:
zfs_znode_update_vfs(zp);
zrele(zp);
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -2483,10 +2484,10 @@ top:
new_mode = zp->z_mode;
}
err = zfs_acl_chown_setattr(zp);
- ASSERT(err == 0);
+ ASSERT0(err);
if (attrzp) {
err = zfs_acl_chown_setattr(attrzp);
- ASSERT(err == 0);
+ ASSERT0(err);
}
}
@@ -2600,7 +2601,7 @@ out:
if (err == 0 && xattr_count > 0) {
err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
xattr_count, tx);
- ASSERT(err2 == 0);
+ ASSERT0(err2);
}
if (aclp)
@@ -2630,8 +2631,8 @@ out:
}
out2:
- if (os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (err == 0 && os->os_sync == ZFS_SYNC_ALWAYS)
+ err = zil_commit(zilog, 0);
out3:
kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * bulks);
@@ -3157,7 +3158,7 @@ top:
* zfs_link_create() to add back the same entry, but with a new
* dnode (szp), should not fail.
*/
- ASSERT3P(tzp, ==, NULL);
+ ASSERT0P(tzp);
goto commit_link_tzp;
}
@@ -3235,8 +3236,8 @@ out:
zfs_dirent_unlock(sdl);
zfs_dirent_unlock(tdl);
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -3436,7 +3437,7 @@ top:
*zpp = zp;
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ error = zil_commit(zilog, 0);
} else {
zrele(zp);
}
@@ -3654,8 +3655,8 @@ top:
* operation are sync safe.
*/
if (is_tmpfile) {
- VERIFY(zap_remove_int(zfsvfs->z_os,
- zfsvfs->z_unlinkedobj, szp->z_id, tx) == 0);
+ VERIFY0(zap_remove_int(zfsvfs->z_os,
+ zfsvfs->z_unlinkedobj, szp->z_id, tx));
} else {
if (flags & FIGNORECASE)
txtype |= TX_CI;
@@ -3670,18 +3671,20 @@ top:
zfs_dirent_unlock(dl);
- if (!is_tmpfile && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
-
- if (is_tmpfile && zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
- txg_wait_flag_t wait_flags =
- spa_get_failmode(dmu_objset_spa(zfsvfs->z_os)) ==
- ZIO_FAILURE_MODE_CONTINUE ? TXG_WAIT_SUSPEND : 0;
- error = txg_wait_synced_flags(dmu_objset_pool(zfsvfs->z_os),
- txg, wait_flags);
- if (error != 0) {
- ASSERT3U(error, ==, ESHUTDOWN);
- error = SET_ERROR(EIO);
+ if (error == 0) {
+ if (!is_tmpfile && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
+
+ if (is_tmpfile && zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
+ txg_wait_flag_t wait_flags =
+ spa_get_failmode(dmu_objset_spa(zfsvfs->z_os)) ==
+ ZIO_FAILURE_MODE_CONTINUE ? TXG_WAIT_SUSPEND : 0;
+ error = txg_wait_synced_flags(
+ dmu_objset_pool(zfsvfs->z_os), txg, wait_flags);
+ if (error != 0) {
+ ASSERT3U(error, ==, ESHUTDOWN);
+ error = SET_ERROR(EIO);
+ }
}
}
@@ -3691,16 +3694,42 @@ top:
return (error);
}
-static void
-zfs_putpage_commit_cb(void *arg)
+/* Finish page writeback. */
+static inline void
+zfs_page_writeback_done(struct page *pp, int err)
{
- struct page *pp = arg;
+ if (err != 0) {
+ /*
+ * Writeback failed. Re-dirty the page. It was undirtied before
+ * the IO was issued (in zfs_putpage() or write_cache_pages()).
+ * The kernel only considers writeback for dirty pages; if we
+ * don't do this, it is eligible for eviction without being
+ * written out, which we definitely don't want.
+ */
+#ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO
+ filemap_dirty_folio(page_mapping(pp), page_folio(pp));
+#else
+ __set_page_dirty_nobuffers(pp);
+#endif
+ }
ClearPageError(pp);
end_page_writeback(pp);
}
/*
+ * ZIL callback for page writeback. Passes to zfs_log_write() in zfs_putpage()
+ * for syncing writes. Called when the ZIL itx has been written to the log or
+ * the whole txg syncs, or if the ZIL crashes or the pool suspends. Any failure
+ * is passed as `err`.
+ */
+static void
+zfs_putpage_commit_cb(void *arg, int err)
+{
+ zfs_page_writeback_done(arg, err);
+}
+
+/*
* Push a page out to disk, once the page is on stable storage the
* registered commit callback will be run as notification of completion.
*
@@ -3853,16 +3882,15 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc,
err = dmu_tx_assign(tx, DMU_TX_WAIT);
if (err != 0) {
dmu_tx_abort(tx);
-#ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO
- filemap_dirty_folio(page_mapping(pp), page_folio(pp));
-#else
- __set_page_dirty_nobuffers(pp);
-#endif
- ClearPageError(pp);
- end_page_writeback(pp);
+ zfs_page_writeback_done(pp, err);
zfs_rangelock_exit(lr);
zfs_exit(zfsvfs, FTAG);
- return (err);
+
+ /*
+ * Don't return error for an async writeback; we've re-dirtied
+ * the page so it will be tried again some other time.
+ */
+ return (for_sync ? err : 0);
}
va = kmap(pp);
@@ -3916,7 +3944,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc,
* ALL, zfs_putpage should do it.
*
* Summary:
- * for_sync: 0=unlock immediately; 1 unlock once on disk
+ * for_sync: 0=unlock immediately; 1=unlock once on disk
* sync_mode: NONE=caller will commit; ALL=we will commit
*/
boolean_t need_commit = (wbc->sync_mode != WB_SYNC_NONE);
@@ -3931,16 +3959,24 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc,
B_FALSE, for_sync ? zfs_putpage_commit_cb : NULL, pp);
if (!for_sync) {
- ClearPageError(pp);
- end_page_writeback(pp);
+ /*
+ * Async writeback is logged and written to the DMU, so page
+ * can now be unlocked.
+ */
+ zfs_page_writeback_done(pp, 0);
}
dmu_tx_commit(tx);
zfs_rangelock_exit(lr);
- if (need_commit)
- zil_commit(zfsvfs->z_log, zp->z_id);
+ if (need_commit) {
+ err = zil_commit_flags(zfsvfs->z_log, zp->z_id, ZIL_COMMIT_NOW);
+ if (err != 0) {
+ zfs_exit(zfsvfs, FTAG);
+ return (err);
+ }
+ }
dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, pglen);
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode_os.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode_os.c
index 7683eeb3cf9f..bcaabeb32b8a 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode_os.c
@@ -144,9 +144,9 @@ zfs_znode_cache_destructor(void *buf, void *arg)
rw_destroy(&zp->z_xattr_lock);
zfs_rangelock_fini(&zp->z_rangelock);
- ASSERT3P(zp->z_dirlocks, ==, NULL);
- ASSERT3P(zp->z_acl_cached, ==, NULL);
- ASSERT3P(zp->z_xattr_cached, ==, NULL);
+ ASSERT0P(zp->z_dirlocks);
+ ASSERT0P(zp->z_acl_cached);
+ ASSERT0P(zp->z_xattr_cached);
}
static int
@@ -178,13 +178,13 @@ zfs_znode_init(void)
* backed by kmalloc() when on the Linux slab in order that any
* wait_on_bit() operations on the related inode operate properly.
*/
- ASSERT(znode_cache == NULL);
+ ASSERT0P(znode_cache);
znode_cache = kmem_cache_create("zfs_znode_cache",
sizeof (znode_t), 0, zfs_znode_cache_constructor,
zfs_znode_cache_destructor, NULL, NULL, NULL,
KMC_SLAB | KMC_RECLAIMABLE);
- ASSERT(znode_hold_cache == NULL);
+ ASSERT0P(znode_hold_cache);
znode_hold_cache = kmem_cache_create("zfs_znode_hold_cache",
sizeof (znode_hold_t), 0, zfs_znode_hold_cache_constructor,
zfs_znode_hold_cache_destructor, NULL, NULL, NULL, 0);
@@ -327,10 +327,10 @@ zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
mutex_enter(&zp->z_lock);
- ASSERT(zp->z_sa_hdl == NULL);
- ASSERT(zp->z_acl_cached == NULL);
+ ASSERT0P(zp->z_sa_hdl);
+ ASSERT0P(zp->z_acl_cached);
if (sa_hdl == NULL) {
- VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, zp,
+ VERIFY0(sa_handle_get_from_db(zfsvfs->z_os, db, zp,
SA_HDL_SHARED, &zp->z_sa_hdl));
} else {
zp->z_sa_hdl = sa_hdl;
@@ -530,9 +530,9 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
return (NULL);
zp = ITOZ(ip);
- ASSERT(zp->z_dirlocks == NULL);
- ASSERT3P(zp->z_acl_cached, ==, NULL);
- ASSERT3P(zp->z_xattr_cached, ==, NULL);
+ ASSERT0P(zp->z_dirlocks);
+ ASSERT0P(zp->z_acl_cached);
+ ASSERT0P(zp->z_xattr_cached);
zp->z_unlinked = B_FALSE;
zp->z_atime_dirty = B_FALSE;
zp->z_is_ctldir = B_FALSE;
@@ -611,7 +611,7 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
* processing so do not hash unlinked znodes.
*/
if (links > 0)
- VERIFY3S(insert_inode_locked(ip), ==, 0);
+ VERIFY0(insert_inode_locked(ip));
mutex_enter(&zfsvfs->z_znodes_lock);
list_insert_tail(&zfsvfs->z_all_znodes, zp);
@@ -811,7 +811,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
}
/* Now add in all of the "SA" attributes */
- VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
+ VERIFY0(sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
&sa_hdl));
/*
@@ -901,7 +901,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
acl_ids->z_fuid, acl_ids->z_fgid);
}
- VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);
+ VERIFY0(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx));
if (!(flag & IS_ROOT_NODE)) {
/*
@@ -1200,7 +1200,7 @@ zfs_rezget(znode_t *zp)
}
rw_exit(&zp->z_xattr_lock);
- ASSERT(zp->z_sa_hdl == NULL);
+ ASSERT0P(zp->z_sa_hdl);
err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
if (err) {
zfs_znode_hold_exit(zfsvfs, zh);
@@ -1314,9 +1314,9 @@ zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
zh = zfs_znode_hold_enter(zfsvfs, obj);
if (acl_obj) {
VERIFY(!zp->z_is_sa);
- VERIFY(0 == dmu_object_free(os, acl_obj, tx));
+ VERIFY0(dmu_object_free(os, acl_obj, tx));
}
- VERIFY(0 == dmu_object_free(os, obj, tx));
+ VERIFY0(dmu_object_free(os, obj, tx));
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
}
@@ -1536,7 +1536,7 @@ zfs_extend(znode_t *zp, uint64_t end)
zp->z_size = end;
- VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
+ VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
&zp->z_size, sizeof (zp->z_size), tx));
zfs_rangelock_exit(lr);
@@ -1726,7 +1726,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &zp->z_pflags, 8);
}
- VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);
+ VERIFY0(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx));
dmu_tx_commit(tx);
zfs_rangelock_exit(lr);
@@ -1793,7 +1793,7 @@ log:
NULL, &zp->z_pflags, 8);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);
@@ -1840,7 +1840,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
moid = MASTER_NODE_OBJ;
error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE,
DMU_OT_NONE, 0, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
/*
* Set starting attributes.
@@ -1853,7 +1853,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
const char *name;
ASSERT(nvpair_type(elem) == DATA_TYPE_UINT64);
- VERIFY(nvpair_value_uint64(elem, &val) == 0);
+ VERIFY0(nvpair_value_uint64(elem, &val));
name = nvpair_name(elem);
if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
if (val < version)
@@ -1861,7 +1861,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
} else {
error = zap_update(os, moid, name, 8, 1, &val, tx);
}
- ASSERT(error == 0);
+ ASSERT0(error);
if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
norm = val;
else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
@@ -1869,7 +1869,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
}
ASSERT(version != 0);
error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
/*
* Create zap object used for SA attribute registration
@@ -1879,7 +1879,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
DMU_OT_NONE, 0, tx);
error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
} else {
sa_obj = 0;
}
@@ -1889,7 +1889,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);
error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
/*
* Create root znode. Create minimal znode/inode/zfsvfs/sb
@@ -1922,7 +1922,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
&zfsvfs->z_attr_table);
- ASSERT(error == 0);
+ ASSERT0(error);
/*
* Fold case on file systems that are always or sometimes case
@@ -1946,12 +1946,12 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
mutex_init(&zfsvfs->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
}
- VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
+ VERIFY0(zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
cr, NULL, &acl_ids, zfs_init_idmap));
zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
ASSERT3P(zp, ==, rootzp);
error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
zfs_acl_ids_free(&acl_ids);
atomic_set(&ZTOI(rootzp)->i_count, 0);
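The bulk of this import is a mechanical conversion to the zero-comparing assertion macros (VERIFY0, VERIFY0P, ASSERT0, ASSERT0P). A minimal user-space sketch of what that shorthand means follows; it is not part of the patch, and the real definitions live in the SPL debug headers, panic through spl_panic() with file and line information rather than abort(), and the ASSERT* variants compile out entirely in non-debug builds.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins, not the real SPL macros. */
#define	VERIFY0(expr)	do { if ((expr) != 0) abort(); } while (0)
#define	VERIFY0P(ptr)	do { if ((ptr) != NULL) abort(); } while (0)
#define	ASSERT0(expr)	VERIFY0(expr)
#define	ASSERT0P(ptr)	VERIFY0P(ptr)

int
main(void)
{
	int error = 0;
	void *sa_hdl = NULL;

	VERIFY0(error);		/* was: VERIFY(0 == error) */
	ASSERT0P(sa_hdl);	/* was: ASSERT3P(sa_hdl, ==, NULL) */
	(void) printf("checks passed\n");
	return (0);
}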
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c
index ef7bd7352084..d07317b0d910 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c
@@ -22,6 +22,7 @@
/*
* Copyright (c) 2011, Lawrence Livermore National Security, LLC.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
+ * Copyright (c) 2025, Klara, Inc.
*/
@@ -106,6 +107,10 @@ zpl_iterate(struct file *filp, struct dir_context *ctx)
return (error);
}
+static inline int
+zpl_write_cache_pages(struct address_space *mapping,
+ struct writeback_control *wbc, void *data);
+
static int
zpl_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
@@ -115,9 +120,38 @@ zpl_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
int error;
fstrans_cookie_t cookie;
- error = filemap_write_and_wait_range(inode->i_mapping, start, end);
- if (error)
- return (error);
+ /*
+ * Force dirty pages in the range out to the DMU and the log, ready
+ * for zil_commit() to write down.
+ *
+ * We call write_cache_pages() directly to ensure that zpl_putpage() is
+ * called with the flags we need. We need WB_SYNC_NONE to avoid a call
+ * to zil_commit() (since we're doing this as a kind of pre-sync); but
+ * we do need for_sync so that the pages remain in writeback until
+ * they're on disk, and so that we get an error if the DMU write fails.
+ */
+ if (filemap_range_has_page(inode->i_mapping, start, end)) {
+ int for_sync = 1;
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_NONE,
+ .nr_to_write = LONG_MAX,
+ .range_start = start,
+ .range_end = end,
+ };
+ error =
+ zpl_write_cache_pages(inode->i_mapping, &wbc, &for_sync);
+ if (error != 0) {
+ /*
+ * Unclear what state things are in. zfs_putpage() will
+ * ensure the pages remain dirty if they haven't been
+ * written down to the DMU, but because there may be
+ * nothing logged, we can't assume that zfs_sync() ->
+ * zil_commit() will give us a useful error. It's
+ * safest if we just error out here.
+ */
+ return (error);
+ }
+ }
crhold(cr);
cookie = spl_fstrans_mark();
@@ -494,11 +528,30 @@ zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
if (sync_mode != wbc->sync_mode) {
if ((result = zpl_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (result);
- if (zfsvfs->z_log != NULL)
- zil_commit(zfsvfs->z_log, zp->z_id);
+
+ if (zfsvfs->z_log != NULL) {
+ /*
+ * We don't want to block here if the pool suspends,
+ * because this is not a syncing op by itself, but
+ * might be part of one that the caller will
+ * coordinate.
+ */
+ result = -zil_commit_flags(zfsvfs->z_log, zp->z_id,
+ ZIL_COMMIT_NOW);
+ }
+
zpl_exit(zfsvfs, FTAG);
/*
+ * If zil_commit_flags() failed, it's unclear what state things
+ * are currently in. putpage() has written back out what it can
+ * to the DMU, but it may not be on disk. We have little choice
+ * but to escape.
+ */
+ if (result != 0)
+ return (result);
+
+ /*
* We need to call write_cache_pages() again (we can't just
* return after the commit) because the previous call in
* non-SYNC mode does not guarantee that we got all the dirty
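The zpl_fsync() change relies on the writeback_control fields to describe which pages to push: everything dirty in [range_start, range_end], up to nr_to_write pages, without forcing a log commit. A rough user-space sketch of that selection logic follows; it is not part of the patch, fake_page, write_page() and writeback_range() are hypothetical stand-ins, and the real work is done by the kernel's write_cache_pages() and zpl_putpage().

#include <limits.h>
#include <stdio.h>

struct fake_wbc {
	long	nr_to_write;
	long	range_start;
	long	range_end;
};

struct fake_page {
	long	offset;
	int	dirty;
};

static int
write_page(struct fake_page *pg)
{
	pg->dirty = 0;
	(void) printf("wrote page at offset %ld\n", pg->offset);
	return (0);
}

static int
writeback_range(struct fake_page *pages, int npages, struct fake_wbc *wbc)
{
	for (int i = 0; i < npages && wbc->nr_to_write > 0; i++) {
		struct fake_page *pg = &pages[i];

		/* Skip clean pages and pages outside the requested range. */
		if (!pg->dirty || pg->offset < wbc->range_start ||
		    pg->offset > wbc->range_end)
			continue;
		int error = write_page(pg);
		if (error != 0)
			return (error);
		wbc->nr_to_write--;
	}
	return (0);
}

int
main(void)
{
	struct fake_page pages[] = {
		{ 0, 1 }, { 4096, 1 }, { 8192, 0 }, { 12288, 1 },
	};
	struct fake_wbc wbc = {
		.nr_to_write = LONG_MAX,
		.range_start = 4096,
		.range_end = 12288,
	};

	return (writeback_range(pages, 4, &wbc));
}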
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_inode.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_inode.c
index f9f6406f8b47..f97662d052c7 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_inode.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_inode.c
@@ -247,7 +247,7 @@ zpl_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
* and fifos, but we want to know if this behavior ever changes.
*/
if (S_ISSOCK(mode) || S_ISFIFO(mode))
- ASSERT(rdev == 0);
+ ASSERT0(rdev);
crhold(cr);
vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c
index 94dcdd0b887d..53819628627d 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c
@@ -49,7 +49,7 @@ zpl_inode_alloc(struct super_block *sb)
static void
zpl_inode_free(struct inode *ip)
{
- ASSERT(atomic_read(&ip->i_count) == 0);
+ ASSERT0(atomic_read(&ip->i_count));
zfs_inode_free(ip);
}
#endif
@@ -57,7 +57,7 @@ zpl_inode_free(struct inode *ip)
static void
zpl_inode_destroy(struct inode *ip)
{
- ASSERT(atomic_read(&ip->i_count) == 0);
+ ASSERT0(atomic_read(&ip->i_count));
zfs_inode_destroy(ip);
}
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c
index a098197e7448..d93282db815a 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c
@@ -1494,7 +1494,7 @@ zpl_posix_acl_free(void *arg)
acl_rel_head = NULL;
if (cmpxchg(&acl_rel_tail, &a->next,
&acl_rel_head) == &a->next) {
- ASSERT3P(a->next, ==, NULL);
+ ASSERT0P(a->next);
a->next = freelist;
freelist = a;
break;
@@ -1544,7 +1544,7 @@ zpl_posix_acl_release_impl(struct posix_acl *acl)
a->time = ddi_get_lbolt();
/* atomically points tail to us and get the previous tail */
prev = xchg(&acl_rel_tail, &a->next);
- ASSERT3P(*prev, ==, NULL);
+ ASSERT0P(*prev);
*prev = a;
/* if it was empty before, schedule the free task */
if (prev == &acl_rel_head)
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c b/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
index a7431cc4da9d..a73acdad34ae 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
@@ -84,8 +84,9 @@ static unsigned int zvol_blk_mq_blocks_per_thread = 8;
static inline void
zvol_end_io(struct bio *bio, struct request *rq, int error)
{
+ ASSERT3U(error, >=, 0);
if (bio) {
- bio->bi_status = errno_to_bi_status(-error);
+ bio->bi_status = errno_to_bi_status(error);
bio_endio(bio);
} else {
blk_mq_end_request(rq, errno_to_bi_status(error));
@@ -208,8 +209,14 @@ zvol_write(zv_request_t *zvr)
disk = zv->zv_zso->zvo_disk;
/* bio marked as FLUSH need to flush before write */
- if (io_is_flush(bio, rq))
- zil_commit(zv->zv_zilog, ZVOL_OBJ);
+ if (io_is_flush(bio, rq)) {
+ error = zil_commit(zv->zv_zilog, ZVOL_OBJ);
+ if (error != 0) {
+ rw_exit(&zv->zv_suspend_lock);
+ zvol_end_io(bio, rq, -error);
+ return;
+ }
+ }
/* Some requests are just for flush and nothing else. */
if (io_size(bio, rq) == 0) {
@@ -273,8 +280,8 @@ zvol_write(zv_request_t *zvr)
dataset_kstats_update_write_kstats(&zv->zv_kstat, nwritten);
task_io_account_write(nwritten);
- if (sync)
- zil_commit(zv->zv_zilog, ZVOL_OBJ);
+ if (error == 0 && sync)
+ error = zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
@@ -282,7 +289,7 @@ zvol_write(zv_request_t *zvr)
blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);
}
- zvol_end_io(bio, rq, -error);
+ zvol_end_io(bio, rq, error);
}
static void
@@ -361,7 +368,7 @@ zvol_discard(zv_request_t *zvr)
zfs_rangelock_exit(lr);
if (error == 0 && sync)
- zil_commit(zv->zv_zilog, ZVOL_OBJ);
+ error = zil_commit(zv->zv_zilog, ZVOL_OBJ);
unlock:
rw_exit(&zv->zv_suspend_lock);
@@ -371,7 +378,7 @@ unlock:
start_time);
}
- zvol_end_io(bio, rq, -error);
+ zvol_end_io(bio, rq, error);
}
static void
@@ -449,7 +456,7 @@ zvol_read(zv_request_t *zvr)
blk_generic_end_io_acct(q, disk, READ, bio, start_time);
}
- zvol_end_io(bio, rq, -error);
+ zvol_end_io(bio, rq, error);
}
static void
@@ -480,7 +487,7 @@ zvol_request_impl(zvol_state_t *zv, struct bio *bio, struct request *rq,
int rw = io_data_dir(bio, rq);
if (unlikely(zv->zv_flags & ZVOL_REMOVING)) {
- zvol_end_io(bio, rq, -SET_ERROR(ENXIO));
+ zvol_end_io(bio, rq, SET_ERROR(ENXIO));
goto out;
}
@@ -499,7 +506,7 @@ zvol_request_impl(zvol_state_t *zv, struct bio *bio, struct request *rq,
(long long unsigned)offset,
(long unsigned)size);
- zvol_end_io(bio, rq, -SET_ERROR(EIO));
+ zvol_end_io(bio, rq, SET_ERROR(EIO));
goto out;
}
@@ -512,8 +519,8 @@ zvol_request_impl(zvol_state_t *zv, struct bio *bio, struct request *rq,
#ifdef HAVE_BLK_MQ_RQ_HCTX
blk_mq_hw_queue = rq->mq_hctx->queue_num;
#else
- blk_mq_hw_queue =
- rq->q->queue_hw_ctx[rq->q->mq_map[rq->cpu]]->queue_num;
+ blk_mq_hw_queue = rq->q->queue_hw_ctx[
+ rq->q->mq_map[raw_smp_processor_id()]]->queue_num;
#endif
taskq_hash = cityhash3((uintptr_t)zv, offset >> ZVOL_TASKQ_OFFSET_SHIFT,
blk_mq_hw_queue);
@@ -521,7 +528,7 @@ zvol_request_impl(zvol_state_t *zv, struct bio *bio, struct request *rq,
if (rw == WRITE) {
if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
- zvol_end_io(bio, rq, -SET_ERROR(EROFS));
+ zvol_end_io(bio, rq, SET_ERROR(EROFS));
goto out;
}
@@ -886,16 +893,18 @@ zvol_ioctl(struct block_device *bdev, fmode_t mode,
case BLKZNAME:
mutex_enter(&zv->zv_state_lock);
- error = copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
+ error = -copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
mutex_exit(&zv->zv_state_lock);
+ if (error)
+ error = SET_ERROR(error);
break;
default:
- error = -ENOTTY;
+ error = SET_ERROR(ENOTTY);
break;
}
- return (SET_ERROR(error));
+ return (-error);
}
#ifdef CONFIG_COMPAT
@@ -1426,7 +1435,7 @@ zvol_os_free(zvol_state_t *zv)
ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
ASSERT0(zv->zv_open_count);
- ASSERT3P(zv->zv_zso->zvo_disk->private_data, ==, NULL);
+ ASSERT0P(zv->zv_zso->zvo_disk->private_data);
rw_destroy(&zv->zv_suspend_lock);
zfs_rangelock_fini(&zv->zv_rangelock);
@@ -1474,7 +1483,9 @@ __zvol_os_add_disk(struct gendisk *disk)
{
int error = 0;
#ifdef HAVE_ADD_DISK_RET
- error = add_disk(disk);
+ error = -add_disk(disk);
+ if (error)
+ error = SET_ERROR(error);
#else
add_disk(disk);
#endif
@@ -1649,11 +1660,11 @@ zvol_os_create_minor(const char *name)
blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, zv->zv_zso->zvo_queue);
#endif
- ASSERT3P(zv->zv_kstat.dk_kstats, ==, NULL);
+ ASSERT0P(zv->zv_kstat.dk_kstats);
error = dataset_kstats_create(&zv->zv_kstat, zv->zv_objset);
if (error)
goto out_dmu_objset_disown;
- ASSERT3P(zv->zv_zilog, ==, NULL);
+ ASSERT0P(zv->zv_zilog);
zv->zv_zilog = zil_open(os, zvol_get_data, &zv->zv_kstat.dk_zil_sums);
if (spa_writeable(dmu_objset_spa(os))) {
if (zil_replay_disable)
@@ -1759,10 +1770,10 @@ zvol_init(void)
return (error);
}
- error = register_blkdev(zvol_major, ZVOL_DRIVER);
+ error = -register_blkdev(zvol_major, ZVOL_DRIVER);
if (error) {
printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
- return (error);
+ return (SET_ERROR(error));
}
if (zvol_blk_mq_queue_depth == 0) {
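A recurring theme in the zvol_os.c hunks is the errno sign convention: ZFS-internal code carries positive errno values, and negation happens exactly once at the Linux boundary (hence the new ASSERT3U(error, >=, 0) in zvol_end_io() and the return (-error) in zvol_ioctl()). A small user-space sketch of that discipline follows; it is not part of the patch, and to_linux_errno() is a hypothetical stand-in for the real boundary conversions such as errno_to_bi_status().

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for the single conversion at the Linux boundary. */
static int
to_linux_errno(int zfs_error)
{
	return (-zfs_error);	/* Linux callers expect negative errno */
}

/* ZFS convention: 0 on success, positive errno on failure. */
static int
zfs_style_op(int fail)
{
	return (fail ? EIO : 0);
}

int
main(void)
{
	int error = zfs_style_op(1);

	/* Negate once, at the edge, rather than at every call site. */
	(void) printf("zfs error = %d, linux return = %d\n",
	    error, to_linux_errno(error));
	return (0);
}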
diff --git a/sys/contrib/openzfs/module/zfs/abd.c b/sys/contrib/openzfs/module/zfs/abd.c
index 826928e67350..bf9b13c30509 100644
--- a/sys/contrib/openzfs/module/zfs/abd.c
+++ b/sys/contrib/openzfs/module/zfs/abd.c
@@ -563,7 +563,7 @@ abd_get_offset_impl(abd_t *abd, abd_t *sabd, size_t off, size_t size)
left -= csize;
off = 0;
}
- ASSERT3U(left, ==, 0);
+ ASSERT0(left);
} else {
abd = abd_get_offset_scatter(abd, sabd, off, size);
}
diff --git a/sys/contrib/openzfs/module/zfs/arc.c b/sys/contrib/openzfs/module/zfs/arc.c
index 3483be64ec57..df41e3b49204 100644
--- a/sys/contrib/openzfs/module/zfs/arc.c
+++ b/sys/contrib/openzfs/module/zfs/arc.c
@@ -2239,8 +2239,8 @@ arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state)
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(state)) {
- ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
- ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_buf);
+ ASSERT0P(hdr->b_l1hdr.b_pabd);
ASSERT(!HDR_HAS_RABD(hdr));
(void) zfs_refcount_add_many(&state->arcs_esize[type],
HDR_GET_LSIZE(hdr), hdr);
@@ -2278,8 +2278,8 @@ arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(state)) {
- ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
- ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_buf);
+ ASSERT0P(hdr->b_l1hdr.b_pabd);
ASSERT(!HDR_HAS_RABD(hdr));
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
HDR_GET_LSIZE(hdr), hdr);
@@ -2319,7 +2319,7 @@ add_reference(arc_buf_hdr_t *hdr, const void *tag)
if (!HDR_EMPTY(hdr) && !MUTEX_HELD(HDR_LOCK(hdr))) {
ASSERT(state == arc_anon);
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
- ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_buf);
}
if ((zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
@@ -2503,7 +2503,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr)
(void) zfs_refcount_add_many(
&new_state->arcs_size[type],
HDR_GET_LSIZE(hdr), hdr);
- ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_pabd);
ASSERT(!HDR_HAS_RABD(hdr));
} else {
@@ -2547,7 +2547,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr)
if (update_old && old_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(old_state)) {
- ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_pabd);
ASSERT(!HDR_HAS_RABD(hdr));
/*
@@ -2758,7 +2758,7 @@ arc_buf_alloc_impl(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb,
VERIFY(hdr->b_type == ARC_BUFC_DATA ||
hdr->b_type == ARC_BUFC_METADATA);
ASSERT3P(ret, !=, NULL);
- ASSERT3P(*ret, ==, NULL);
+ ASSERT0P(*ret);
IMPLY(encrypted, compressed);
buf = *ret = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
@@ -2982,7 +2982,7 @@ static void
arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(arc_can_share(hdr, buf));
- ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_pabd);
ASSERT(!ARC_BUF_ENCRYPTED(buf));
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
@@ -3201,14 +3201,14 @@ arc_hdr_alloc_abd(arc_buf_hdr_t *hdr, int alloc_flags)
if (alloc_rdata) {
size = HDR_GET_PSIZE(hdr);
- ASSERT3P(hdr->b_crypt_hdr.b_rabd, ==, NULL);
+ ASSERT0P(hdr->b_crypt_hdr.b_rabd);
hdr->b_crypt_hdr.b_rabd = arc_get_data_abd(hdr, size, hdr,
alloc_flags);
ASSERT3P(hdr->b_crypt_hdr.b_rabd, !=, NULL);
ARCSTAT_INCR(arcstat_raw_size, size);
} else {
size = arc_hdr_size(hdr);
- ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_pabd);
hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, size, hdr,
alloc_flags);
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
@@ -3290,7 +3290,7 @@ arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
ASSERT(HDR_EMPTY(hdr));
#ifdef ZFS_DEBUG
- ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_freeze_cksum);
#endif
HDR_SET_PSIZE(hdr, psize);
HDR_SET_LSIZE(hdr, lsize);
@@ -3351,12 +3351,12 @@ arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
nhdr->b_l1hdr.b_state = arc_l2c_only;
/* Verify previous threads set to NULL before freeing */
- ASSERT3P(nhdr->b_l1hdr.b_pabd, ==, NULL);
+ ASSERT0P(nhdr->b_l1hdr.b_pabd);
ASSERT(!HDR_HAS_RABD(hdr));
} else {
- ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_buf);
#ifdef ZFS_DEBUG
- ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_freeze_cksum);
#endif
/*
@@ -3375,7 +3375,7 @@ arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
* might try to be accessed, even though it was removed.
*/
VERIFY(!HDR_L2_WRITING(hdr));
- VERIFY3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+ VERIFY0P(hdr->b_l1hdr.b_pabd);
ASSERT(!HDR_HAS_RABD(hdr));
arc_hdr_clear_flags(nhdr, ARC_FLAG_HAS_L1HDR);
@@ -3698,12 +3698,12 @@ arc_hdr_destroy(arc_buf_hdr_t *hdr)
arc_hdr_free_abd(hdr, B_TRUE);
}
- ASSERT3P(hdr->b_hash_next, ==, NULL);
+ ASSERT0P(hdr->b_hash_next);
if (HDR_HAS_L1HDR(hdr)) {
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
- ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_acb);
#ifdef ZFS_DEBUG
- ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_freeze_cksum);
#endif
kmem_cache_free(hdr_full_cache, hdr);
} else {
@@ -3771,7 +3771,7 @@ arc_evict_hdr(arc_buf_hdr_t *hdr, uint64_t *real_evicted)
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
- ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_buf);
ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
*real_evicted = 0;
@@ -3796,7 +3796,7 @@ arc_evict_hdr(arc_buf_hdr_t *hdr, uint64_t *real_evicted)
DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
if (HDR_HAS_L2HDR(hdr)) {
- ASSERT(hdr->b_l1hdr.b_pabd == NULL);
+ ASSERT0P(hdr->b_l1hdr.b_pabd);
ASSERT(!HDR_HAS_RABD(hdr));
/*
* This buffer is cached on the 2nd Level ARC;
@@ -5554,7 +5554,7 @@ static void
arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp)
{
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
- ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0);
+ ASSERT0(HDR_GET_PSIZE(hdr));
ASSERT3U(arc_hdr_get_compress(hdr), ==, ZIO_COMPRESS_OFF);
} else {
if (HDR_COMPRESSION_ENABLED(hdr)) {
@@ -6132,14 +6132,14 @@ top:
}
if (GHOST_STATE(hdr->b_l1hdr.b_state)) {
- ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_pabd);
ASSERT(!HDR_HAS_RABD(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT0(zfs_refcount_count(
&hdr->b_l1hdr.b_refcnt));
- ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_buf);
#ifdef ZFS_DEBUG
- ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_freeze_cksum);
#endif
} else if (HDR_IO_IN_PROGRESS(hdr)) {
/*
@@ -6233,7 +6233,7 @@ top:
acb->acb_nobuf = no_buf;
acb->acb_zb = *zb;
- ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_acb);
hdr->b_l1hdr.b_acb = acb;
if (HDR_HAS_L2HDR(hdr) &&
@@ -6717,7 +6717,7 @@ arc_release(arc_buf_t *buf, const void *tag)
nhdr = arc_hdr_alloc(spa, psize, lsize, protected,
compress, hdr->b_complevel, type);
- ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
+ ASSERT0P(nhdr->b_l1hdr.b_buf);
ASSERT0(zfs_refcount_count(&nhdr->b_l1hdr.b_refcnt));
VERIFY3U(nhdr->b_type, ==, type);
ASSERT(!HDR_SHARED_DATA(nhdr));
@@ -6804,7 +6804,7 @@ arc_write_ready(zio_t *zio)
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
}
- ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_pabd);
ASSERT(!HDR_HAS_RABD(hdr));
ASSERT(!HDR_SHARED_DATA(hdr));
ASSERT(!arc_buf_is_shared(buf));
@@ -6948,7 +6948,7 @@ arc_write_done(zio_t *zio)
arc_buf_t *buf = callback->awcb_buf;
arc_buf_hdr_t *hdr = buf->b_hdr;
- ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_acb);
if (zio->io_error == 0) {
arc_hdr_verify(hdr, zio->io_bp);
@@ -6973,7 +6973,7 @@ arc_write_done(zio_t *zio)
arc_buf_hdr_t *exists;
kmutex_t *hash_lock;
- ASSERT3U(zio->io_error, ==, 0);
+ ASSERT0(zio->io_error);
arc_cksum_verify(buf);
@@ -6994,7 +6994,7 @@ arc_write_done(zio_t *zio)
arc_hdr_destroy(exists);
mutex_exit(hash_lock);
exists = buf_hash_insert(hdr, &hash_lock);
- ASSERT3P(exists, ==, NULL);
+ ASSERT0P(exists);
} else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
/* nopwrite */
ASSERT(zio->io_prop.zp_nopwrite);
@@ -7007,7 +7007,7 @@ arc_write_done(zio_t *zio)
ASSERT(ARC_BUF_LAST(hdr->b_l1hdr.b_buf));
ASSERT(hdr->b_l1hdr.b_state == arc_anon);
ASSERT(BP_GET_DEDUP(zio->io_bp));
- ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
+ ASSERT0(BP_GET_LEVEL(zio->io_bp));
}
}
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
@@ -7044,7 +7044,7 @@ arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
ASSERT3P(done, !=, NULL);
ASSERT(!HDR_IO_ERROR(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
- ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_acb);
ASSERT3P(hdr->b_l1hdr.b_buf, !=, NULL);
if (uncached)
arc_hdr_set_flags(hdr, ARC_FLAG_UNCACHED);
@@ -7113,7 +7113,7 @@ arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF);
ASSERT(!arc_buf_is_shared(buf));
- ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
+ ASSERT0P(hdr->b_l1hdr.b_pabd);
zio = zio_write(pio, spa, txg, bp,
abd_get_from_buf(buf->b_data, HDR_GET_LSIZE(hdr)),
diff --git a/sys/contrib/openzfs/module/zfs/bpobj.c b/sys/contrib/openzfs/module/zfs/bpobj.c
index 0a8a077edf63..ea9fbd036c6e 100644
--- a/sys/contrib/openzfs/module/zfs/bpobj.c
+++ b/sys/contrib/openzfs/module/zfs/bpobj.c
@@ -160,8 +160,8 @@ bpobj_open(bpobj_t *bpo, objset_t *os, uint64_t object)
memset(bpo, 0, sizeof (*bpo));
mutex_init(&bpo->bpo_lock, NULL, MUTEX_DEFAULT, NULL);
- ASSERT(bpo->bpo_dbuf == NULL);
- ASSERT(bpo->bpo_phys == NULL);
+ ASSERT0P(bpo->bpo_dbuf);
+ ASSERT0P(bpo->bpo_phys);
ASSERT(object != 0);
ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ);
ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_BPOBJ_HDR);
@@ -478,7 +478,7 @@ bpobj_iterate_impl(bpobj_t *initial_bpo, bpobj_itor_t func, void *arg,
* We have unprocessed subobjs. Process the next one.
*/
ASSERT(bpo->bpo_havecomp);
- ASSERT3P(bpobj_size, ==, NULL);
+ ASSERT0P(bpobj_size);
/* Add the last subobj to stack. */
int64_t i = bpi->bpi_unprocessed_subobjs - 1;
diff --git a/sys/contrib/openzfs/module/zfs/btree.c b/sys/contrib/openzfs/module/zfs/btree.c
index aa282f711bc3..725b96a3b2c7 100644
--- a/sys/contrib/openzfs/module/zfs/btree.c
+++ b/sys/contrib/openzfs/module/zfs/btree.c
@@ -1110,7 +1110,7 @@ zfs_btree_add_idx(zfs_btree_t *tree, const void *value,
if (where->bti_node == NULL) {
ASSERT3U(tree->bt_num_elems, ==, 1);
ASSERT3S(tree->bt_height, ==, -1);
- ASSERT3P(tree->bt_root, ==, NULL);
+ ASSERT0P(tree->bt_root);
ASSERT0(where->bti_offset);
tree->bt_num_nodes++;
@@ -1947,7 +1947,7 @@ void
zfs_btree_destroy(zfs_btree_t *tree)
{
ASSERT0(tree->bt_num_elems);
- ASSERT3P(tree->bt_root, ==, NULL);
+ ASSERT0P(tree->bt_root);
}
/* Verify that every child of this node has the correct parent pointer. */
@@ -1969,10 +1969,10 @@ static void
zfs_btree_verify_pointers(zfs_btree_t *tree)
{
if (tree->bt_height == -1) {
- VERIFY3P(tree->bt_root, ==, NULL);
+ VERIFY0P(tree->bt_root);
return;
}
- VERIFY3P(tree->bt_root->bth_parent, ==, NULL);
+ VERIFY0P(tree->bt_root->bth_parent);
zfs_btree_verify_pointers_helper(tree, tree->bt_root);
}
diff --git a/sys/contrib/openzfs/module/zfs/dataset_kstats.c b/sys/contrib/openzfs/module/zfs/dataset_kstats.c
index d3baabd6169f..e5abcd2044cf 100644
--- a/sys/contrib/openzfs/module/zfs/dataset_kstats.c
+++ b/sys/contrib/openzfs/module/zfs/dataset_kstats.c
@@ -44,6 +44,7 @@ static dataset_kstat_values_t empty_dataset_kstats = {
{ "zil_commit_error_count", KSTAT_DATA_UINT64 },
{ "zil_commit_stall_count", KSTAT_DATA_UINT64 },
{ "zil_commit_suspend_count", KSTAT_DATA_UINT64 },
+ { "zil_commit_crash_count", KSTAT_DATA_UINT64 },
{ "zil_itx_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 },
diff --git a/sys/contrib/openzfs/module/zfs/dbuf.c b/sys/contrib/openzfs/module/zfs/dbuf.c
index 432c99cec960..3d0f88b36336 100644
--- a/sys/contrib/openzfs/module/zfs/dbuf.c
+++ b/sys/contrib/openzfs/module/zfs/dbuf.c
@@ -523,7 +523,7 @@ dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
return;
/* Only data blocks support the attachment of user data. */
- ASSERT(db->db_level == 0);
+ ASSERT0(db->db_level);
/* Clients must resolve a dbuf before attaching user data. */
ASSERT(db->db.db_data != NULL);
@@ -1128,8 +1128,8 @@ dbuf_verify(dmu_buf_impl_t *db)
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (dn == NULL) {
- ASSERT(db->db_parent == NULL);
- ASSERT(db->db_blkptr == NULL);
+ ASSERT0P(db->db_parent);
+ ASSERT0P(db->db_blkptr);
} else {
ASSERT3U(db->db.db_object, ==, dn->dn_object);
ASSERT3P(db->db_objset, ==, dn->dn_objset);
@@ -1180,7 +1180,7 @@ dbuf_verify(dmu_buf_impl_t *db)
/* db is pointed to by the dnode */
/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
- ASSERT(db->db_parent == NULL);
+ ASSERT0P(db->db_parent);
else
ASSERT(db->db_parent != NULL);
if (db->db_blkid != DMU_SPILL_BLKID)
@@ -1219,7 +1219,7 @@ dbuf_verify(dmu_buf_impl_t *db)
int i;
for (i = 0; i < db->db.db_size >> 3; i++) {
- ASSERT(buf[i] == 0);
+ ASSERT0(buf[i]);
}
} else {
blkptr_t *bps = db->db.db_data;
@@ -1259,7 +1259,7 @@ dbuf_clear_data(dmu_buf_impl_t *db)
{
ASSERT(MUTEX_HELD(&db->db_mtx));
dbuf_evict_user(db);
- ASSERT3P(db->db_buf, ==, NULL);
+ ASSERT0P(db->db_buf);
db->db.db_data = NULL;
if (db->db_state != DB_NOFILL) {
db->db_state = DB_UNCACHED;
@@ -1384,13 +1384,13 @@ dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
* All reads are synchronous, so we must have a hold on the dbuf
*/
ASSERT(zfs_refcount_count(&db->db_holds) > 0);
- ASSERT(db->db_buf == NULL);
- ASSERT(db->db.db_data == NULL);
+ ASSERT0P(db->db_buf);
+ ASSERT0P(db->db.db_data);
if (buf == NULL) {
/* i/o error */
ASSERT(zio == NULL || zio->io_error != 0);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
- ASSERT3P(db->db_buf, ==, NULL);
+ ASSERT0P(db->db_buf);
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "i/o error");
} else if (db->db_level == 0 && db->db_freed_in_flight) {
@@ -1584,7 +1584,7 @@ dbuf_read_impl(dmu_buf_impl_t *db, dnode_t *dn, zio_t *zio, dmu_flags_t flags,
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
- ASSERT(db->db_buf == NULL);
+ ASSERT0P(db->db_buf);
ASSERT(db->db_parent == NULL ||
RW_LOCK_HELD(&db->db_parent->db_rwlock));
@@ -1682,7 +1682,7 @@ dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(db->db.db_data != NULL);
- ASSERT(db->db_level == 0);
+ ASSERT0(db->db_level);
ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
if (dr == NULL ||
@@ -1901,8 +1901,8 @@ dbuf_noread(dmu_buf_impl_t *db, dmu_flags_t flags)
while (db->db_state == DB_READ || db->db_state == DB_FILL)
cv_wait(&db->db_changed, &db->db_mtx);
if (db->db_state == DB_UNCACHED) {
- ASSERT(db->db_buf == NULL);
- ASSERT(db->db.db_data == NULL);
+ ASSERT0P(db->db_buf);
+ ASSERT0P(db->db.db_data);
dbuf_set_data(db, dbuf_alloc_arcbuf(db));
db->db_state = DB_FILL;
DTRACE_SET_STATE(db, "assigning filled buffer");
@@ -1929,7 +1929,7 @@ dbuf_unoverride(dbuf_dirty_record_t *dr)
* comes from dbuf_dirty() callers who must also hold a range lock.
*/
ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
- ASSERT(db->db_level == 0);
+ ASSERT0(db->db_level);
if (db->db_blkid == DMU_BONUS_BLKID ||
dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
@@ -1994,7 +1994,7 @@ dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
mutex_enter(&dn->dn_dbufs_mtx);
db = avl_find(&dn->dn_dbufs, db_search, &where);
- ASSERT3P(db, ==, NULL);
+ ASSERT0P(db);
db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
@@ -2017,7 +2017,7 @@ dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
if (db->db_state == DB_UNCACHED ||
db->db_state == DB_NOFILL ||
db->db_state == DB_EVICTING) {
- ASSERT(db->db.db_data == NULL);
+ ASSERT0P(db->db.db_data);
mutex_exit(&db->db_mtx);
continue;
}
@@ -2896,8 +2896,8 @@ dmu_buf_will_clone_or_dio(dmu_buf_t *db_fake, dmu_tx_t *tx)
dbuf_clear_data(db);
}
- ASSERT3P(db->db_buf, ==, NULL);
- ASSERT3P(db->db.db_data, ==, NULL);
+ ASSERT0P(db->db_buf);
+ ASSERT0P(db->db.db_data);
db->db_state = DB_NOFILL;
DTRACE_SET_STATE(db,
@@ -2932,7 +2932,7 @@ dmu_buf_will_fill_flags(dmu_buf_t *db_fake, dmu_tx_t *tx, boolean_t canfail,
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(tx->tx_txg != 0);
- ASSERT(db->db_level == 0);
+ ASSERT0(db->db_level);
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
@@ -3144,7 +3144,7 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx,
{
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
- ASSERT(db->db_level == 0);
+ ASSERT0(db->db_level);
ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
ASSERT(buf != NULL);
ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
@@ -3209,7 +3209,7 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx,
VERIFY(!dbuf_undirty(db, tx));
db->db_state = DB_UNCACHED;
}
- ASSERT(db->db_buf == NULL);
+ ASSERT0P(db->db_buf);
dbuf_set_data(db, buf);
db->db_state = DB_FILL;
DTRACE_SET_STATE(db, "filling assigned arcbuf");
@@ -3269,7 +3269,7 @@ dbuf_destroy(dmu_buf_impl_t *db)
}
ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
- ASSERT(db->db_data_pending == NULL);
+ ASSERT0P(db->db_data_pending);
ASSERT(list_is_empty(&db->db_dirty_records));
db->db_state = DB_EVICTING;
@@ -3321,11 +3321,11 @@ dbuf_destroy(dmu_buf_impl_t *db)
db->db_parent = NULL;
- ASSERT(db->db_buf == NULL);
- ASSERT(db->db.db_data == NULL);
- ASSERT(db->db_hash_next == NULL);
- ASSERT(db->db_blkptr == NULL);
- ASSERT(db->db_data_pending == NULL);
+ ASSERT0P(db->db_buf);
+ ASSERT0P(db->db.db_data);
+ ASSERT0P(db->db_hash_next);
+ ASSERT0P(db->db_blkptr);
+ ASSERT0P(db->db_data_pending);
ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
ASSERT(!multilist_link_active(&db->db_cache_link));
@@ -3960,7 +3960,7 @@ dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
if (fail_uncached)
return (SET_ERROR(ENOENT));
- ASSERT3P(parent, ==, NULL);
+ ASSERT0P(parent);
err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
if (fail_sparse) {
if (err == 0 && bp && BP_IS_HOLE(bp))
@@ -4064,7 +4064,7 @@ dbuf_create_bonus(dnode_t *dn)
{
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
- ASSERT(dn->dn_bonus == NULL);
+ ASSERT0P(dn->dn_bonus);
dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL,
dbuf_hash(dn->dn_objset, dn->dn_object, 0, DMU_BONUS_BLKID));
dn->dn_bonus->db_pending_evict = FALSE;
@@ -4416,7 +4416,7 @@ dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
* inappropriate to hook it in (i.e., nlevels mismatch).
*/
ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
- ASSERT(db->db_parent == NULL);
+ ASSERT0P(db->db_parent);
db->db_parent = dn->dn_dbuf;
db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
DBUF_VERIFY(db);
@@ -4477,7 +4477,7 @@ dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
- ASSERT3U(db->db_level, ==, 0);
+ ASSERT0(db->db_level);
if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
zbookmark_phys_t zb;
@@ -4588,7 +4588,7 @@ dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
/* ensure that everything is zero after our data */
for (; datap_end < datap_max; datap_end++)
- ASSERT(*datap_end == 0);
+ ASSERT0(*datap_end);
#endif
}
@@ -4596,7 +4596,7 @@ static blkptr_t *
dbuf_lightweight_bp(dbuf_dirty_record_t *dr)
{
/* This must be a lightweight dirty record. */
- ASSERT3P(dr->dr_dbuf, ==, NULL);
+ ASSERT0P(dr->dr_dbuf);
dnode_t *dn = dr->dr_dnode;
if (dn->dn_phys->dn_nlevels == 1) {
@@ -4739,7 +4739,7 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
*/
if (db->db_state == DB_UNCACHED) {
/* This buffer has been freed since it was dirtied */
- ASSERT3P(db->db.db_data, ==, NULL);
+ ASSERT0P(db->db.db_data);
} else if (db->db_state == DB_FILL) {
/* This buffer was freed and is now being re-filled */
ASSERT(db->db.db_data != dr->dt.dl.dr_data);
@@ -4756,9 +4756,9 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
*/
dbuf_dirty_record_t *dr_head =
list_head(&db->db_dirty_records);
- ASSERT3P(db->db_buf, ==, NULL);
- ASSERT3P(db->db.db_data, ==, NULL);
- ASSERT3P(dr_head->dt.dl.dr_data, ==, NULL);
+ ASSERT0P(db->db_buf);
+ ASSERT0P(db->db.db_data);
+ ASSERT0P(dr_head->dt.dl.dr_data);
ASSERT3U(dr_head->dt.dl.dr_override_state, ==, DR_OVERRIDDEN);
} else {
ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
diff --git a/sys/contrib/openzfs/module/zfs/ddt.c b/sys/contrib/openzfs/module/zfs/ddt.c
index e0b9fc3951ff..d6658375f810 100644
--- a/sys/contrib/openzfs/module/zfs/ddt.c
+++ b/sys/contrib/openzfs/module/zfs/ddt.c
@@ -397,7 +397,7 @@ ddt_object_create(ddt_t *ddt, ddt_type_t type, ddt_class_t class,
ddt_object_name(ddt, type, class, name);
- ASSERT3U(*objectp, ==, 0);
+ ASSERT0(*objectp);
VERIFY0(ddt_ops[type]->ddt_op_create(os, objectp, tx, prehash));
ASSERT3U(*objectp, !=, 0);
@@ -1011,7 +1011,7 @@ ddt_free(const ddt_t *ddt, ddt_entry_t *dde)
{
if (dde->dde_io != NULL) {
for (int p = 0; p < DDT_NPHYS(ddt); p++)
- ASSERT3P(dde->dde_io->dde_lead_zio[p], ==, NULL);
+ ASSERT0P(dde->dde_io->dde_lead_zio[p]);
if (dde->dde_io->dde_repair_abd != NULL)
abd_free(dde->dde_io->dde_repair_abd);
@@ -1421,7 +1421,7 @@ ddt_key_compare(const void *x1, const void *x2)
static void
ddt_create_dir(ddt_t *ddt, dmu_tx_t *tx)
{
- ASSERT3U(ddt->ddt_dir_object, ==, 0);
+ ASSERT0(ddt->ddt_dir_object);
ASSERT3U(ddt->ddt_version, ==, DDT_VERSION_FDT);
char name[DDT_NAMELEN];
@@ -2395,7 +2395,7 @@ ddt_sync(spa_t *spa, uint64_t txg)
* scan's root zio here so that we can wait for any scan IOs in
* addition to the regular ddt IOs.
*/
- ASSERT3P(scn->scn_zio_root, ==, NULL);
+ ASSERT0P(scn->scn_zio_root);
scn->scn_zio_root = rio;
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
diff --git a/sys/contrib/openzfs/module/zfs/ddt_log.c b/sys/contrib/openzfs/module/zfs/ddt_log.c
index dbd381aa9609..3d30e244c1f7 100644
--- a/sys/contrib/openzfs/module/zfs/ddt_log.c
+++ b/sys/contrib/openzfs/module/zfs/ddt_log.c
@@ -116,7 +116,7 @@ static void
ddt_log_create_one(ddt_t *ddt, ddt_log_t *ddl, uint_t n, dmu_tx_t *tx)
{
ASSERT3U(ddt->ddt_dir_object, >, 0);
- ASSERT3U(ddl->ddl_object, ==, 0);
+ ASSERT0(ddl->ddl_object);
char name[DDT_NAMELEN];
ddt_log_name(ddt, name, n);
@@ -194,7 +194,7 @@ void
ddt_log_begin(ddt_t *ddt, size_t nentries, dmu_tx_t *tx, ddt_log_update_t *dlu)
{
ASSERT3U(nentries, >, 0);
- ASSERT3P(dlu->dlu_dbp, ==, NULL);
+ ASSERT0P(dlu->dlu_dbp);
if (ddt->ddt_log_active->ddl_object == 0)
ddt_log_create(ddt, tx);
@@ -748,8 +748,8 @@ ddt_log_load(ddt_t *ddt)
void
ddt_log_alloc(ddt_t *ddt)
{
- ASSERT3P(ddt->ddt_log_active, ==, NULL);
- ASSERT3P(ddt->ddt_log_flushing, ==, NULL);
+ ASSERT0P(ddt->ddt_log_active);
+ ASSERT0P(ddt->ddt_log_flushing);
avl_create(&ddt->ddt_log[0].ddl_tree, ddt_key_compare,
sizeof (ddt_log_entry_t), offsetof(ddt_log_entry_t, ddle_node));
diff --git a/sys/contrib/openzfs/module/zfs/dmu.c b/sys/contrib/openzfs/module/zfs/dmu.c
index 296e58ef9cd8..f7f808d5b8f7 100644
--- a/sys/contrib/openzfs/module/zfs/dmu.c
+++ b/sys/contrib/openzfs/module/zfs/dmu.c
@@ -1343,7 +1343,7 @@ dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
if (size == 0)
return;
- VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
+ VERIFY0(dmu_buf_hold_array(os, object, offset, size,
FALSE, FTAG, &numbufs, &dbp));
for (i = 0; i < numbufs; i++) {
@@ -1872,7 +1872,7 @@ dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
*/
BP_SET_LSIZE(bp, db->db_size);
} else if (!BP_IS_EMBEDDED(bp)) {
- ASSERT(BP_GET_LEVEL(bp) == 0);
+ ASSERT0(BP_GET_LEVEL(bp));
BP_SET_FILL(bp, 1);
}
}
@@ -2405,7 +2405,7 @@ dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
}
}
} else if (wp & WP_NOFILL) {
- ASSERT(level == 0);
+ ASSERT0(level);
/*
* If we're writing preallocated blocks, we aren't actually
@@ -2865,7 +2865,7 @@ byteswap_uint64_array(void *vbuf, size_t size)
size_t count = size >> 3;
int i;
- ASSERT((size & 7) == 0);
+ ASSERT0((size & 7));
for (i = 0; i < count; i++)
buf[i] = BSWAP_64(buf[i]);
@@ -2878,7 +2878,7 @@ byteswap_uint32_array(void *vbuf, size_t size)
size_t count = size >> 2;
int i;
- ASSERT((size & 3) == 0);
+ ASSERT0((size & 3));
for (i = 0; i < count; i++)
buf[i] = BSWAP_32(buf[i]);
@@ -2891,7 +2891,7 @@ byteswap_uint16_array(void *vbuf, size_t size)
size_t count = size >> 1;
int i;
- ASSERT((size & 1) == 0);
+ ASSERT0((size & 1));
for (i = 0; i < count; i++)
buf[i] = BSWAP_16(buf[i]);
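The byteswap_uint*_array() assertions use the usual bitmask test for alignment to a power-of-two element size: (size & (n - 1)) == 0 exactly when size is a multiple of n. A tiny stand-alone check of that identity, included here only as illustration and not part of the patch:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* elem must be a power of two for the mask trick to be valid. */
static int
is_multiple_of(size_t size, size_t elem)
{
	return ((size & (elem - 1)) == 0);
}

int
main(void)
{
	assert(is_multiple_of(64, 8));		/* like ASSERT0(size & 7) */
	assert(!is_multiple_of(65, 8));
	assert(is_multiple_of(26, 2));		/* like ASSERT0(size & 1) */
	(void) printf("alignment identities hold\n");
	return (0);
}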
diff --git a/sys/contrib/openzfs/module/zfs/dmu_direct.c b/sys/contrib/openzfs/module/zfs/dmu_direct.c
index 930ff101eca3..d44c686088fc 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_direct.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_direct.c
@@ -95,9 +95,9 @@ dmu_write_direct_done(zio_t *zio)
abd_free(zio->io_abd);
mutex_enter(&db->db_mtx);
- ASSERT3P(db->db_buf, ==, NULL);
- ASSERT3P(dr->dt.dl.dr_data, ==, NULL);
- ASSERT3P(db->db.db_data, ==, NULL);
+ ASSERT0P(db->db_buf);
+ ASSERT0P(dr->dt.dl.dr_data);
+ ASSERT0P(db->db.db_data);
db->db_state = DB_UNCACHED;
mutex_exit(&db->db_mtx);
diff --git a/sys/contrib/openzfs/module/zfs/dmu_object.c b/sys/contrib/openzfs/module/zfs/dmu_object.c
index b4ff7d224cc9..207cc6d0e713 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_object.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_object.c
@@ -90,7 +90,7 @@ dmu_object_alloc_impl(objset_t *os, dmu_object_type_t ot, int blocksize,
if (allocated_dnode != NULL) {
ASSERT3P(tag, !=, NULL);
} else {
- ASSERT3P(tag, ==, NULL);
+ ASSERT0P(tag);
tag = FTAG;
}
diff --git a/sys/contrib/openzfs/module/zfs/dmu_objset.c b/sys/contrib/openzfs/module/zfs/dmu_objset.c
index c135f620800f..a77f338bdfd3 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_objset.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_objset.c
@@ -724,7 +724,7 @@ dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
if (err == 0) {
mutex_enter(&ds->ds_lock);
- ASSERT(ds->ds_objset == NULL);
+ ASSERT0P(ds->ds_objset);
ds->ds_objset = os;
mutex_exit(&ds->ds_lock);
}
@@ -2226,7 +2226,7 @@ dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
rf |= DB_RF_HAVESTRUCT;
error = dmu_spill_hold_by_dnode(dn, rf,
FTAG, (dmu_buf_t **)&db);
- ASSERT(error == 0);
+ ASSERT0(error);
mutex_enter(&db->db_mtx);
data = (before) ? db->db.db_data :
dmu_objset_userquota_find_data(db, tx);
diff --git a/sys/contrib/openzfs/module/zfs/dmu_recv.c b/sys/contrib/openzfs/module/zfs/dmu_recv.c
index 73227b58c140..45c7af2bdcd2 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_recv.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_recv.c
@@ -866,7 +866,7 @@ dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
*/
if (dcp == NULL && drrb->drr_fromguid == 0 &&
drba->drba_origin == NULL) {
- ASSERT3P(dcp, ==, NULL);
+ ASSERT0P(dcp);
dcp = &dummy_dcp;
if (featureflags & DMU_BACKUP_FEATURE_RAW)
@@ -881,7 +881,7 @@ dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
if (drba->drba_cookie->drc_fromsnapobj != 0) {
VERIFY0(dsl_dataset_hold_obj(dp,
drba->drba_cookie->drc_fromsnapobj, FTAG, &snap));
- ASSERT3P(dcp, ==, NULL);
+ ASSERT0P(dcp);
}
if (drc->drc_heal) {
/* When healing we want to use the provided snapshot */
@@ -905,7 +905,7 @@ dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
if (drba->drba_origin != NULL) {
VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
FTAG, &origin));
- ASSERT3P(dcp, ==, NULL);
+ ASSERT0P(dcp);
}
/* Create new dataset. */
@@ -2792,7 +2792,7 @@ receive_read_payload_and_next_header(dmu_recv_cookie_t *drc, int len, void *buf)
drc->drc_rrd->bytes_read = drc->drc_bytes_read;
}
} else {
- ASSERT3P(buf, ==, NULL);
+ ASSERT0P(buf);
}
drc->drc_prev_cksum = drc->drc_cksum;
@@ -3450,7 +3450,7 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, offset_t *voffp)
break;
}
- ASSERT3P(drc->drc_rrd, ==, NULL);
+ ASSERT0P(drc->drc_rrd);
drc->drc_rrd = drc->drc_next_rrd;
drc->drc_next_rrd = NULL;
/* Allocates and loads header into drc->drc_next_rrd */
@@ -3468,7 +3468,7 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, offset_t *voffp)
drc->drc_rrd = NULL;
}
- ASSERT3P(drc->drc_rrd, ==, NULL);
+ ASSERT0P(drc->drc_rrd);
drc->drc_rrd = kmem_zalloc(sizeof (*drc->drc_rrd), KM_SLEEP);
drc->drc_rrd->eos_marker = B_TRUE;
bqueue_enqueue_flush(&rwa->q, drc->drc_rrd, 1);
diff --git a/sys/contrib/openzfs/module/zfs/dmu_redact.c b/sys/contrib/openzfs/module/zfs/dmu_redact.c
index 9226ac9e4b80..5a22ed71a5fe 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_redact.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_redact.c
@@ -1067,7 +1067,7 @@ dmu_redact_snap(const char *snapname, nvlist_t *redactnvl,
}
if (err != 0)
goto out;
- VERIFY3P(nvlist_next_nvpair(redactnvl, pair), ==, NULL);
+ VERIFY0P(nvlist_next_nvpair(redactnvl, pair));
boolean_t resuming = B_FALSE;
zfs_bookmark_phys_t bookmark;
diff --git a/sys/contrib/openzfs/module/zfs/dmu_send.c b/sys/contrib/openzfs/module/zfs/dmu_send.c
index deeba29e159a..8ecb99d5f57c 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_send.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_send.c
@@ -962,7 +962,7 @@ do_dump(dmu_send_cookie_t *dscp, struct send_range *range)
char *data = NULL;
if (srdp->abd != NULL) {
data = abd_to_buf(srdp->abd);
- ASSERT3P(srdp->abuf, ==, NULL);
+ ASSERT0P(srdp->abuf);
} else if (srdp->abuf != NULL) {
data = srdp->abuf->b_data;
}
@@ -2514,7 +2514,7 @@ dmu_send_impl(struct dmu_send_params *dspp)
* list in the stream.
*/
if (dspp->numfromredactsnaps != NUM_SNAPS_NOT_REDACTED) {
- ASSERT3P(from_rl, ==, NULL);
+ ASSERT0P(from_rl);
fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_FROM_SNAPS,
dspp->fromredactsnaps, (uint_t)dspp->numfromredactsnaps);
if (dspp->numfromredactsnaps > 0) {
@@ -2891,7 +2891,7 @@ dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
&fromds);
if (err != 0) {
- ASSERT3P(fromds, ==, NULL);
+ ASSERT0P(fromds);
} else {
/*
* We need to make a deep copy of the redact
diff --git a/sys/contrib/openzfs/module/zfs/dmu_tx.c b/sys/contrib/openzfs/module/zfs/dmu_tx.c
index d85d8b89423e..40c0b3402a05 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_tx.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_tx.c
@@ -126,7 +126,7 @@ dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
* problem, but there's no way for it to happen (for
* now, at least).
*/
- ASSERT(dn->dn_assigned_txg == 0);
+ ASSERT0(dn->dn_assigned_txg);
dn->dn_assigned_txg = tx->tx_txg;
(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
mutex_exit(&dn->dn_mtx);
@@ -443,7 +443,7 @@ dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
dnode_t *dn = txh->txh_dnode;
int err;
- ASSERT(tx->tx_txg == 0);
+ ASSERT0(tx->tx_txg);
if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz)
return;
@@ -607,7 +607,7 @@ dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
dnode_t *dn = txh->txh_dnode;
int err;
- ASSERT(tx->tx_txg == 0);
+ ASSERT0(tx->tx_txg);
dmu_tx_count_dnode(txh);
@@ -681,7 +681,7 @@ dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
dmu_tx_hold_t *txh;
- ASSERT(tx->tx_txg == 0);
+ ASSERT0(tx->tx_txg);
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
object, THT_BONUS, 0, 0);
@@ -706,7 +706,7 @@ dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
dmu_tx_hold_t *txh;
- ASSERT(tx->tx_txg == 0);
+ ASSERT0(tx->tx_txg);
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
DMU_NEW_OBJECT, THT_SPACE, space, 0);
@@ -1232,7 +1232,7 @@ dmu_tx_assign(dmu_tx_t *tx, dmu_tx_flag_t flags)
{
int err;
- ASSERT(tx->tx_txg == 0);
+ ASSERT0(tx->tx_txg);
ASSERT0(flags & ~(DMU_TX_WAIT | DMU_TX_NOTHROTTLE | DMU_TX_SUSPEND));
IMPLY(flags & DMU_TX_SUSPEND, flags & DMU_TX_WAIT);
ASSERT(!dsl_pool_sync_context(tx->tx_pool));
@@ -1328,7 +1328,7 @@ dmu_tx_wait(dmu_tx_t *tx)
dsl_pool_t *dp = tx->tx_pool;
hrtime_t before;
- ASSERT(tx->tx_txg == 0);
+ ASSERT0(tx->tx_txg);
ASSERT(!dsl_pool_config_held(tx->tx_pool));
/*
@@ -1644,12 +1644,12 @@ dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
- ASSERT(tx->tx_txg == 0);
+ ASSERT0(tx->tx_txg);
dmu_tx_hold_spill(tx, object);
} else {
DB_DNODE_ENTER(db);
if (DB_DNODE(db)->dn_have_spill) {
- ASSERT(tx->tx_txg == 0);
+ ASSERT0(tx->tx_txg);
dmu_tx_hold_spill(tx, object);
}
DB_DNODE_EXIT(db);
diff --git a/sys/contrib/openzfs/module/zfs/dnode.c b/sys/contrib/openzfs/module/zfs/dnode.c
index 451e1533efa0..963ff41232a3 100644
--- a/sys/contrib/openzfs/module/zfs/dnode.c
+++ b/sys/contrib/openzfs/module/zfs/dnode.c
@@ -214,7 +214,7 @@ dnode_dest(void *arg, void *unused)
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
- ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
+ ASSERT0P(dn->dn_free_ranges[i]);
list_destroy(&dn->dn_dirty_records[i]);
ASSERT0(dn->dn_next_nblkptr[i]);
ASSERT0(dn->dn_next_nlevels[i]);
@@ -231,10 +231,10 @@ dnode_dest(void *arg, void *unused)
ASSERT0(dn->dn_assigned_txg);
ASSERT0(dn->dn_dirty_txg);
ASSERT0(dn->dn_dirtyctx);
- ASSERT3P(dn->dn_dirtyctx_firstset, ==, NULL);
- ASSERT3P(dn->dn_bonus, ==, NULL);
+ ASSERT0P(dn->dn_dirtyctx_firstset);
+ ASSERT0P(dn->dn_bonus);
ASSERT(!dn->dn_have_spill);
- ASSERT3P(dn->dn_zio, ==, NULL);
+ ASSERT0P(dn->dn_zio);
ASSERT0(dn->dn_oldused);
ASSERT0(dn->dn_oldflags);
ASSERT0(dn->dn_olduid);
@@ -318,7 +318,7 @@ dnode_kstats_update(kstat_t *ksp, int rw)
void
dnode_init(void)
{
- ASSERT(dnode_cache == NULL);
+ ASSERT0P(dnode_cache);
dnode_cache = kmem_cache_create("dnode_t", sizeof (dnode_t),
0, dnode_cons, dnode_dest, NULL, NULL, NULL, KMC_RECLAIMABLE);
kmem_cache_set_move(dnode_cache, dnode_move);
@@ -509,7 +509,7 @@ dnode_buf_byteswap(void *vbuf, size_t size)
int i = 0;
ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT));
- ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0);
+ ASSERT0((size & (sizeof (dnode_phys_t)-1)));
while (i < size) {
dnode_phys_t *dnp = (void *)(((char *)vbuf) + i);
@@ -673,7 +673,7 @@ dnode_destroy(dnode_t *dn)
objset_t *os = dn->dn_objset;
boolean_t complete_os_eviction = B_FALSE;
- ASSERT((dn->dn_id_flags & DN_ID_NEW_EXIST) == 0);
+ ASSERT0((dn->dn_id_flags & DN_ID_NEW_EXIST));
mutex_enter(&os->os_lock);
POINTER_INVALIDATE(&dn->dn_objset);
@@ -780,7 +780,7 @@ dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
ASSERT0(dn->dn_next_maxblkid[i]);
ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
ASSERT3P(list_head(&dn->dn_dirty_records[i]), ==, NULL);
- ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
+ ASSERT0P(dn->dn_free_ranges[i]);
}
dn->dn_type = ot;
@@ -958,7 +958,7 @@ dnode_move_impl(dnode_t *odn, dnode_t *ndn)
ndn->dn_dirty_txg = odn->dn_dirty_txg;
ndn->dn_dirtyctx = odn->dn_dirtyctx;
ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset;
- ASSERT(zfs_refcount_count(&odn->dn_tx_holds) == 0);
+ ASSERT0(zfs_refcount_count(&odn->dn_tx_holds));
zfs_refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
ASSERT(avl_is_empty(&ndn->dn_dbufs));
avl_swap(&ndn->dn_dbufs, &odn->dn_dbufs);
@@ -2304,7 +2304,7 @@ dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
if ((off >> blkshift) > dn->dn_maxblkid)
return;
} else {
- ASSERT(dn->dn_maxblkid == 0);
+ ASSERT0(dn->dn_maxblkid);
if (off == 0 && len >= blksz) {
/*
* Freeing the whole block; fast-track this request.
@@ -2524,7 +2524,7 @@ dnode_diduse_space(dnode_t *dn, int64_t delta)
}
space += delta;
if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_DNODE_BYTES) {
- ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0);
+ ASSERT0((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES));
ASSERT0(P2PHASE(space, 1<<DEV_BSHIFT));
dn->dn_phys->dn_used = space >> DEV_BSHIFT;
} else {
diff --git a/sys/contrib/openzfs/module/zfs/dnode_sync.c b/sys/contrib/openzfs/module/zfs/dnode_sync.c
index 4067f221f1bf..046ceddb3609 100644
--- a/sys/contrib/openzfs/module/zfs/dnode_sync.c
+++ b/sys/contrib/openzfs/module/zfs/dnode_sync.c
@@ -209,8 +209,8 @@ free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
rw_exit(&dn->dn_struct_rwlock);
if (err == ENOENT)
continue;
- ASSERT(err == 0);
- ASSERT(child->db_level == 0);
+ ASSERT0(err);
+ ASSERT0(child->db_level);
dr = dbuf_find_dirty_eq(child, txg);
/* data_old better be zeroed */
@@ -868,7 +868,7 @@ dnode_sync(dnode_t *dn, dmu_tx_t *tx)
dbuf_sync_list(list, dn->dn_phys->dn_nlevels - 1, tx);
if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
- ASSERT3P(list_head(list), ==, NULL);
+ ASSERT0P(list_head(list));
dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
}
diff --git a/sys/contrib/openzfs/module/zfs/dsl_bookmark.c b/sys/contrib/openzfs/module/zfs/dsl_bookmark.c
index fdc8b7b198f0..ee574c499f9f 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_bookmark.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_bookmark.c
@@ -243,7 +243,7 @@ dsl_bookmark_create_check_impl(dsl_pool_t *dp,
/* error is retval of the following if-cascade */
if (strchr(source, '@') != NULL) {
dsl_dataset_t *source_snap_ds;
- ASSERT3S(snapshot_namecheck(source, NULL, NULL), ==, 0);
+ ASSERT0(snapshot_namecheck(source, NULL, NULL));
error = dsl_dataset_hold(dp, source, FTAG, &source_snap_ds);
if (error == 0) {
VERIFY(source_snap_ds->ds_is_snapshot);
@@ -258,7 +258,7 @@ dsl_bookmark_create_check_impl(dsl_pool_t *dp,
}
} else if (strchr(source, '#') != NULL) {
zfs_bookmark_phys_t source_phys;
- ASSERT3S(bookmark_namecheck(source, NULL, NULL), ==, 0);
+ ASSERT0(bookmark_namecheck(source, NULL, NULL));
/*
* Source must exists and be an earlier point in newbm_ds's
* timeline (newbm_ds's origin may be a snap of source's ds)
@@ -501,7 +501,7 @@ dsl_bookmark_create_sync_impl_snap(const char *bookmark, const char *snapshot,
sizeof (uint64_t) * num_redact_snaps);
local_rl->rl_phys->rlp_num_snaps = num_redact_snaps;
if (bookmark_redacted) {
- ASSERT3P(redaction_list, ==, NULL);
+ ASSERT0P(redaction_list);
local_rl->rl_phys->rlp_last_blkid = UINT64_MAX;
local_rl->rl_phys->rlp_last_object = UINT64_MAX;
dsl_redaction_list_long_rele(local_rl, tag);
diff --git a/sys/contrib/openzfs/module/zfs/dsl_crypt.c b/sys/contrib/openzfs/module/zfs/dsl_crypt.c
index 6b6bb8d45b6b..f519b937edc0 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_crypt.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_crypt.c
@@ -534,7 +534,7 @@ out:
static void
dsl_crypto_key_free(dsl_crypto_key_t *dck)
{
- ASSERT(zfs_refcount_count(&dck->dck_holds) == 0);
+ ASSERT0(zfs_refcount_count(&dck->dck_holds));
/* destroy the zio_crypt_key_t */
zio_crypt_key_destroy(&dck->dck_key);
@@ -1912,7 +1912,7 @@ dsl_dataset_create_crypt_sync(uint64_t dsobj, dsl_dir_t *dd,
/* clones always use their origin's wrapping key */
if (dsl_dir_is_clone(dd)) {
- ASSERT3P(dcp, ==, NULL);
+ ASSERT0P(dcp);
/*
* If this is an encrypted clone we just need to clone the
diff --git a/sys/contrib/openzfs/module/zfs/dsl_dataset.c b/sys/contrib/openzfs/module/zfs/dsl_dataset.c
index b767c9641419..420687480a76 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_dataset.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_dataset.c
@@ -450,7 +450,7 @@ dsl_dataset_evict_sync(void *dbu)
{
dsl_dataset_t *ds = dbu;
- ASSERT(ds->ds_owner == NULL);
+ ASSERT0P(ds->ds_owner);
unique_remove(ds->ds_fsid_guid);
}
@@ -460,7 +460,7 @@ dsl_dataset_evict_async(void *dbu)
{
dsl_dataset_t *ds = dbu;
- ASSERT(ds->ds_owner == NULL);
+ ASSERT0P(ds->ds_owner);
ds->ds_dbuf = NULL;
@@ -1187,7 +1187,7 @@ dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
ASSERT(origin == NULL || dsl_dataset_phys(origin)->ds_num_children > 0);
ASSERT(dmu_tx_is_syncing(tx));
- ASSERT(dsl_dir_phys(dd)->dd_head_dataset_obj == 0);
+ ASSERT0(dsl_dir_phys(dd)->dd_head_dataset_obj);
dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
@@ -2112,7 +2112,7 @@ dsl_dataset_sync(dsl_dataset_t *ds, zio_t *rio, dmu_tx_t *tx)
{
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(ds->ds_objset != NULL);
- ASSERT(dsl_dataset_phys(ds)->ds_next_snap_obj == 0);
+ ASSERT0(dsl_dataset_phys(ds)->ds_next_snap_obj);
/*
* in case we had to change ds_fsid_guid when we opened it,
@@ -4180,7 +4180,7 @@ dsl_dataset_clone_swap_sync_impl(dsl_dataset_t *clone,
dsl_pool_t *dp = dmu_tx_pool(tx);
int64_t unused_refres_delta;
- ASSERT(clone->ds_reserved == 0);
+ ASSERT0(clone->ds_reserved);
/*
* NOTE: On DEBUG kernels there could be a race between this and
* the check function if spa_asize_inflation is adjusted...
diff --git a/sys/contrib/openzfs/module/zfs/dsl_deadlist.c b/sys/contrib/openzfs/module/zfs/dsl_deadlist.c
index 9ffc998ac173..41ac72bf1c16 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_deadlist.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_deadlist.c
@@ -1037,7 +1037,7 @@ dsl_livelist_iterate(void *arg, const blkptr_t *bp, boolean_t bp_freed,
avl_tree_t *avl = lia->avl;
bplist_t *to_free = lia->to_free;
zthr_t *t = lia->t;
- ASSERT(tx == NULL);
+ ASSERT0P(tx);
if ((t != NULL) && (zthr_has_waiters(t) || zthr_iscancelled(t)))
return (SET_ERROR(EINTR));
@@ -1049,7 +1049,8 @@ dsl_livelist_iterate(void *arg, const blkptr_t *bp, boolean_t bp_freed,
ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(&found->le_bp));
ASSERT3U(BP_GET_CHECKSUM(bp), ==,
BP_GET_CHECKSUM(&found->le_bp));
- ASSERT3U(BP_GET_BIRTH(bp), ==, BP_GET_BIRTH(&found->le_bp));
+ ASSERT3U(BP_GET_PHYSICAL_BIRTH(bp), ==,
+ BP_GET_PHYSICAL_BIRTH(&found->le_bp));
}
if (bp_freed) {
if (found == NULL) {
diff --git a/sys/contrib/openzfs/module/zfs/dsl_deleg.c b/sys/contrib/openzfs/module/zfs/dsl_deleg.c
index c01a06e98340..fdd37b36e280 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_deleg.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_deleg.c
@@ -102,7 +102,7 @@ dsl_deleg_can_allow(char *ddname, nvlist_t *nvp, cred_t *cr)
nvlist_t *perms;
nvpair_t *permpair = NULL;
- VERIFY(nvpair_value_nvlist(whopair, &perms) == 0);
+ VERIFY0(nvpair_value_nvlist(whopair, &perms));
while ((permpair = nvlist_next_nvpair(perms, permpair))) {
const char *perm = nvpair_name(permpair);
@@ -189,8 +189,7 @@ dsl_deleg_set_sync(void *arg, dmu_tx_t *tx)
const char *perm = nvpair_name(permpair);
uint64_t n = 0;
- VERIFY(zap_update(mos, jumpobj,
- perm, 8, 1, &n, tx) == 0);
+ VERIFY0(zap_update(mos, jumpobj, perm, 8, 1, &n, tx));
spa_history_log_internal_dd(dd, "permission update", tx,
"%s %s", whokey, perm);
}
@@ -225,7 +224,7 @@ dsl_deleg_unset_sync(void *arg, dmu_tx_t *tx)
if (zap_lookup(mos, zapobj, whokey, 8,
1, &jumpobj) == 0) {
(void) zap_remove(mos, zapobj, whokey, tx);
- VERIFY(0 == zap_destroy(mos, jumpobj, tx));
+ VERIFY0(zap_destroy(mos, jumpobj, tx));
}
spa_history_log_internal_dd(dd, "permission who remove",
tx, "%s", whokey);
@@ -243,7 +242,7 @@ dsl_deleg_unset_sync(void *arg, dmu_tx_t *tx)
if (zap_count(mos, jumpobj, &n) == 0 && n == 0) {
(void) zap_remove(mos, zapobj,
whokey, tx);
- VERIFY(0 == zap_destroy(mos,
+ VERIFY0(zap_destroy(mos,
jumpobj, tx));
}
spa_history_log_internal_dd(dd, "permission remove", tx,
@@ -332,7 +331,7 @@ dsl_deleg_get(const char *ddname, nvlist_t **nvp)
basezc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
baseza = zap_attribute_alloc();
source = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
- VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
for (dd = startdd; dd != NULL; dd = dd->dd_parent) {
nvlist_t *sp_nvp;
@@ -706,7 +705,7 @@ copy_create_perms(dsl_dir_t *dd, uint64_t pzapobj,
ZFS_DELEG_LOCAL, &uid);
if (zap_lookup(mos, zapobj, whokey, 8, 1, &jumpobj) == ENOENT) {
jumpobj = zap_create(mos, DMU_OT_DSL_PERMS, DMU_OT_NONE, 0, tx);
- VERIFY(zap_add(mos, zapobj, whokey, 8, 1, &jumpobj, tx) == 0);
+ VERIFY0(zap_add(mos, zapobj, whokey, 8, 1, &jumpobj, tx));
}
za = zap_attribute_alloc();
@@ -716,8 +715,7 @@ copy_create_perms(dsl_dir_t *dd, uint64_t pzapobj,
uint64_t zero = 0;
ASSERT(za->za_integer_length == 8 && za->za_num_integers == 1);
- VERIFY(zap_update(mos, jumpobj, za->za_name,
- 8, 1, &zero, tx) == 0);
+ VERIFY0(zap_update(mos, jumpobj, za->za_name, 8, 1, &zero, tx));
}
zap_cursor_fini(&zc);
zap_attribute_free(za);
@@ -761,10 +759,10 @@ dsl_deleg_destroy(objset_t *mos, uint64_t zapobj, dmu_tx_t *tx)
zap_cursor_retrieve(&zc, za) == 0;
zap_cursor_advance(&zc)) {
ASSERT(za->za_integer_length == 8 && za->za_num_integers == 1);
- VERIFY(0 == zap_destroy(mos, za->za_first_integer, tx));
+ VERIFY0(zap_destroy(mos, za->za_first_integer, tx));
}
zap_cursor_fini(&zc);
- VERIFY(0 == zap_destroy(mos, zapobj, tx));
+ VERIFY0(zap_destroy(mos, zapobj, tx));
zap_attribute_free(za);
return (0);
}
diff --git a/sys/contrib/openzfs/module/zfs/dsl_destroy.c b/sys/contrib/openzfs/module/zfs/dsl_destroy.c
index fff49c97f4d2..ea01ee586f8b 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_destroy.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_destroy.c
@@ -350,7 +350,7 @@ dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
dsl_dataset_deactivate_feature(ds, f, tx);
}
if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
- ASSERT3P(ds->ds_prev, ==, NULL);
+ ASSERT0P(ds->ds_prev);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &ds_prev));
after_branch_point =
@@ -465,7 +465,7 @@ dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
&used, &comp, &uncomp);
dsl_dataset_phys(ds_next)->ds_unique_bytes += used;
dsl_dataset_rele(ds_nextnext, FTAG);
- ASSERT3P(ds_next->ds_prev, ==, NULL);
+ ASSERT0P(ds_next->ds_prev);
/* Collapse range in this head. */
dsl_dataset_t *hds;
@@ -525,7 +525,7 @@ dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
/* remove from snapshot namespace */
dsl_dataset_t *ds_head;
- ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0);
+ ASSERT0(dsl_dataset_phys(ds)->ds_snapnames_zapobj);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &ds_head));
VERIFY0(dsl_dataset_get_snapname(ds));
@@ -728,7 +728,7 @@ kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
*/
dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
} else {
- ASSERT(zilog == NULL);
+ ASSERT0P(zilog);
ASSERT3U(BP_GET_BIRTH(bp), >,
dsl_dataset_phys(ka->ds)->ds_prev_snap_txg);
(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
diff --git a/sys/contrib/openzfs/module/zfs/dsl_dir.c b/sys/contrib/openzfs/module/zfs/dsl_dir.c
index f24cd2049533..6ce1890cfea1 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_dir.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_dir.c
@@ -151,8 +151,8 @@ dsl_dir_evict_async(void *dbu)
for (t = 0; t < TXG_SIZE; t++) {
ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
- ASSERT(dd->dd_tempreserved[t] == 0);
- ASSERT(dd->dd_space_towrite[t] == 0);
+ ASSERT0(dd->dd_tempreserved[t]);
+ ASSERT0(dd->dd_space_towrite[t]);
}
if (dd->dd_parent)
diff --git a/sys/contrib/openzfs/module/zfs/dsl_pool.c b/sys/contrib/openzfs/module/zfs/dsl_pool.c
index 4f1f66b835f2..f47822df8b53 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_pool.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_pool.c
@@ -522,8 +522,8 @@ dsl_pool_create(spa_t *spa, nvlist_t *zplprops __attribute__((unused)),
/* create and open the free_bplist */
obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
- VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
- DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
+ VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
VERIFY0(bpobj_open(&dp->dp_free_bpobj,
dp->dp_meta_objset, obj));
}
@@ -1077,7 +1077,7 @@ upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
dsl_dataset_phys(prev)->ds_num_children++;
if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0) {
- ASSERT(ds->ds_prev == NULL);
+ ASSERT0P(ds->ds_prev);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj,
ds, &ds->ds_prev));
@@ -1173,7 +1173,7 @@ dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
dsl_dataset_t *ds;
ASSERT(dmu_tx_is_syncing(tx));
- ASSERT(dp->dp_origin_snap == NULL);
+ ASSERT0P(dp->dp_origin_snap);
ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));
/* create the origin dir, ds, & snap-ds */
@@ -1250,7 +1250,7 @@ dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
objset_t *mos = dp->dp_meta_objset;
- ASSERT(dp->dp_tmp_userrefs_obj == 0);
+ ASSERT0(dp->dp_tmp_userrefs_obj);
ASSERT(dmu_tx_is_syncing(tx));
dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
diff --git a/sys/contrib/openzfs/module/zfs/dsl_prop.c b/sys/contrib/openzfs/module/zfs/dsl_prop.c
index b76f22df61e2..51f624da5689 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_prop.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_prop.c
@@ -815,7 +815,7 @@ dsl_prop_set_sync_impl(dsl_dataset_t *ds, const char *propname,
*/
err = zap_update(mos, zapobj, recvdstr,
intsz, numints, value, tx);
- ASSERT(err == 0);
+ ASSERT0(err);
break;
case (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED):
/*
@@ -1166,7 +1166,7 @@ dsl_prop_get_all_impl(objset_t *mos, uint64_t propobj,
if (nvlist_exists(nv, propname))
continue;
- VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP));
if (za->za_integer_length == 1) {
/*
* String property
@@ -1179,8 +1179,7 @@ dsl_prop_get_all_impl(objset_t *mos, uint64_t propobj,
kmem_free(tmp, za->za_num_integers);
break;
}
- VERIFY(nvlist_add_string(propval, ZPROP_VALUE,
- tmp) == 0);
+ VERIFY0(nvlist_add_string(propval, ZPROP_VALUE, tmp));
kmem_free(tmp, za->za_num_integers);
} else {
/*
@@ -1191,8 +1190,8 @@ dsl_prop_get_all_impl(objset_t *mos, uint64_t propobj,
za->za_first_integer);
}
- VERIFY(nvlist_add_string(propval, ZPROP_SOURCE, source) == 0);
- VERIFY(nvlist_add_nvlist(nv, propname, propval) == 0);
+ VERIFY0(nvlist_add_string(propval, ZPROP_SOURCE, source));
+ VERIFY0(nvlist_add_nvlist(nv, propname, propval));
nvlist_free(propval);
}
zap_cursor_fini(&zc);
@@ -1215,7 +1214,7 @@ dsl_prop_get_all_ds(dsl_dataset_t *ds, nvlist_t **nvp,
int err = 0;
char setpoint[ZFS_MAX_DATASET_NAME_LEN];
- VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
if (ds->ds_is_snapshot)
flags |= DSL_PROP_GET_SNAPSHOT;
@@ -1333,18 +1332,18 @@ dsl_prop_nvlist_add_uint64(nvlist_t *nv, zfs_prop_t prop, uint64_t value)
uint64_t default_value;
if (nvlist_lookup_nvlist(nv, propname, &propval) == 0) {
- VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, value) == 0);
+ VERIFY0(nvlist_add_uint64(propval, ZPROP_VALUE, value));
return;
}
- VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
- VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, value) == 0);
+ VERIFY0(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP));
+ VERIFY0(nvlist_add_uint64(propval, ZPROP_VALUE, value));
/* Indicate the default source if we can. */
if (dodefault(prop, 8, 1, &default_value) == 0 &&
value == default_value) {
- VERIFY(nvlist_add_string(propval, ZPROP_SOURCE, "") == 0);
+ VERIFY0(nvlist_add_string(propval, ZPROP_SOURCE, ""));
}
- VERIFY(nvlist_add_nvlist(nv, propname, propval) == 0);
+ VERIFY0(nvlist_add_nvlist(nv, propname, propval));
nvlist_free(propval);
}
@@ -1355,13 +1354,13 @@ dsl_prop_nvlist_add_string(nvlist_t *nv, zfs_prop_t prop, const char *value)
const char *propname = zfs_prop_to_name(prop);
if (nvlist_lookup_nvlist(nv, propname, &propval) == 0) {
- VERIFY(nvlist_add_string(propval, ZPROP_VALUE, value) == 0);
+ VERIFY0(nvlist_add_string(propval, ZPROP_VALUE, value));
return;
}
- VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
- VERIFY(nvlist_add_string(propval, ZPROP_VALUE, value) == 0);
- VERIFY(nvlist_add_nvlist(nv, propname, propval) == 0);
+ VERIFY0(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP));
+ VERIFY0(nvlist_add_string(propval, ZPROP_VALUE, value));
+ VERIFY0(nvlist_add_nvlist(nv, propname, propval));
nvlist_free(propval);
}
diff --git a/sys/contrib/openzfs/module/zfs/dsl_scan.c b/sys/contrib/openzfs/module/zfs/dsl_scan.c
index 5052992d775c..fcd50c459d07 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_scan.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_scan.c
@@ -1784,7 +1784,7 @@ dsl_scan_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
- VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
+ VERIFY0(scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
return (0);
}
@@ -1820,7 +1820,7 @@ dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
lr->lr_foid, ZB_ZIL_LEVEL,
lr->lr_offset / BP_GET_LSIZE(bp));
- VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
+ VERIFY0(scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
}
return (0);
}
@@ -5141,7 +5141,7 @@ dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd)
mutex_enter(&svd->vdev_scan_io_queue_lock);
mutex_enter(&tvd->vdev_scan_io_queue_lock);
- VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL);
+ VERIFY0P(tvd->vdev_scan_io_queue);
tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue;
svd->vdev_scan_io_queue = NULL;
if (tvd->vdev_scan_io_queue != NULL)
diff --git a/sys/contrib/openzfs/module/zfs/dsl_userhold.c b/sys/contrib/openzfs/module/zfs/dsl_userhold.c
index 57c70e4ce3d2..f91b7a1eb69a 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_userhold.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_userhold.c
@@ -335,7 +335,7 @@ dsl_dataset_user_hold(nvlist_t *holds, minor_t cleanup_minor, nvlist_t *errlist)
dduha.dduha_holds = holds;
/* chkholds can have non-unique name */
- VERIFY(0 == nvlist_alloc(&dduha.dduha_chkholds, 0, KM_SLEEP));
+ VERIFY0(nvlist_alloc(&dduha.dduha_chkholds, 0, KM_SLEEP));
dduha.dduha_errlist = errlist;
dduha.dduha_minor = cleanup_minor;
diff --git a/sys/contrib/openzfs/module/zfs/fm.c b/sys/contrib/openzfs/module/zfs/fm.c
index a092817efedd..ae788b2310d8 100644
--- a/sys/contrib/openzfs/module/zfs/fm.c
+++ b/sys/contrib/openzfs/module/zfs/fm.c
@@ -337,7 +337,7 @@ zfs_zevent_next(zfs_zevent_t *ze, nvlist_t **event, uint64_t *event_size,
}
}
- VERIFY(nvlist_size(ev->ev_nvl, &size, NV_ENCODE_NATIVE) == 0);
+ VERIFY0(nvlist_size(ev->ev_nvl, &size, NV_ENCODE_NATIVE));
if (size > *event_size) {
*event_size = size;
error = ENOMEM;
diff --git a/sys/contrib/openzfs/module/zfs/metaslab.c b/sys/contrib/openzfs/module/zfs/metaslab.c
index 0e5f09b2724c..9f4399af56bd 100644
--- a/sys/contrib/openzfs/module/zfs/metaslab.c
+++ b/sys/contrib/openzfs/module/zfs/metaslab.c
@@ -391,7 +391,7 @@ static kstat_t *metaslab_ksp;
void
metaslab_stat_init(void)
{
- ASSERT(metaslab_alloc_trace_cache == NULL);
+ ASSERT0P(metaslab_alloc_trace_cache);
metaslab_alloc_trace_cache = kmem_cache_create(
"metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
0, NULL, NULL, NULL, NULL, NULL, 0);
@@ -456,16 +456,16 @@ metaslab_class_destroy(metaslab_class_t *mc)
{
spa_t *spa = mc->mc_spa;
- ASSERT(mc->mc_alloc == 0);
- ASSERT(mc->mc_deferred == 0);
- ASSERT(mc->mc_space == 0);
- ASSERT(mc->mc_dspace == 0);
+ ASSERT0(mc->mc_alloc);
+ ASSERT0(mc->mc_deferred);
+ ASSERT0(mc->mc_space);
+ ASSERT0(mc->mc_dspace);
for (int i = 0; i < spa->spa_alloc_count; i++) {
metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
avl_destroy(&mca->mca_tree);
mutex_destroy(&mca->mca_lock);
- ASSERT(mca->mca_rotor == NULL);
+ ASSERT0P(mca->mca_rotor);
ASSERT0(mca->mca_reserved);
}
mutex_destroy(&mc->mc_lock);
@@ -1087,8 +1087,8 @@ metaslab_group_destroy(metaslab_group_t *mg)
{
spa_t *spa = mg->mg_class->mc_spa;
- ASSERT(mg->mg_prev == NULL);
- ASSERT(mg->mg_next == NULL);
+ ASSERT0P(mg->mg_prev);
+ ASSERT0P(mg->mg_next);
/*
* We may have gone below zero with the activation count
* either because we never activated in the first place or
@@ -1118,8 +1118,8 @@ metaslab_group_activate(metaslab_group_t *mg)
ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0);
- ASSERT(mg->mg_prev == NULL);
- ASSERT(mg->mg_next == NULL);
+ ASSERT0P(mg->mg_prev);
+ ASSERT0P(mg->mg_next);
ASSERT(mg->mg_activation_count <= 0);
if (++mg->mg_activation_count <= 0)
@@ -1164,8 +1164,8 @@ metaslab_group_passivate(metaslab_group_t *mg)
if (--mg->mg_activation_count != 0) {
for (int i = 0; i < spa->spa_alloc_count; i++)
ASSERT(mc->mc_allocator[i].mca_rotor != mg);
- ASSERT(mg->mg_prev == NULL);
- ASSERT(mg->mg_next == NULL);
+ ASSERT0P(mg->mg_prev);
+ ASSERT0P(mg->mg_next);
ASSERT(mg->mg_activation_count < 0);
return;
}
@@ -1345,7 +1345,7 @@ metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
- ASSERT(msp->ms_group == NULL);
+ ASSERT0P(msp->ms_group);
mutex_enter(&mg->mg_lock);
msp->ms_group = mg;
msp->ms_weight = 0;
@@ -3017,7 +3017,7 @@ metaslab_fini(metaslab_t *msp)
metaslab_group_remove(mg, msp);
mutex_enter(&msp->ms_lock);
- VERIFY(msp->ms_group == NULL);
+ VERIFY0P(msp->ms_group);
/*
* If this metaslab hasn't been through metaslab_sync_done() yet its
@@ -5739,7 +5739,7 @@ metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
ASSERT(!vd->vdev_removing);
ASSERT(vdev_is_concrete(vd));
ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
- ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
+ ASSERT0P(vd->vdev_indirect_mapping);
if (DVA_GET_GANG(dva))
size = vdev_gang_header_asize(vd);
@@ -5997,7 +5997,7 @@ metaslab_alloc_range(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
}
ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
- ASSERT(BP_GET_NDVAS(bp) == 0);
+ ASSERT0(BP_GET_NDVAS(bp));
ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
ASSERT3P(zal, !=, NULL);
@@ -6029,7 +6029,7 @@ metaslab_alloc_range(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
smallest_psize = MIN(cur_psize, smallest_psize);
}
}
- ASSERT(error == 0);
+ ASSERT0(error);
ASSERT(BP_GET_NDVAS(bp) == ndvas);
if (actual_psize)
*actual_psize = smallest_psize;
diff --git a/sys/contrib/openzfs/module/zfs/mmp.c b/sys/contrib/openzfs/module/zfs/mmp.c
index f3665d29b8b4..7db72b9b04b0 100644
--- a/sys/contrib/openzfs/module/zfs/mmp.c
+++ b/sys/contrib/openzfs/module/zfs/mmp.c
@@ -260,7 +260,7 @@ mmp_thread_stop(spa_t *spa)
zfs_dbgmsg("MMP thread stopped pool '%s' gethrtime %llu",
spa_name(spa), gethrtime());
- ASSERT(mmp->mmp_thread == NULL);
+ ASSERT0P(mmp->mmp_thread);
mmp->mmp_thread_exiting = 0;
}
diff --git a/sys/contrib/openzfs/module/zfs/range_tree.c b/sys/contrib/openzfs/module/zfs/range_tree.c
index fc2b17606bd2..ea2d2c7227c8 100644
--- a/sys/contrib/openzfs/module/zfs/range_tree.c
+++ b/sys/contrib/openzfs/module/zfs/range_tree.c
@@ -377,7 +377,7 @@ zfs_range_tree_add_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill)
return;
}
- ASSERT3P(rs, ==, NULL);
+ ASSERT0P(rs);
/*
* Determine whether or not we will have to merge with our neighbors.
@@ -867,7 +867,7 @@ zfs_range_tree_remove_xor_add_segment(uint64_t start, uint64_t end,
next = zfs_btree_next(&removefrom->rt_root, &where, &where);
}
- VERIFY3P(curr, ==, NULL);
+ VERIFY0P(curr);
if (start != end) {
VERIFY3U(start, <, end);
diff --git a/sys/contrib/openzfs/module/zfs/rrwlock.c b/sys/contrib/openzfs/module/zfs/rrwlock.c
index 8ee784619839..d0df39b93560 100644
--- a/sys/contrib/openzfs/module/zfs/rrwlock.c
+++ b/sys/contrib/openzfs/module/zfs/rrwlock.c
@@ -108,7 +108,7 @@ rrn_add(rrwlock_t *rrl, const void *tag)
rn->rn_rrl = rrl;
rn->rn_next = tsd_get(rrw_tsd_key);
rn->rn_tag = tag;
- VERIFY(tsd_set(rrw_tsd_key, rn) == 0);
+ VERIFY0(tsd_set(rrw_tsd_key, rn));
}
/*
@@ -129,7 +129,7 @@ rrn_find_and_remove(rrwlock_t *rrl, const void *tag)
if (prev)
prev->rn_next = rn->rn_next;
else
- VERIFY(tsd_set(rrw_tsd_key, rn->rn_next) == 0);
+ VERIFY0(tsd_set(rrw_tsd_key, rn->rn_next));
kmem_free(rn, sizeof (*rn));
return (B_TRUE);
}
@@ -155,7 +155,7 @@ rrw_destroy(rrwlock_t *rrl)
{
mutex_destroy(&rrl->rr_lock);
cv_destroy(&rrl->rr_cv);
- ASSERT(rrl->rr_writer == NULL);
+ ASSERT0P(rrl->rr_writer);
zfs_refcount_destroy(&rrl->rr_anon_rcount);
zfs_refcount_destroy(&rrl->rr_linked_rcount);
}
@@ -188,7 +188,7 @@ rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, const void *tag)
} else {
(void) zfs_refcount_add(&rrl->rr_anon_rcount, tag);
}
- ASSERT(rrl->rr_writer == NULL);
+ ASSERT0P(rrl->rr_writer);
mutex_exit(&rrl->rr_lock);
}
diff --git a/sys/contrib/openzfs/module/zfs/sa.c b/sys/contrib/openzfs/module/zfs/sa.c
index 5db470ce6242..7ad25d4d85ba 100644
--- a/sys/contrib/openzfs/module/zfs/sa.c
+++ b/sys/contrib/openzfs/module/zfs/sa.c
@@ -304,7 +304,7 @@ sa_get_spill(sa_handle_t *hdl)
if (hdl->sa_spill == NULL) {
if ((rc = dmu_spill_hold_existing(hdl->sa_bonus, NULL,
&hdl->sa_spill)) == 0)
- VERIFY(0 == sa_build_index(hdl, SA_SPILL));
+ VERIFY0(sa_build_index(hdl, SA_SPILL));
} else {
rc = 0;
}
@@ -432,7 +432,7 @@ sa_add_layout_entry(objset_t *os, const sa_attr_type_t *attrs, int attr_count,
(void) snprintf(attr_name, sizeof (attr_name),
"%d", (int)lot_num);
- VERIFY(0 == zap_update(os, os->os_sa->sa_layout_attr_obj,
+ VERIFY0(zap_update(os, os->os_sa->sa_layout_attr_obj,
attr_name, 2, attr_count, attrs, tx));
}
@@ -505,7 +505,7 @@ sa_resize_spill(sa_handle_t *hdl, uint32_t size, dmu_tx_t *tx)
}
error = dbuf_spill_set_blksz(hdl->sa_spill, blocksize, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
return (error);
}
@@ -717,7 +717,7 @@ sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
if (BUF_SPACE_NEEDED(spill_used, spillhdrsize) >
hdl->sa_spill->db_size)
- VERIFY(0 == sa_resize_spill(hdl,
+ VERIFY0(sa_resize_spill(hdl,
BUF_SPACE_NEEDED(spill_used, spillhdrsize), tx));
}
@@ -791,7 +791,7 @@ sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
hdl->sa_bonus_tab = NULL;
}
if (!sa->sa_force_spill)
- VERIFY(0 == sa_build_index(hdl, SA_BONUS));
+ VERIFY0(sa_build_index(hdl, SA_BONUS));
if (hdl->sa_spill) {
sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
if (!spilling) {
@@ -801,10 +801,10 @@ sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
dmu_buf_rele(hdl->sa_spill, NULL);
hdl->sa_spill = NULL;
hdl->sa_spill_tab = NULL;
- VERIFY(0 == dmu_rm_spill(hdl->sa_os,
+ VERIFY0(dmu_rm_spill(hdl->sa_os,
sa_handle_object(hdl), tx));
} else {
- VERIFY(0 == sa_build_index(hdl, SA_SPILL));
+ VERIFY0(sa_build_index(hdl, SA_SPILL));
}
}
@@ -1733,10 +1733,10 @@ sa_add_projid(sa_handle_t *hdl, dmu_tx_t *tx, uint64_t projid)
NULL, dxattr_obj, dxattr_size);
}
- VERIFY(dmu_set_bonustype(db, DMU_OT_SA, tx) == 0);
- VERIFY(sa_replace_all_by_template_locked(hdl, attrs, count, tx) == 0);
+ VERIFY0(dmu_set_bonustype(db, DMU_OT_SA, tx));
+ VERIFY0(sa_replace_all_by_template_locked(hdl, attrs, count, tx));
if (znode_acl.z_acl_extern_obj) {
- VERIFY(0 == dmu_object_free(zfsvfs->z_os,
+ VERIFY0(dmu_object_free(zfsvfs->z_os,
znode_acl.z_acl_extern_obj, tx));
}
@@ -1858,7 +1858,7 @@ sa_attr_register_sync(sa_handle_t *hdl, dmu_tx_t *tx)
continue;
ATTR_ENCODE(attr_value, tb[i].sa_attr, tb[i].sa_length,
tb[i].sa_byteswap);
- VERIFY(0 == zap_update(hdl->sa_os, sa->sa_reg_attr_obj,
+ VERIFY0(zap_update(hdl->sa_os, sa->sa_reg_attr_obj,
tb[i].sa_name, 8, 1, &attr_value, tx));
tb[i].sa_registered = B_TRUE;
}
@@ -2013,7 +2013,7 @@ sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
* Only a variable-sized attribute can be
* replaced here, and its size must be changing.
*/
- ASSERT3U(reg_length, ==, 0);
+ ASSERT0(reg_length);
ASSERT3U(length, !=, buflen);
SA_ADD_BULK_ATTR(attr_desc, j, attr,
locator, datastart, buflen);
diff --git a/sys/contrib/openzfs/module/zfs/spa.c b/sys/contrib/openzfs/module/zfs/spa.c
index 5ecb175fbd63..b3bb46da263b 100644
--- a/sys/contrib/openzfs/module/zfs/spa.c
+++ b/sys/contrib/openzfs/module/zfs/spa.c
@@ -426,10 +426,10 @@ spa_prop_add_user(nvlist_t *nvl, const char *propname, char *strval,
{
nvlist_t *propval;
- VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
- VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
- VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
- VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
+ VERIFY0(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP));
+ VERIFY0(nvlist_add_uint64(propval, ZPROP_SOURCE, src));
+ VERIFY0(nvlist_add_string(propval, ZPROP_VALUE, strval));
+ VERIFY0(nvlist_add_nvlist(nvl, propname, propval));
nvlist_free(propval);
}
@@ -965,7 +965,7 @@ spa_prop_set(spa_t *spa, nvlist_t *nvp)
uint64_t ver = 0;
if (prop == ZPOOL_PROP_VERSION) {
- VERIFY(nvpair_value_uint64(elem, &ver) == 0);
+ VERIFY0(nvpair_value_uint64(elem, &ver));
} else {
ASSERT(zpool_prop_feature(nvpair_name(elem)));
ver = SPA_VERSION_FEATURES;
@@ -1295,7 +1295,7 @@ spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
if (tqs->stqs_taskq == NULL) {
- ASSERT3U(tqs->stqs_count, ==, 0);
+ ASSERT0(tqs->stqs_count);
return;
}
@@ -1836,9 +1836,9 @@ static void
spa_deactivate(spa_t *spa)
{
ASSERT(spa->spa_sync_on == B_FALSE);
- ASSERT(spa->spa_dsl_pool == NULL);
- ASSERT(spa->spa_root_vdev == NULL);
- ASSERT(spa->spa_async_zio_root == NULL);
+ ASSERT0P(spa->spa_dsl_pool);
+ ASSERT0P(spa->spa_root_vdev);
+ ASSERT0P(spa->spa_async_zio_root);
ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
spa_evicting_os_wait(spa);
@@ -2021,7 +2021,7 @@ spa_unload_log_sm_flush_all(spa_t *spa)
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
- ASSERT3U(spa->spa_log_flushall_txg, ==, 0);
+ ASSERT0(spa->spa_log_flushall_txg);
spa->spa_log_flushall_txg = dmu_tx_get_txg(tx);
dmu_tx_commit(tx);
@@ -2280,7 +2280,7 @@ spa_unload(spa_t *spa)
*/
if (spa->spa_root_vdev)
vdev_free(spa->spa_root_vdev);
- ASSERT(spa->spa_root_vdev == NULL);
+ ASSERT0P(spa->spa_root_vdev);
/*
* Close the dsl pool.
@@ -2418,8 +2418,8 @@ spa_load_spares(spa_t *spa)
spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
KM_SLEEP);
for (i = 0; i < spa->spa_spares.sav_count; i++) {
- VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
- VDEV_ALLOC_SPARE) == 0);
+ VERIFY0(spa_config_parse(spa, &vd, spares[i], NULL, 0,
+ VDEV_ALLOC_SPARE));
ASSERT(vd != NULL);
spa->spa_spares.sav_vdevs[i] = vd;
@@ -2546,8 +2546,8 @@ spa_load_l2cache(spa_t *spa)
/*
* Create new vdev
*/
- VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
- VDEV_ALLOC_L2CACHE) == 0);
+ VERIFY0(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
+ VDEV_ALLOC_L2CACHE));
ASSERT(vd != NULL);
newvdevs[i] = vd;
@@ -2799,7 +2799,7 @@ spa_passivate_log(spa_t *spa)
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog) {
- ASSERT3P(tvd->vdev_log_mg, ==, NULL);
+ ASSERT0P(tvd->vdev_log_mg);
metaslab_group_passivate(tvd->vdev_mg);
slog_found = B_TRUE;
}
@@ -2822,7 +2822,7 @@ spa_activate_log(spa_t *spa)
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog) {
- ASSERT3P(tvd->vdev_log_mg, ==, NULL);
+ ASSERT0P(tvd->vdev_log_mg);
metaslab_group_activate(tvd->vdev_mg);
}
}
@@ -3259,7 +3259,7 @@ spa_livelist_delete_cb(void *arg, zthr_t *z)
static void
spa_start_livelist_destroy_thread(spa_t *spa)
{
- ASSERT3P(spa->spa_livelist_delete_zthr, ==, NULL);
+ ASSERT0P(spa->spa_livelist_delete_zthr);
spa->spa_livelist_delete_zthr =
zthr_create("z_livelist_destroy",
spa_livelist_delete_cb_check, spa_livelist_delete_cb, spa,
@@ -3275,7 +3275,7 @@ static int
livelist_track_new_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
- ASSERT(tx == NULL);
+ ASSERT0P(tx);
livelist_new_arg_t *lna = arg;
if (bp_freed) {
bplist_append(lna->frees, bp);
@@ -3469,7 +3469,7 @@ spa_start_livelist_condensing_thread(spa_t *spa)
spa->spa_to_condense.syncing = B_FALSE;
spa->spa_to_condense.cancelled = B_FALSE;
- ASSERT3P(spa->spa_livelist_condense_zthr, ==, NULL);
+ ASSERT0P(spa->spa_livelist_condense_zthr);
spa->spa_livelist_condense_zthr =
zthr_create("z_livelist_condense",
spa_livelist_condense_cb_check,
@@ -3486,7 +3486,7 @@ spa_spawn_aux_threads(spa_t *spa)
spa_start_livelist_destroy_thread(spa);
spa_start_livelist_condensing_thread(spa);
- ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL);
+ ASSERT0P(spa->spa_checkpoint_discard_zthr);
spa->spa_checkpoint_discard_zthr =
zthr_create("z_checkpoint_discard",
spa_checkpoint_discard_thread_check,
@@ -4091,11 +4091,11 @@ spa_ld_parse_config(spa_t *spa, spa_import_type_t type)
nvlist_free(spa->spa_load_info);
spa->spa_load_info = fnvlist_alloc();
- ASSERT(spa->spa_comment == NULL);
+ ASSERT0P(spa->spa_comment);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
spa->spa_comment = spa_strdup(comment);
- ASSERT(spa->spa_compatibility == NULL);
+ ASSERT0P(spa->spa_compatibility);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMPATIBILITY,
&compatibility) == 0)
spa->spa_compatibility = spa_strdup(compatibility);
@@ -5913,7 +5913,7 @@ spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request,
nvlist_free(config);
if (state == SPA_LOAD_RECOVER) {
- ASSERT3P(loadinfo, ==, NULL);
+ ASSERT0P(loadinfo);
spa_import_progress_remove(spa_guid(spa));
return (rewind_error);
} else {
@@ -9091,7 +9091,7 @@ spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
int
spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
{
- ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
+ ASSERT0(spa_config_held(spa, SCL_ALL, RW_WRITER));
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (SET_ERROR(EBUSY));
@@ -9102,7 +9102,7 @@ spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
int
spa_scan_stop(spa_t *spa)
{
- ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
+ ASSERT0(spa_config_held(spa, SCL_ALL, RW_WRITER));
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (SET_ERROR(EBUSY));
@@ -9119,7 +9119,7 @@ int
spa_scan_range(spa_t *spa, pool_scan_func_t func, uint64_t txgstart,
uint64_t txgend)
{
- ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
+ ASSERT0(spa_config_held(spa, SCL_ALL, RW_WRITER));
if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
return (SET_ERROR(ENOTSUP));
@@ -9548,7 +9548,7 @@ spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
{
zio_t *zio = zio_root(spa, NULL, NULL, 0);
bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
- VERIFY(zio_wait(zio) == 0);
+ VERIFY0(zio_wait(zio));
}
/*
@@ -9587,7 +9587,7 @@ spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
size_t nvsize = 0;
dmu_buf_t *db;
- VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
+ VERIFY0(nvlist_size(nv, &nvsize, NV_ENCODE_XDR));
/*
* Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
@@ -9597,15 +9597,15 @@ spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
packed = vmem_alloc(bufsize, KM_SLEEP);
- VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
- KM_SLEEP) == 0);
+ VERIFY0(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
+ KM_SLEEP));
memset(packed + nvsize, 0, bufsize - nvsize);
dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
vmem_free(packed, bufsize);
- VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
+ VERIFY0(dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
dmu_buf_will_dirty(db, tx);
*(uint64_t *)db->db_data = nvsize;
dmu_buf_rele(db, FTAG);
@@ -10541,7 +10541,7 @@ spa_sync_tq_create(spa_t *spa, const char *name)
{
kthread_t **kthreads;
- ASSERT(spa->spa_sync_tq == NULL);
+ ASSERT0P(spa->spa_sync_tq);
ASSERT3S(spa->spa_alloc_count, <=, boot_ncpus);
/*
diff --git a/sys/contrib/openzfs/module/zfs/spa_misc.c b/sys/contrib/openzfs/module/zfs/spa_misc.c
index 2eba8362a166..cce772eae598 100644
--- a/sys/contrib/openzfs/module/zfs/spa_misc.c
+++ b/sys/contrib/openzfs/module/zfs/spa_misc.c
@@ -471,9 +471,9 @@ spa_config_lock_destroy(spa_t *spa)
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_destroy(&scl->scl_lock);
cv_destroy(&scl->scl_cv);
- ASSERT(scl->scl_writer == NULL);
- ASSERT(scl->scl_write_wanted == 0);
- ASSERT(scl->scl_count == 0);
+ ASSERT0P(scl->scl_writer);
+ ASSERT0(scl->scl_write_wanted);
+ ASSERT0(scl->scl_count);
}
}
@@ -784,24 +784,23 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
list_insert_head(&spa->spa_config_list, dp);
- VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
- KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME, KM_SLEEP));
if (config != NULL) {
nvlist_t *features;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
&features) == 0) {
- VERIFY(nvlist_dup(features, &spa->spa_label_features,
- 0) == 0);
+ VERIFY0(nvlist_dup(features,
+ &spa->spa_label_features, 0));
}
- VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
+ VERIFY0(nvlist_dup(config, &spa->spa_config, 0));
}
if (spa->spa_label_features == NULL) {
- VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
- KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
+ KM_SLEEP));
}
spa->spa_min_ashift = INT_MAX;
diff --git a/sys/contrib/openzfs/module/zfs/spa_stats.c b/sys/contrib/openzfs/module/zfs/spa_stats.c
index 6d7cabcf766d..2c87122a0aa9 100644
--- a/sys/contrib/openzfs/module/zfs/spa_stats.c
+++ b/sys/contrib/openzfs/module/zfs/spa_stats.c
@@ -718,7 +718,7 @@ spa_mmp_history_set(spa_t *spa, uint64_t mmp_node_id, int io_error,
for (smh = list_tail(&shl->procfs_list.pl_list); smh != NULL;
smh = list_prev(&shl->procfs_list.pl_list, smh)) {
if (smh->mmp_node_id == mmp_node_id) {
- ASSERT(smh->io_error == 0);
+ ASSERT0(smh->io_error);
smh->io_error = io_error;
smh->duration = duration;
error = 0;
diff --git a/sys/contrib/openzfs/module/zfs/space_map.c b/sys/contrib/openzfs/module/zfs/space_map.c
index c429e0edd168..5f24963f2291 100644
--- a/sys/contrib/openzfs/module/zfs/space_map.c
+++ b/sys/contrib/openzfs/module/zfs/space_map.c
@@ -817,7 +817,7 @@ space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
space_map_t *sm;
int error;
- ASSERT(*smp == NULL);
+ ASSERT0P(*smp);
ASSERT(os != NULL);
ASSERT(object != 0);
diff --git a/sys/contrib/openzfs/module/zfs/space_reftree.c b/sys/contrib/openzfs/module/zfs/space_reftree.c
index 9b2d5ed31dc9..889980e08c06 100644
--- a/sys/contrib/openzfs/module/zfs/space_reftree.c
+++ b/sys/contrib/openzfs/module/zfs/space_reftree.c
@@ -149,6 +149,6 @@ space_reftree_generate_map(avl_tree_t *t, zfs_range_tree_t *rt, int64_t minref)
}
}
}
- ASSERT(refcnt == 0);
+ ASSERT0(refcnt);
ASSERT(start == -1ULL);
}
diff --git a/sys/contrib/openzfs/module/zfs/vdev.c b/sys/contrib/openzfs/module/zfs/vdev.c
index 70b14fb9b2c8..9cf35e379000 100644
--- a/sys/contrib/openzfs/module/zfs/vdev.c
+++ b/sys/contrib/openzfs/module/zfs/vdev.c
@@ -554,7 +554,7 @@ vdev_add_child(vdev_t *pvd, vdev_t *cvd)
vdev_t **newchild;
ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
- ASSERT(cvd->vdev_parent == NULL);
+ ASSERT0P(cvd->vdev_parent);
cvd->vdev_parent = pvd;
@@ -578,7 +578,7 @@ vdev_add_child(vdev_t *pvd, vdev_t *cvd)
pvd->vdev_nonrot &= cvd->vdev_nonrot;
cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd);
- ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);
+ ASSERT0P(cvd->vdev_top->vdev_parent->vdev_parent);
/*
* Walk up all ancestors to update guid sum.
@@ -1101,10 +1101,10 @@ vdev_free(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
- ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
- ASSERT3P(vd->vdev_trim_thread, ==, NULL);
- ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
- ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
+ ASSERT0P(vd->vdev_initialize_thread);
+ ASSERT0P(vd->vdev_trim_thread);
+ ASSERT0P(vd->vdev_autotrim_thread);
+ ASSERT0P(vd->vdev_rebuild_thread);
/*
* Scan queues are normally destroyed at the end of a scan. If the
@@ -1133,7 +1133,7 @@ vdev_free(vdev_t *vd)
for (int c = 0; c < vd->vdev_children; c++)
vdev_free(vd->vdev_child[c]);
- ASSERT(vd->vdev_child == NULL);
+ ASSERT0P(vd->vdev_child);
ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
if (vd->vdev_ops->vdev_op_fini != NULL)
@@ -1162,7 +1162,7 @@ vdev_free(vdev_t *vd)
*/
vdev_remove_child(vd->vdev_parent, vd);
- ASSERT(vd->vdev_parent == NULL);
+ ASSERT0P(vd->vdev_parent);
ASSERT(!list_link_active(&vd->vdev_leaf_node));
/*
@@ -1309,9 +1309,9 @@ vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
ASSERT0(tvd->vdev_indirect_config.vic_births_object);
ASSERT0(tvd->vdev_indirect_config.vic_mapping_object);
ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL);
- ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL);
- ASSERT3P(tvd->vdev_indirect_births, ==, NULL);
- ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL);
+ ASSERT0P(tvd->vdev_indirect_mapping);
+ ASSERT0P(tvd->vdev_indirect_births);
+ ASSERT0P(tvd->vdev_obsolete_sm);
ASSERT0(tvd->vdev_noalloc);
ASSERT0(tvd->vdev_removing);
ASSERT0(tvd->vdev_rebuilding);
@@ -1464,7 +1464,7 @@ vdev_remove_parent(vdev_t *cvd)
if (cvd == cvd->vdev_top)
vdev_top_transfer(mvd, cvd);
- ASSERT(mvd->vdev_children == 0);
+ ASSERT0(mvd->vdev_children);
vdev_free(mvd);
}
@@ -2134,14 +2134,14 @@ vdev_open(vdev_t *vd)
* faulted, bail out of the open.
*/
if (!vd->vdev_removed && vd->vdev_faulted) {
- ASSERT(vd->vdev_children == 0);
+ ASSERT0(vd->vdev_children);
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (SET_ERROR(ENXIO));
} else if (vd->vdev_offline) {
- ASSERT(vd->vdev_children == 0);
+ ASSERT0(vd->vdev_children);
vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
return (SET_ERROR(ENXIO));
}
@@ -2197,7 +2197,7 @@ vdev_open(vdev_t *vd)
* the vdev is accessible. If we're faulted, bail.
*/
if (vd->vdev_faulted) {
- ASSERT(vd->vdev_children == 0);
+ ASSERT0(vd->vdev_children);
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
@@ -2206,7 +2206,7 @@ vdev_open(vdev_t *vd)
}
if (vd->vdev_degraded) {
- ASSERT(vd->vdev_children == 0);
+ ASSERT0(vd->vdev_children);
vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
VDEV_AUX_ERR_EXCEEDED);
} else {
@@ -3945,7 +3945,7 @@ vdev_load(vdev_t *vd)
if (error == 0 && checkpoint_sm_obj != 0) {
objset_t *mos = spa_meta_objset(vd->vdev_spa);
ASSERT(vd->vdev_asize != 0);
- ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL);
+ ASSERT0P(vd->vdev_checkpoint_sm);
error = space_map_open(&vd->vdev_checkpoint_sm,
mos, checkpoint_sm_obj, 0, vd->vdev_asize,
@@ -3993,7 +3993,7 @@ vdev_load(vdev_t *vd)
if (error == 0 && obsolete_sm_object != 0) {
objset_t *mos = vd->vdev_spa->spa_meta_objset;
ASSERT(vd->vdev_asize != 0);
- ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
+ ASSERT0P(vd->vdev_obsolete_sm);
if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
obsolete_sm_object, 0, vd->vdev_asize, 0))) {
@@ -4521,7 +4521,7 @@ top:
/*
* Prevent any future allocations.
*/
- ASSERT3P(tvd->vdev_log_mg, ==, NULL);
+ ASSERT0P(tvd->vdev_log_mg);
metaslab_group_passivate(mg);
(void) spa_vdev_state_exit(spa, vd, 0);
@@ -5194,7 +5194,7 @@ vdev_stat_update(zio_t *zio, uint64_t psize)
int64_t
vdev_deflated_space(vdev_t *vd, int64_t space)
{
- ASSERT((space & (SPA_MINBLOCKSIZE-1)) == 0);
+ ASSERT0((space & (SPA_MINBLOCKSIZE-1)));
ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
return ((space >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio);
@@ -5286,8 +5286,8 @@ vdev_config_dirty(vdev_t *vd)
if (nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
- VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
- ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
+ VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config,
+ ZPOOL_CONFIG_SPARES, &aux, &naux));
}
ASSERT(c < naux);
@@ -5675,7 +5675,7 @@ vdev_expand(vdev_t *vd, uint64_t txg)
(vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
vdev_is_concrete(vd)) {
vdev_metaslab_group_create(vd);
- VERIFY(vdev_metaslab_init(vd, txg) == 0);
+ VERIFY0(vdev_metaslab_init(vd, txg));
vdev_config_dirty(vd);
}
}
diff --git a/sys/contrib/openzfs/module/zfs/vdev_draid.c b/sys/contrib/openzfs/module/zfs/vdev_draid.c
index feec5fd3ce17..a05289102af2 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_draid.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_draid.c
@@ -477,7 +477,7 @@ vdev_draid_generate_perms(const draid_map_t *map, uint8_t **permsp)
VERIFY3U(map->dm_children, <=, VDEV_DRAID_MAX_CHILDREN);
VERIFY3U(map->dm_seed, !=, 0);
VERIFY3U(map->dm_nperms, !=, 0);
- VERIFY3P(map->dm_perms, ==, NULL);
+ VERIFY0P(map->dm_perms);
#ifdef _KERNEL
/*
@@ -590,7 +590,7 @@ vdev_draid_psize_to_asize(vdev_t *vd, uint64_t psize, uint64_t txg)
uint64_t asize = (rows * vdc->vdc_groupwidth) << ashift;
ASSERT3U(asize, !=, 0);
- ASSERT3U(asize % (vdc->vdc_groupwidth), ==, 0);
+ ASSERT0(asize % (vdc->vdc_groupwidth));
return (asize);
}
@@ -704,7 +704,7 @@ vdev_draid_map_alloc_scrub(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
uint64_t skip_off = 0;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
- ASSERT3P(rr->rr_abd_empty, ==, NULL);
+ ASSERT0P(rr->rr_abd_empty);
if (rr->rr_nempty > 0) {
rr->rr_abd_empty = abd_alloc_linear(rr->rr_nempty * skip_size,
@@ -793,7 +793,7 @@ vdev_draid_map_alloc_empty(zio_t *zio, raidz_row_t *rr)
uint64_t skip_off = 0;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
- ASSERT3P(rr->rr_abd_empty, ==, NULL);
+ ASSERT0P(rr->rr_abd_empty);
if (rr->rr_nempty > 0) {
rr->rr_abd_empty = abd_alloc_linear(rr->rr_nempty * skip_size,
@@ -807,7 +807,7 @@ vdev_draid_map_alloc_empty(zio_t *zio, raidz_row_t *rr)
/* empty data column (small read), add a skip sector */
ASSERT3U(skip_size, ==, parity_size);
ASSERT3U(rr->rr_nempty, !=, 0);
- ASSERT3P(rc->rc_abd, ==, NULL);
+ ASSERT0P(rc->rc_abd);
rc->rc_abd = abd_get_offset_size(rr->rr_abd_empty,
skip_off, skip_size);
skip_off += skip_size;
@@ -1623,7 +1623,7 @@ vdev_draid_rebuild_asize(vdev_t *vd, uint64_t start, uint64_t asize,
SPA_MAXBLOCKSIZE);
ASSERT3U(vdev_draid_get_astart(vd, start), ==, start);
- ASSERT3U(asize % (vdc->vdc_groupwidth << ashift), ==, 0);
+ ASSERT0(asize % (vdc->vdc_groupwidth << ashift));
/* Chunks must evenly span all data columns in the group. */
psize = (((psize >> ashift) / ndata) * ndata) << ashift;
@@ -1634,7 +1634,7 @@ vdev_draid_rebuild_asize(vdev_t *vd, uint64_t start, uint64_t asize,
uint64_t left = vdev_draid_group_to_offset(vd, group + 1) - start;
chunk_size = MIN(chunk_size, left);
- ASSERT3U(chunk_size % (vdc->vdc_groupwidth << ashift), ==, 0);
+ ASSERT0(chunk_size % (vdc->vdc_groupwidth << ashift));
ASSERT3U(vdev_draid_offset_to_group(vd, start), ==,
vdev_draid_offset_to_group(vd, start + chunk_size - 1));
@@ -2272,7 +2272,7 @@ vdev_draid_init(spa_t *spa, nvlist_t *nv, void **tsd)
ASSERT3U(vdc->vdc_groupwidth, <=, vdc->vdc_ndisks);
ASSERT3U(vdc->vdc_groupsz, >=, 2 * VDEV_DRAID_ROWHEIGHT);
ASSERT3U(vdc->vdc_devslicesz, >=, VDEV_DRAID_ROWHEIGHT);
- ASSERT3U(vdc->vdc_devslicesz % VDEV_DRAID_ROWHEIGHT, ==, 0);
+ ASSERT0(vdc->vdc_devslicesz % VDEV_DRAID_ROWHEIGHT);
ASSERT3U((vdc->vdc_groupwidth * vdc->vdc_ngroups) %
vdc->vdc_ndisks, ==, 0);
diff --git a/sys/contrib/openzfs/module/zfs/vdev_indirect.c b/sys/contrib/openzfs/module/zfs/vdev_indirect.c
index 9fc71fa0e03e..7538f471e63c 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_indirect.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_indirect.c
@@ -792,7 +792,7 @@ spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx)
DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
sizeof (*scip) / sizeof (uint64_t), scip, tx));
- ASSERT3P(spa->spa_condensing_indirect, ==, NULL);
+ ASSERT0P(spa->spa_condensing_indirect);
spa->spa_condensing_indirect = spa_condensing_indirect_create(spa);
zfs_dbgmsg("starting condense of vdev %llu in txg %llu: "
@@ -882,7 +882,7 @@ spa_condense_fini(spa_t *spa)
void
spa_start_indirect_condensing_thread(spa_t *spa)
{
- ASSERT3P(spa->spa_condense_zthr, ==, NULL);
+ ASSERT0P(spa->spa_condense_zthr);
spa->spa_condense_zthr = zthr_create("z_indirect_condense",
spa_condense_indirect_thread_check,
spa_condense_indirect_thread, spa, minclsyspri);
@@ -1504,7 +1504,7 @@ vdev_indirect_splits_checksum_validate(indirect_vsd_t *iv, zio_t *zio)
is != NULL; is = list_next(&iv->iv_splits, is)) {
ASSERT3P(is->is_good_child->ic_data, !=, NULL);
- ASSERT3P(is->is_good_child->ic_duplicate, ==, NULL);
+ ASSERT0P(is->is_good_child->ic_duplicate);
abd_copy_off(zio->io_abd, is->is_good_child->ic_data,
is->is_split_offset, 0, is->is_size);
diff --git a/sys/contrib/openzfs/module/zfs/vdev_initialize.c b/sys/contrib/openzfs/module/zfs/vdev_initialize.c
index 9243c76e810d..27188c46e561 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_initialize.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_initialize.c
@@ -632,7 +632,7 @@ vdev_initialize(vdev_t *vd)
ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(vdev_is_concrete(vd));
- ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
+ ASSERT0P(vd->vdev_initialize_thread);
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_initialize_exit_wanted);
ASSERT(!vd->vdev_top->vdev_removing);
@@ -653,7 +653,7 @@ vdev_uninitialize(vdev_t *vd)
ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(vdev_is_concrete(vd));
- ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
+ ASSERT0P(vd->vdev_initialize_thread);
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_initialize_exit_wanted);
ASSERT(!vd->vdev_top->vdev_removing);
@@ -672,7 +672,7 @@ vdev_initialize_stop_wait_impl(vdev_t *vd)
while (vd->vdev_initialize_thread != NULL)
cv_wait(&vd->vdev_initialize_cv, &vd->vdev_initialize_lock);
- ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
+ ASSERT0P(vd->vdev_initialize_thread);
vd->vdev_initialize_exit_wanted = B_FALSE;
}
diff --git a/sys/contrib/openzfs/module/zfs/vdev_label.c b/sys/contrib/openzfs/module/zfs/vdev_label.c
index 6baa6236aac2..c44f654b0261 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_label.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_label.c
@@ -163,7 +163,7 @@ uint64_t
vdev_label_offset(uint64_t psize, int l, uint64_t offset)
{
ASSERT(offset < sizeof (vdev_label_t));
- ASSERT(P2PHASE_TYPED(psize, sizeof (vdev_label_t), uint64_t) == 0);
+ ASSERT0(P2PHASE_TYPED(psize, sizeof (vdev_label_t), uint64_t));
return (offset + l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
0 : psize - VDEV_LABELS * sizeof (vdev_label_t)));
@@ -768,12 +768,12 @@ vdev_top_config_generate(spa_t *spa, nvlist_t *config)
}
if (idx) {
- VERIFY(nvlist_add_uint64_array(config, ZPOOL_CONFIG_HOLE_ARRAY,
- array, idx) == 0);
+ VERIFY0(nvlist_add_uint64_array(config,
+ ZPOOL_CONFIG_HOLE_ARRAY, array, idx));
}
- VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
- rvd->vdev_children) == 0);
+ VERIFY0(nvlist_add_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
+ rvd->vdev_children));
kmem_free(array, rvd->vdev_children * sizeof (uint64_t));
}
@@ -1189,8 +1189,8 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
* vdev uses as described above, and automatically expires if we
* fail.
*/
- VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
- crtxg) == 0);
+ VERIFY0(nvlist_add_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
+ crtxg));
}
buf = vp->vp_nvlist;
diff --git a/sys/contrib/openzfs/module/zfs/vdev_queue.c b/sys/contrib/openzfs/module/zfs/vdev_queue.c
index aa41f7066036..c12713b107bf 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_queue.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_queue.c
@@ -780,7 +780,7 @@ vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
if (dio->io_flags & ZIO_FLAG_NODATA) {
/* allocate a buffer for a write gap */
ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
- ASSERT3P(dio->io_abd, ==, NULL);
+ ASSERT0P(dio->io_abd);
abd_gang_add(aio->io_abd,
abd_get_zeros(dio->io_size), B_TRUE);
} else {
diff --git a/sys/contrib/openzfs/module/zfs/vdev_raidz.c b/sys/contrib/openzfs/module/zfs/vdev_raidz.c
index 210cdcab1ecc..b597d6daefde 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_raidz.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_raidz.c
@@ -412,7 +412,7 @@ vdev_raidz_map_free(raidz_map_t *rm)
rm->rm_nphys_cols);
}
- ASSERT3P(rm->rm_lr, ==, NULL);
+ ASSERT0P(rm->rm_lr);
kmem_free(rm, offsetof(raidz_map_t, rm_row[rm->rm_nrows]));
}
@@ -2431,7 +2431,7 @@ raidz_start_skip_writes(zio_t *zio)
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
if (rc->rc_size != 0)
continue;
- ASSERT3P(rc->rc_abd, ==, NULL);
+ ASSERT0P(rc->rc_abd);
ASSERT3U(rc->rc_offset, <,
cvd->vdev_psize - VDEV_LABEL_END_SIZE);
@@ -3363,7 +3363,7 @@ vdev_raidz_io_done_reconstruct_known_missing(zio_t *zio, raidz_map_t *rm,
* also have been fewer parity errors than parity
* columns or, again, we wouldn't be in this code path.
*/
- ASSERT(parity_untried == 0);
+ ASSERT0(parity_untried);
ASSERT(parity_errors < rr->rr_firstdatacol);
/*
@@ -4743,7 +4743,7 @@ spa_raidz_expand_thread(void *arg, zthr_t *zthr)
void
spa_start_raidz_expansion_thread(spa_t *spa)
{
- ASSERT3P(spa->spa_raidz_expand_zthr, ==, NULL);
+ ASSERT0P(spa->spa_raidz_expand_zthr);
spa->spa_raidz_expand_zthr = zthr_create("raidz_expand",
spa_raidz_expand_thread_check, spa_raidz_expand_thread,
spa, defclsyspri);
diff --git a/sys/contrib/openzfs/module/zfs/vdev_rebuild.c b/sys/contrib/openzfs/module/zfs/vdev_rebuild.c
index cf259788ccf4..47b3b9921abe 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_rebuild.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_rebuild.c
@@ -256,7 +256,7 @@ vdev_rebuild_initiate_sync(void *arg, dmu_tx_t *tx)
"vdev_id=%llu vdev_guid=%llu started",
(u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);
- ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
+ ASSERT0P(vd->vdev_rebuild_thread);
vd->vdev_rebuild_thread = thread_create(NULL, 0,
vdev_rebuild_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
@@ -413,7 +413,7 @@ vdev_rebuild_reset_sync(void *arg, dmu_tx_t *tx)
mutex_enter(&vd->vdev_rebuild_lock);
ASSERT(vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
- ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
+ ASSERT0P(vd->vdev_rebuild_thread);
vrp->vrp_last_offset = 0;
vrp->vrp_min_txg = 0;
diff --git a/sys/contrib/openzfs/module/zfs/vdev_removal.c b/sys/contrib/openzfs/module/zfs/vdev_removal.c
index 3887be4bd548..2f7a739da241 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_removal.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_removal.c
@@ -344,10 +344,10 @@ spa_vdev_remove_aux(nvlist_t *config, const char *name, nvlist_t **dev,
for (int i = 0, j = 0; i < count; i++) {
if (dev[i] == dev_to_remove)
continue;
- VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
+ VERIFY0(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP));
}
- VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
+ VERIFY0(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY));
fnvlist_add_nvlist_array(config, name, (const nvlist_t * const *)newdev,
count - 1);
@@ -423,7 +423,7 @@ vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx)
svr = spa_vdev_removal_create(vd);
ASSERT(vd->vdev_removing);
- ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
+ ASSERT0P(vd->vdev_indirect_mapping);
spa_feature_incr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
@@ -529,7 +529,7 @@ vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx)
* but in any case only when there are outstanding free i/os, which
* there are not).
*/
- ASSERT3P(spa->spa_vdev_removal, ==, NULL);
+ ASSERT0P(spa->spa_vdev_removal);
spa->spa_vdev_removal = svr;
svr->svr_thread = thread_create(NULL, 0,
spa_vdev_remove_thread, spa, 0, &p0, TS_RUN, minclsyspri);
@@ -1362,11 +1362,11 @@ vdev_remove_complete(spa_t *spa)
txg_wait_synced(spa->spa_dsl_pool, 0);
txg = spa_vdev_enter(spa);
vdev_t *vd = vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
- ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
- ASSERT3P(vd->vdev_trim_thread, ==, NULL);
- ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
+ ASSERT0P(vd->vdev_initialize_thread);
+ ASSERT0P(vd->vdev_trim_thread);
+ ASSERT0P(vd->vdev_autotrim_thread);
vdev_rebuild_stop_wait(vd);
- ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
+ ASSERT0P(vd->vdev_rebuild_thread);
sysevent_t *ev = spa_event_create(spa, vd, NULL,
ESC_ZFS_VDEV_REMOVE_DEV);
@@ -1868,7 +1868,7 @@ spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
objset_t *mos = spa->spa_meta_objset;
- ASSERT3P(svr->svr_thread, ==, NULL);
+ ASSERT0P(svr->svr_thread);
spa_feature_decr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
@@ -2076,7 +2076,7 @@ spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
ASSERT(vd->vdev_islog);
ASSERT(vd == vd->vdev_top);
- ASSERT3P(vd->vdev_log_mg, ==, NULL);
+ ASSERT0P(vd->vdev_log_mg);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
/*
@@ -2112,7 +2112,7 @@ spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
if (error != 0) {
metaslab_group_activate(mg);
- ASSERT3P(vd->vdev_log_mg, ==, NULL);
+ ASSERT0P(vd->vdev_log_mg);
return (error);
}
ASSERT0(vd->vdev_stat.vs_alloc);
diff --git a/sys/contrib/openzfs/module/zfs/vdev_trim.c b/sys/contrib/openzfs/module/zfs/vdev_trim.c
index fc8d5b8e9a8a..eee18b367909 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_trim.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_trim.c
@@ -1010,7 +1010,7 @@ vdev_trim(vdev_t *vd, uint64_t rate, boolean_t partial, boolean_t secure)
ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(vdev_is_concrete(vd));
- ASSERT3P(vd->vdev_trim_thread, ==, NULL);
+ ASSERT0P(vd->vdev_trim_thread);
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_trim_exit_wanted);
ASSERT(!vd->vdev_top->vdev_removing);
@@ -1032,7 +1032,7 @@ vdev_trim_stop_wait_impl(vdev_t *vd)
while (vd->vdev_trim_thread != NULL)
cv_wait(&vd->vdev_trim_cv, &vd->vdev_trim_lock);
- ASSERT3P(vd->vdev_trim_thread, ==, NULL);
+ ASSERT0P(vd->vdev_trim_thread);
vd->vdev_trim_exit_wanted = B_FALSE;
}
@@ -1539,7 +1539,7 @@ vdev_autotrim_stop_wait(vdev_t *tvd)
cv_wait(&tvd->vdev_autotrim_cv,
&tvd->vdev_autotrim_lock);
- ASSERT3P(tvd->vdev_autotrim_thread, ==, NULL);
+ ASSERT0P(tvd->vdev_autotrim_thread);
tvd->vdev_autotrim_exit_wanted = B_FALSE;
}
mutex_exit(&tvd->vdev_autotrim_lock);
@@ -1712,7 +1712,7 @@ vdev_trim_l2arc(spa_t *spa)
mutex_enter(&vd->vdev_trim_lock);
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(vdev_is_concrete(vd));
- ASSERT3P(vd->vdev_trim_thread, ==, NULL);
+ ASSERT0P(vd->vdev_trim_thread);
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_trim_exit_wanted);
ASSERT(!vd->vdev_top->vdev_removing);
diff --git a/sys/contrib/openzfs/module/zfs/zap.c b/sys/contrib/openzfs/module/zfs/zap.c
index 0896690c97e3..3e4e997798a3 100644
--- a/sys/contrib/openzfs/module/zfs/zap.c
+++ b/sys/contrib/openzfs/module/zfs/zap.c
@@ -921,7 +921,7 @@ fzap_add_cd(zap_name_t *zn,
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
ASSERT(!zap->zap_ismicro);
- ASSERT(fzap_check(zn, integer_size, num_integers) == 0);
+ ASSERT0(fzap_check(zn, integer_size, num_integers));
err = zap_deref_leaf(zap, zn->zn_hash, tx, RW_WRITER, &l);
if (err != 0)
@@ -1386,7 +1386,7 @@ again:
}
err = zap_entry_read_name(zap, &zeh,
za->za_name_len, za->za_name);
- ASSERT(err == 0);
+ ASSERT0(err);
za->za_normalization_conflict =
zap_entry_normalization_conflict(&zeh,
@@ -1546,7 +1546,7 @@ zap_shrink(zap_name_t *zn, zap_leaf_t *l, dmu_tx_t *tx)
boolean_t trunc = B_FALSE;
int err = 0;
- ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_nentries, ==, 0);
+ ASSERT0(zap_leaf_phys(l)->l_hdr.lh_nentries);
ASSERT3U(prefix_len, <=, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
ASSERT3U(ZAP_HASH_IDX(hash, prefix_len), ==, prefix);
@@ -1564,7 +1564,7 @@ zap_shrink(zap_name_t *zn, zap_leaf_t *l, dmu_tx_t *tx)
uint64_t sl_hash = ZAP_PREFIX_HASH(sl_prefix, prefix_len);
int slbit = prefix & 1;
- ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_nentries, ==, 0);
+ ASSERT0(zap_leaf_phys(l)->l_hdr.lh_nentries);
/*
* Check if there is a sibling by reading ptrtbl ptrs.
diff --git a/sys/contrib/openzfs/module/zfs/zap_micro.c b/sys/contrib/openzfs/module/zfs/zap_micro.c
index 411b1a9db5ab..ea4e3117a8b9 100644
--- a/sys/contrib/openzfs/module/zfs/zap_micro.c
+++ b/sys/contrib/openzfs/module/zfs/zap_micro.c
@@ -346,7 +346,7 @@ zap_name_alloc_uint64(zap_t *zap, const uint64_t *key, int numints)
{
zap_name_t *zn = kmem_cache_alloc(zap_name_cache, KM_SLEEP);
- ASSERT(zap->zap_normflags == 0);
+ ASSERT0(zap->zap_normflags);
zn->zn_zap = zap;
zn->zn_key_intlen = sizeof (*key);
zn->zn_key_orig = zn->zn_key_norm = key;
@@ -1876,7 +1876,7 @@ zap_cursor_serialize(zap_cursor_t *zc)
return (-1ULL);
if (zc->zc_zap == NULL)
return (zc->zc_serialized);
- ASSERT((zc->zc_hash & zap_maxcd(zc->zc_zap)) == 0);
+ ASSERT0((zc->zc_hash & zap_maxcd(zc->zc_zap)));
ASSERT(zc->zc_cd < zap_maxcd(zc->zc_zap));
/*
@@ -1911,7 +1911,7 @@ zap_cursor_retrieve(zap_cursor_t *zc, zap_attribute_t *za)
* we must add to the existing zc_cd, which may already
* be 1 due to the zap_cursor_advance.
*/
- ASSERT(zc->zc_hash == 0);
+ ASSERT0(zc->zc_hash);
hb = zap_hashbits(zc->zc_zap);
zc->zc_hash = zc->zc_serialized << (64 - hb);
zc->zc_cd += zc->zc_serialized >> hb;
diff --git a/sys/contrib/openzfs/module/zfs/zcp.c b/sys/contrib/openzfs/module/zfs/zcp.c
index 9aecf67fd256..c6684f453e95 100644
--- a/sys/contrib/openzfs/module/zfs/zcp.c
+++ b/sys/contrib/openzfs/module/zfs/zcp.c
@@ -765,7 +765,7 @@ zcp_lua_alloc(void *ud, void *ptr, size_t osize, size_t nsize)
return (NULL);
}
(void) memcpy(luabuf, ptr, osize);
- VERIFY3P(zcp_lua_alloc(ud, ptr, osize, 0), ==, NULL);
+ VERIFY0P(zcp_lua_alloc(ud, ptr, osize, 0));
return (luabuf);
}
}
diff --git a/sys/contrib/openzfs/module/zfs/zfeature.c b/sys/contrib/openzfs/module/zfs/zfeature.c
index 7dfe00d42a08..0816ea134bf3 100644
--- a/sys/contrib/openzfs/module/zfs/zfeature.c
+++ b/sys/contrib/openzfs/module/zfs/zfeature.c
@@ -210,8 +210,8 @@ spa_features_check(spa_t *spa, boolean_t for_write,
za->za_name, 1, MAXPATHLEN, buf) == 0)
desc = buf;
- VERIFY(nvlist_add_string(unsup_feat,
- za->za_name, desc) == 0);
+ VERIFY0(nvlist_add_string(unsup_feat,
+ za->za_name, desc));
}
}
}
diff --git a/sys/contrib/openzfs/module/zfs/zfs_fuid.c b/sys/contrib/openzfs/module/zfs/zfs_fuid.c
index 10a6d289fbf8..2af1efe82e62 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_fuid.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_fuid.c
@@ -112,8 +112,7 @@ zfs_fuid_table_load(objset_t *os, uint64_t fuid_obj, avl_tree_t *idx_tree,
uint64_t fuid_size;
ASSERT(fuid_obj != 0);
- VERIFY(0 == dmu_bonus_hold(os, fuid_obj,
- FTAG, &db));
+ VERIFY0(dmu_bonus_hold(os, fuid_obj, FTAG, &db));
fuid_size = *(uint64_t *)db->db_data;
dmu_buf_rele(db, FTAG);
@@ -125,22 +124,21 @@ zfs_fuid_table_load(objset_t *os, uint64_t fuid_obj, avl_tree_t *idx_tree,
int i;
packed = kmem_alloc(fuid_size, KM_SLEEP);
- VERIFY(dmu_read(os, fuid_obj, 0,
- fuid_size, packed, DMU_READ_PREFETCH) == 0);
- VERIFY(nvlist_unpack(packed, fuid_size,
- &nvp, 0) == 0);
- VERIFY(nvlist_lookup_nvlist_array(nvp, FUID_NVP_ARRAY,
- &fuidnvp, &count) == 0);
+ VERIFY0(dmu_read(os, fuid_obj, 0,
+ fuid_size, packed, DMU_READ_PREFETCH));
+ VERIFY0(nvlist_unpack(packed, fuid_size, &nvp, 0));
+ VERIFY0(nvlist_lookup_nvlist_array(nvp, FUID_NVP_ARRAY,
+ &fuidnvp, &count));
for (i = 0; i != count; i++) {
fuid_domain_t *domnode;
const char *domain;
uint64_t idx;
- VERIFY(nvlist_lookup_string(fuidnvp[i], FUID_DOMAIN,
- &domain) == 0);
- VERIFY(nvlist_lookup_uint64(fuidnvp[i], FUID_IDX,
- &idx) == 0);
+ VERIFY0(nvlist_lookup_string(fuidnvp[i], FUID_DOMAIN,
+ &domain));
+ VERIFY0(nvlist_lookup_uint64(fuidnvp[i], FUID_IDX,
+ &idx));
domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);
@@ -246,35 +244,33 @@ zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
&zfsvfs->z_fuid_obj, tx) == 0);
}
- VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP));
numnodes = avl_numnodes(&zfsvfs->z_fuid_idx);
fuids = kmem_alloc(numnodes * sizeof (void *), KM_SLEEP);
for (i = 0, domnode = avl_first(&zfsvfs->z_fuid_domain); domnode; i++,
domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode)) {
- VERIFY(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP) == 0);
- VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX,
- domnode->f_idx) == 0);
- VERIFY(nvlist_add_uint64(fuids[i], FUID_OFFSET, 0) == 0);
- VERIFY(nvlist_add_string(fuids[i], FUID_DOMAIN,
- domnode->f_ksid->kd_name) == 0);
+ VERIFY0(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP));
+ VERIFY0(nvlist_add_uint64(fuids[i], FUID_IDX,
+ domnode->f_idx));
+ VERIFY0(nvlist_add_uint64(fuids[i], FUID_OFFSET, 0));
+ VERIFY0(nvlist_add_string(fuids[i], FUID_DOMAIN,
+ domnode->f_ksid->kd_name));
}
fnvlist_add_nvlist_array(nvp, FUID_NVP_ARRAY,
(const nvlist_t * const *)fuids, numnodes);
for (i = 0; i != numnodes; i++)
nvlist_free(fuids[i]);
kmem_free(fuids, numnodes * sizeof (void *));
- VERIFY(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR) == 0);
+ VERIFY0(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR));
packed = kmem_alloc(nvsize, KM_SLEEP);
- VERIFY(nvlist_pack(nvp, &packed, &nvsize,
- NV_ENCODE_XDR, KM_SLEEP) == 0);
+ VERIFY0(nvlist_pack(nvp, &packed, &nvsize, NV_ENCODE_XDR, KM_SLEEP));
nvlist_free(nvp);
zfsvfs->z_fuid_size = nvsize;
dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0,
zfsvfs->z_fuid_size, packed, tx);
kmem_free(packed, zfsvfs->z_fuid_size);
- VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj,
- FTAG, &db));
+ VERIFY0(dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj, FTAG, &db));
dmu_buf_will_dirty(db, tx);
*(uint64_t *)db->db_data = zfsvfs->z_fuid_size;
dmu_buf_rele(db, FTAG);
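
Several of the hunks above exercise the in-kernel nvlist API (nvlist_alloc, nvlist_add_uint64, nvlist_size, nvlist_pack). A minimal, hypothetical usage sketch -- the function name and key below are invented for illustration -- shows why the return values are simply wrapped in VERIFY0: with KM_SLEEP allocations and well-formed inputs these calls are not expected to fail, so a nonzero return indicates a programming error rather than a recoverable runtime condition.

	#include <sys/nvpair.h>
	#include <sys/kmem.h>
	#include <sys/dmu.h>
	#include <sys/debug.h>

	/* Pack a tiny nvlist and write it to a DMU object (illustrative only). */
	static void
	sketch_pack_and_write(objset_t *os, uint64_t obj, dmu_tx_t *tx)
	{
		nvlist_t *nvp;
		char *packed;
		size_t nvsize;

		VERIFY0(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP));
		VERIFY0(nvlist_add_uint64(nvp, "example-idx", 1));	/* hypothetical key */
		VERIFY0(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR));

		packed = kmem_alloc(nvsize, KM_SLEEP);
		VERIFY0(nvlist_pack(nvp, &packed, &nvsize, NV_ENCODE_XDR, KM_SLEEP));
		dmu_write(os, obj, 0, nvsize, packed, tx);

		kmem_free(packed, nvsize);
		nvlist_free(nvp);
	}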
diff --git a/sys/contrib/openzfs/module/zfs/zfs_ioctl.c b/sys/contrib/openzfs/module/zfs/zfs_ioctl.c
index dcb71229f96a..121b966b9864 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_ioctl.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_ioctl.c
@@ -1493,7 +1493,7 @@ zfs_ioc_pool_create(zfs_cmd_t *zc)
goto pool_props_bad;
(void) nvlist_remove_all(props, ZPOOL_HIDDEN_ARGS);
- VERIFY(nvlist_alloc(&zplprops, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(&zplprops, NV_UNIQUE_NAME, KM_SLEEP));
error = zfs_fill_zplprops_root(version, rootprops,
zplprops, NULL);
if (error != 0)
@@ -2245,7 +2245,7 @@ nvl_add_zplprop(objset_t *os, nvlist_t *props, zfs_prop_t prop)
*/
if ((error = zfs_get_zplprop(os, prop, &value)) != 0)
return (error);
- VERIFY(nvlist_add_uint64(props, zfs_prop_to_name(prop), value) == 0);
+ VERIFY0(nvlist_add_uint64(props, zfs_prop_to_name(prop), value));
return (0);
}
@@ -2280,7 +2280,7 @@ zfs_ioc_objset_zplprops(zfs_cmd_t *zc)
dmu_objset_type(os) == DMU_OST_ZFS) {
nvlist_t *nv;
- VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP));
if ((err = nvl_add_zplprop(os, nv, ZFS_PROP_VERSION)) == 0 &&
(err = nvl_add_zplprop(os, nv, ZFS_PROP_NORMALIZE)) == 0 &&
(err = nvl_add_zplprop(os, nv, ZFS_PROP_UTF8ONLY)) == 0 &&
@@ -2483,7 +2483,7 @@ zfs_prop_set_userquota(const char *dsname, nvpair_t *pair)
if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
nvlist_t *attrs;
- VERIFY(nvpair_value_nvlist(pair, &attrs) == 0);
+ VERIFY0(nvpair_value_nvlist(pair, &attrs));
if (nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
&pair) != 0)
return (SET_ERROR(EINVAL));
@@ -2538,9 +2538,8 @@ zfs_prop_set_special(const char *dsname, zprop_source_t source,
if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
nvlist_t *attrs;
- VERIFY(nvpair_value_nvlist(pair, &attrs) == 0);
- VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
- &pair) == 0);
+ VERIFY0(nvpair_value_nvlist(pair, &attrs));
+ VERIFY0(nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &pair));
}
/* all special properties are numeric except for keylocation */
@@ -2932,14 +2931,14 @@ props_skip(nvlist_t *props, nvlist_t *skipped, nvlist_t **newprops)
{
nvpair_t *pair;
- VERIFY(nvlist_alloc(newprops, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(newprops, NV_UNIQUE_NAME, KM_SLEEP));
pair = NULL;
while ((pair = nvlist_next_nvpair(props, pair)) != NULL) {
if (nvlist_exists(skipped, nvpair_name(pair)))
continue;
- VERIFY(nvlist_add_nvpair(*newprops, pair) == 0);
+ VERIFY0(nvlist_add_nvpair(*newprops, pair));
}
}
@@ -3064,11 +3063,11 @@ zfs_ioc_inherit_prop(zfs_cmd_t *zc)
switch (type) {
case PROP_TYPE_STRING:
- VERIFY(0 == nvlist_add_string(dummy, propname, ""));
+ VERIFY0(nvlist_add_string(dummy, propname, ""));
break;
case PROP_TYPE_NUMBER:
case PROP_TYPE_INDEX:
- VERIFY(0 == nvlist_add_uint64(dummy, propname, 0));
+ VERIFY0(nvlist_add_uint64(dummy, propname, 0));
break;
default:
err = SET_ERROR(EINVAL);
@@ -3454,14 +3453,14 @@ zfs_fill_zplprops_impl(objset_t *os, uint64_t zplver,
/*
* Put the version in the zplprops
*/
- VERIFY(nvlist_add_uint64(zplprops,
- zfs_prop_to_name(ZFS_PROP_VERSION), zplver) == 0);
+ VERIFY0(nvlist_add_uint64(zplprops,
+ zfs_prop_to_name(ZFS_PROP_VERSION), zplver));
if (norm == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &norm)) != 0)
return (error);
- VERIFY(nvlist_add_uint64(zplprops,
- zfs_prop_to_name(ZFS_PROP_NORMALIZE), norm) == 0);
+ VERIFY0(nvlist_add_uint64(zplprops,
+ zfs_prop_to_name(ZFS_PROP_NORMALIZE), norm));
/*
* If we're normalizing, names must always be valid UTF-8 strings.
@@ -3471,55 +3470,55 @@ zfs_fill_zplprops_impl(objset_t *os, uint64_t zplver,
if (u8 == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &u8)) != 0)
return (error);
- VERIFY(nvlist_add_uint64(zplprops,
- zfs_prop_to_name(ZFS_PROP_UTF8ONLY), u8) == 0);
+ VERIFY0(nvlist_add_uint64(zplprops,
+ zfs_prop_to_name(ZFS_PROP_UTF8ONLY), u8));
if (sense == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_CASE, &sense)) != 0)
return (error);
- VERIFY(nvlist_add_uint64(zplprops,
- zfs_prop_to_name(ZFS_PROP_CASE), sense) == 0);
+ VERIFY0(nvlist_add_uint64(zplprops,
+ zfs_prop_to_name(ZFS_PROP_CASE), sense));
if (duq == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_DEFAULTUSERQUOTA, &duq)) != 0)
return (error);
- VERIFY(nvlist_add_uint64(zplprops,
- zfs_prop_to_name(ZFS_PROP_DEFAULTUSERQUOTA), duq) == 0);
+ VERIFY0(nvlist_add_uint64(zplprops,
+ zfs_prop_to_name(ZFS_PROP_DEFAULTUSERQUOTA), duq));
if (dgq == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_DEFAULTGROUPQUOTA,
&dgq)) != 0)
return (error);
- VERIFY(nvlist_add_uint64(zplprops,
- zfs_prop_to_name(ZFS_PROP_DEFAULTGROUPQUOTA), dgq) == 0);
+ VERIFY0(nvlist_add_uint64(zplprops,
+ zfs_prop_to_name(ZFS_PROP_DEFAULTGROUPQUOTA), dgq));
if (dpq == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_DEFAULTPROJECTQUOTA,
&dpq)) != 0)
return (error);
- VERIFY(nvlist_add_uint64(zplprops,
- zfs_prop_to_name(ZFS_PROP_DEFAULTPROJECTQUOTA), dpq) == 0);
+ VERIFY0(nvlist_add_uint64(zplprops,
+ zfs_prop_to_name(ZFS_PROP_DEFAULTPROJECTQUOTA), dpq));
if (duoq == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_DEFAULTUSEROBJQUOTA,
&duoq)) != 0)
return (error);
- VERIFY(nvlist_add_uint64(zplprops,
- zfs_prop_to_name(ZFS_PROP_DEFAULTUSEROBJQUOTA), duoq) == 0);
+ VERIFY0(nvlist_add_uint64(zplprops,
+ zfs_prop_to_name(ZFS_PROP_DEFAULTUSEROBJQUOTA), duoq));
if (dgoq == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_DEFAULTGROUPOBJQUOTA,
&dgoq)) != 0)
return (error);
- VERIFY(nvlist_add_uint64(zplprops,
- zfs_prop_to_name(ZFS_PROP_DEFAULTGROUPOBJQUOTA), dgoq) == 0);
+ VERIFY0(nvlist_add_uint64(zplprops,
+ zfs_prop_to_name(ZFS_PROP_DEFAULTGROUPOBJQUOTA), dgoq));
if (dpoq == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_DEFAULTPROJECTOBJQUOTA,
&dpoq)) != 0)
return (error);
- VERIFY(nvlist_add_uint64(zplprops,
- zfs_prop_to_name(ZFS_PROP_DEFAULTPROJECTOBJQUOTA), dpoq) == 0);
+ VERIFY0(nvlist_add_uint64(zplprops,
+ zfs_prop_to_name(ZFS_PROP_DEFAULTPROJECTOBJQUOTA), dpoq));
if (is_ci)
*is_ci = (sense == ZFS_CASE_INSENSITIVE);
@@ -3668,8 +3667,8 @@ zfs_ioc_create(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
* file system creation, so go figure them out
* now.
*/
- VERIFY(nvlist_alloc(&zct.zct_zplprops,
- NV_UNIQUE_NAME, KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(&zct.zct_zplprops,
+ NV_UNIQUE_NAME, KM_SLEEP));
error = zfs_fill_zplprops(fsname, nvprops,
zct.zct_zplprops, &is_insensitive);
if (error != 0) {
@@ -4916,9 +4915,8 @@ zfs_check_settable(const char *dsname, nvpair_t *pair, cred_t *cr)
* format.
*/
nvlist_t *attrs;
- VERIFY(nvpair_value_nvlist(pair, &attrs) == 0);
- VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
- &pair) == 0);
+ VERIFY0(nvpair_value_nvlist(pair, &attrs));
+ VERIFY0(nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &pair));
}
/*
@@ -5103,7 +5101,7 @@ zfs_check_clearable(const char *dataset, nvlist_t *props, nvlist_t **errlist)
if (props == NULL)
return (0);
- VERIFY(nvlist_alloc(&errors, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(&errors, NV_UNIQUE_NAME, KM_SLEEP));
zc = kmem_alloc(sizeof (zfs_cmd_t), KM_SLEEP);
(void) strlcpy(zc->zc_name, dataset, sizeof (zc->zc_name));
@@ -5115,9 +5113,8 @@ zfs_check_clearable(const char *dataset, nvlist_t *props, nvlist_t **errlist)
sizeof (zc->zc_value));
if ((err = zfs_check_settable(dataset, pair, CRED())) != 0 ||
(err = zfs_secpolicy_inherit_prop(zc, NULL, CRED())) != 0) {
- VERIFY(nvlist_remove_nvpair(props, pair) == 0);
- VERIFY(nvlist_add_int32(errors,
- zc->zc_value, err) == 0);
+ VERIFY0(nvlist_remove_nvpair(props, pair));
+ VERIFY0(nvlist_add_int32(errors, zc->zc_value, err));
}
pair = next_pair;
}
@@ -5127,7 +5124,7 @@ zfs_check_clearable(const char *dataset, nvlist_t *props, nvlist_t **errlist)
nvlist_free(errors);
errors = NULL;
} else {
- VERIFY(nvpair_value_int32(pair, &rv) == 0);
+ VERIFY0(nvpair_value_int32(pair, &rv));
}
if (errlist == NULL)
@@ -5144,16 +5141,14 @@ propval_equals(nvpair_t *p1, nvpair_t *p2)
if (nvpair_type(p1) == DATA_TYPE_NVLIST) {
/* dsl_prop_get_all_impl() format */
nvlist_t *attrs;
- VERIFY(nvpair_value_nvlist(p1, &attrs) == 0);
- VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
- &p1) == 0);
+ VERIFY0(nvpair_value_nvlist(p1, &attrs));
+ VERIFY0(nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &p1));
}
if (nvpair_type(p2) == DATA_TYPE_NVLIST) {
nvlist_t *attrs;
- VERIFY(nvpair_value_nvlist(p2, &attrs) == 0);
- VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
- &p2) == 0);
+ VERIFY0(nvpair_value_nvlist(p2, &attrs));
+ VERIFY0(nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &p2));
}
if (nvpair_type(p1) != nvpair_type(p2))
@@ -5162,14 +5157,14 @@ propval_equals(nvpair_t *p1, nvpair_t *p2)
if (nvpair_type(p1) == DATA_TYPE_STRING) {
const char *valstr1, *valstr2;
- VERIFY(nvpair_value_string(p1, &valstr1) == 0);
- VERIFY(nvpair_value_string(p2, &valstr2) == 0);
+ VERIFY0(nvpair_value_string(p1, &valstr1));
+ VERIFY0(nvpair_value_string(p2, &valstr2));
return (strcmp(valstr1, valstr2) == 0);
} else {
uint64_t intval1, intval2;
- VERIFY(nvpair_value_uint64(p1, &intval1) == 0);
- VERIFY(nvpair_value_uint64(p2, &intval2) == 0);
+ VERIFY0(nvpair_value_uint64(p1, &intval1));
+ VERIFY0(nvpair_value_uint64(p2, &intval2));
return (intval1 == intval2);
}
}
@@ -5237,7 +5232,7 @@ extract_delay_props(nvlist_t *props)
};
int i;
- VERIFY(nvlist_alloc(&delayprops, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+ VERIFY0(nvlist_alloc(&delayprops, NV_UNIQUE_NAME, KM_SLEEP));
for (nvp = nvlist_next_nvpair(props, NULL); nvp != NULL;
nvp = nvlist_next_nvpair(props, nvp)) {
@@ -5253,8 +5248,8 @@ extract_delay_props(nvlist_t *props)
}
if (delayable[i] != 0) {
tmp = nvlist_prev_nvpair(props, nvp);
- VERIFY(nvlist_add_nvpair(delayprops, nvp) == 0);
- VERIFY(nvlist_remove_nvpair(props, nvp) == 0);
+ VERIFY0(nvlist_add_nvpair(delayprops, nvp));
+ VERIFY0(nvlist_remove_nvpair(props, nvp));
nvp = tmp;
}
}
@@ -5485,15 +5480,15 @@ zfs_ioc_recv_impl(char *tofs, char *tosnap, const char *origin,
* using ASSERT() will be just like a VERIFY.
*/
if (recv_delayprops != NULL) {
- ASSERT(nvlist_merge(recvprops, recv_delayprops, 0) == 0);
+ ASSERT0(nvlist_merge(recvprops, recv_delayprops, 0));
nvlist_free(recv_delayprops);
}
if (local_delayprops != NULL) {
- ASSERT(nvlist_merge(localprops, local_delayprops, 0) == 0);
+ ASSERT0(nvlist_merge(localprops, local_delayprops, 0));
nvlist_free(local_delayprops);
}
if (inherited_delayprops != NULL) {
- ASSERT(nvlist_merge(localprops, inherited_delayprops, 0) == 0);
+ ASSERT0(nvlist_merge(localprops, inherited_delayprops, 0));
nvlist_free(inherited_delayprops);
}
*read_bytes = off - noff;
@@ -7342,8 +7337,8 @@ zfs_ioctl_register_legacy(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func,
ASSERT3U(ioc, >=, ZFS_IOC_FIRST);
ASSERT3U(ioc, <, ZFS_IOC_LAST);
- ASSERT3P(vec->zvec_legacy_func, ==, NULL);
- ASSERT3P(vec->zvec_func, ==, NULL);
+ ASSERT0P(vec->zvec_legacy_func);
+ ASSERT0P(vec->zvec_func);
vec->zvec_legacy_func = func;
vec->zvec_secpolicy = secpolicy;
@@ -7366,8 +7361,8 @@ zfs_ioctl_register(const char *name, zfs_ioc_t ioc, zfs_ioc_func_t *func,
ASSERT3U(ioc, >=, ZFS_IOC_FIRST);
ASSERT3U(ioc, <, ZFS_IOC_LAST);
- ASSERT3P(vec->zvec_legacy_func, ==, NULL);
- ASSERT3P(vec->zvec_func, ==, NULL);
+ ASSERT0P(vec->zvec_legacy_func);
+ ASSERT0P(vec->zvec_func);
/* if we are logging, the name must be valid */
ASSERT(!allow_log || namecheck != NO_NAME);
@@ -8148,7 +8143,7 @@ zfsdev_ioctl_common(uint_t vecnum, zfs_cmd_t *zc, int flag)
spa_t *spa;
nvlist_t *lognv = NULL;
- ASSERT(vec->zvec_legacy_func == NULL);
+ ASSERT0P(vec->zvec_legacy_func);
/*
* Add the innvl to the lognv before calling the func,
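
The zfs_ioctl.c changes follow the same pattern, with ASSERT0P() standing in for ASSERT3P(ptr, ==, NULL) where the ioctl tables assert that a handler slot has not already been filled. A small userland mock of that register-once invariant; names are illustrative and the stand-in ASSERT0P below is not the SPL macro:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define ASSERT0P(p)	assert((p) == NULL)	/* stand-in, not the SPL macro */

typedef int (*ioc_func_t)(void);

static ioc_func_t ioc_table[4];

/* Install a handler, asserting the slot was never registered before. */
static void
ioc_register(unsigned idx, ioc_func_t fn)
{
	ASSERT0P(ioc_table[idx]);
	ioc_table[idx] = fn;
}

static int
hello_ioc(void)
{
	return (0);
}

int
main(void)
{
	ioc_register(0, hello_ioc);
	printf("slot 0 -> %d\n", ioc_table[0]());
	return (0);
}
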
diff --git a/sys/contrib/openzfs/module/zfs/zfs_log.c b/sys/contrib/openzfs/module/zfs/zfs_log.c
index 2f61ecfd9b3b..ea17e049279f 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_log.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_log.c
@@ -620,7 +620,7 @@ zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
if (zil_replaying(zilog, tx) || zp->z_unlinked ||
zfs_xattr_owner_unlinked(zp)) {
if (callback != NULL)
- callback(callback_data);
+ callback(callback_data, 0);
return;
}
@@ -663,7 +663,7 @@ zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
DMU_KEEP_CACHING);
DB_DNODE_EXIT(db);
if (err != 0) {
- zil_itx_destroy(itx);
+ zil_itx_destroy(itx, 0);
itx = zil_itx_create(txtype, sizeof (*lr));
lr = (lr_write_t *)&itx->itx_lr;
wr_state = WR_NEED_COPY;
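
The zfs_log.c hunks show a knock-on effect of the ZIL changes further down: itx callbacks and zil_itx_destroy() now carry an error code, so a producer whose record is thrown away by a crashed ZIL can learn that it never reached the log. A sketch of the implied callback shape; only the (void *, int) signature is taken from the diff, the typedef and struct names are illustrative:

#include <stdio.h>
#include <stdlib.h>

/* Assumed shape of the new itx callback: data pointer plus an errno. */
typedef void (*itx_cb_t)(void *data, int err);

struct write_state {
	void	*buf;		/* buffer pinned for the in-flight record */
	int	failed;
};

static void
write_done(void *data, int err)
{
	struct write_state *ws = data;

	if (err != 0)
		ws->failed = 1;	/* record was dropped, e.g. ZIL crash (EIO) */
	free(ws->buf);		/* always release the pinned buffer */
	ws->buf = NULL;
}

int
main(void)
{
	struct write_state ws = { .buf = malloc(16), .failed = 0 };
	itx_cb_t cb = write_done;

	cb(&ws, 0);		/* normal completion path */
	printf("failed=%d\n", ws.failed);
	return (0);
}
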
diff --git a/sys/contrib/openzfs/module/zfs/zfs_quota.c b/sys/contrib/openzfs/module/zfs/zfs_quota.c
index b8fe512d4f09..2e91ccc27d6d 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_quota.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_quota.c
@@ -374,7 +374,7 @@ zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
if (*objp == 0) {
*objp = zap_create(zfsvfs->z_os, DMU_OT_USERGROUP_QUOTA,
DMU_OT_NONE, 0, tx);
- VERIFY(0 == zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
+ VERIFY0(zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[type], 8, 1, objp, tx));
}
mutex_exit(&zfsvfs->z_lock);
@@ -386,7 +386,7 @@ zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
} else {
err = zap_update(zfsvfs->z_os, *objp, buf, 8, 1, &quota, tx);
}
- ASSERT(err == 0);
+ ASSERT0(err);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
dmu_tx_commit(tx);
diff --git a/sys/contrib/openzfs/module/zfs/zfs_rlock.c b/sys/contrib/openzfs/module/zfs/zfs_rlock.c
index 53eb3ef1b66e..4035baff77d6 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_rlock.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_rlock.c
@@ -666,7 +666,7 @@ zfs_rangelock_reduce(zfs_locked_range_t *lr, uint64_t off, uint64_t len)
/* Ensure there are no other locks */
ASSERT3U(avl_numnodes(&rl->rl_tree), ==, 1);
- ASSERT3U(lr->lr_offset, ==, 0);
+ ASSERT0(lr->lr_offset);
ASSERT3U(lr->lr_type, ==, RL_WRITER);
ASSERT(!lr->lr_proxy);
ASSERT3U(lr->lr_length, ==, UINT64_MAX);
diff --git a/sys/contrib/openzfs/module/zfs/zfs_sa.c b/sys/contrib/openzfs/module/zfs/zfs_sa.c
index 59b6ae4e4203..8b4fc6fd7fbd 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_sa.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_sa.c
@@ -169,7 +169,7 @@ zfs_sa_set_scanstamp(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
ASSERT(MUTEX_HELD(&zp->z_lock));
VERIFY((xoap = xva_getxoptattr(xvap)) != NULL);
if (zp->z_is_sa)
- VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SCANSTAMP(zfsvfs),
+ VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_SCANSTAMP(zfsvfs),
&xoap->xoa_av_scanstamp,
sizeof (xoap->xoa_av_scanstamp), tx));
else {
@@ -181,12 +181,12 @@ zfs_sa_set_scanstamp(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
len = sizeof (xoap->xoa_av_scanstamp) +
ZFS_OLD_ZNODE_PHYS_SIZE;
if (len > doi.doi_bonus_size)
- VERIFY(dmu_set_bonus(db, len, tx) == 0);
+ VERIFY0(dmu_set_bonus(db, len, tx));
(void) memcpy((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
xoap->xoa_av_scanstamp, sizeof (xoap->xoa_av_scanstamp));
zp->z_pflags |= ZFS_BONUS_SCANSTAMP;
- VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
+ VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
&zp->z_pflags, sizeof (uint64_t), tx));
}
}
@@ -286,7 +286,7 @@ zfs_sa_set_xattr(znode_t *zp, const char *name, const void *value, size_t vsize)
dmu_tx_commit(tx);
if (logsaxattr && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ error = zil_commit(zilog, 0);
}
out_free:
vmem_free(obj, size);
@@ -427,11 +427,10 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP;
}
- VERIFY(dmu_set_bonustype(db, DMU_OT_SA, tx) == 0);
- VERIFY(sa_replace_all_by_template_locked(hdl, sa_attrs,
- count, tx) == 0);
+ VERIFY0(dmu_set_bonustype(db, DMU_OT_SA, tx));
+ VERIFY0(sa_replace_all_by_template_locked(hdl, sa_attrs, count, tx));
if (znode_acl.z_acl_extern_obj)
- VERIFY(0 == dmu_object_free(zfsvfs->z_os,
+ VERIFY0(dmu_object_free(zfsvfs->z_os,
znode_acl.z_acl_extern_obj, tx));
zp->z_is_sa = B_TRUE;
diff --git a/sys/contrib/openzfs/module/zfs/zfs_vnops.c b/sys/contrib/openzfs/module/zfs/zfs_vnops.c
index 74aa91a4f2eb..7bb9ba57c69e 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_vnops.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_vnops.c
@@ -27,6 +27,7 @@
* Copyright 2017 Nexenta Systems, Inc.
* Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
* Copyright (c) 2025, Rob Norris <robn@despairlabs.com>
+ * Copyright (c) 2025, Klara, Inc.
*/
/* Portions Copyright 2007 Jeremy Teo */
@@ -116,7 +117,7 @@ zfs_fsync(znode_t *zp, int syncflag, cred_t *cr)
if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (error);
- zil_commit(zfsvfs->z_log, zp->z_id);
+ error = zil_commit(zfsvfs->z_log, zp->z_id);
zfs_exit(zfsvfs, FTAG);
}
return (error);
@@ -375,8 +376,13 @@ zfs_read(struct znode *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
frsync = !!(ioflag & FRSYNC);
#endif
if (zfsvfs->z_log &&
- (frsync || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
- zil_commit(zfsvfs->z_log, zp->z_id);
+ (frsync || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)) {
+ error = zil_commit(zfsvfs->z_log, zp->z_id);
+ if (error != 0) {
+ zfs_exit(zfsvfs, FTAG);
+ return (error);
+ }
+ }
/*
* Lock the range against changes.
@@ -1074,8 +1080,13 @@ zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
return (error);
}
- if (commit)
- zil_commit(zilog, zp->z_id);
+ if (commit) {
+ error = zil_commit(zilog, zp->z_id);
+ if (error != 0) {
+ zfs_exit(zfsvfs, FTAG);
+ return (error);
+ }
+ }
int64_t nwritten = start_resid - zfs_uio_resid(uio);
dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
@@ -1260,8 +1271,8 @@ zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
zilog = zfsvfs->z_log;
error = zfs_setacl(zp, vsecp, skipaclchk, cr);
- if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
- zil_commit(zilog, 0);
+ if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ error = zil_commit(zilog, 0);
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -1946,7 +1957,7 @@ unlock:
ZFS_ACCESSTIME_STAMP(inzfsvfs, inzp);
if (outos->os_sync == ZFS_SYNC_ALWAYS) {
- zil_commit(zilog, outzp->z_id);
+ error = zil_commit(zilog, outzp->z_id);
}
*inoffp += done;
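
Across zfs_fsync(), zfs_read(), zfs_write() and zfs_setsecattr(), the rule introduced here is the same: zil_commit() can now fail (the pool suspended with failmode=continue), and the caller must return that error to userspace while still dropping the zfs_enter() hold it took. A runnable mock of the control flow, with stand-in functions rather than the in-tree ones:

#include <errno.h>
#include <stdio.h>

static int mock_enter(void) { return (0); }	/* zfs_enter_verify_zp() stand-in */
static void mock_exit(void) { }			/* zfs_exit() stand-in */
static int mock_zil_commit(void) { return (EIO); }	/* a failing commit */

/*
 * Sync path: take the teardown hold, commit, and always drop the hold
 * before returning, whether or not the commit failed.
 */
static int
sync_path(void)
{
	int error;

	if ((error = mock_enter()) != 0)
		return (error);

	error = mock_zil_commit();

	mock_exit();
	return (error);
}

int
main(void)
{
	printf("sync_path() -> %d\n", sync_path());	/* EIO from the failing commit */
	return (0);
}
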
diff --git a/sys/contrib/openzfs/module/zfs/zil.c b/sys/contrib/openzfs/module/zfs/zil.c
index 6e4f84257407..31b59c55f17b 100644
--- a/sys/contrib/openzfs/module/zfs/zil.c
+++ b/sys/contrib/openzfs/module/zfs/zil.c
@@ -24,6 +24,7 @@
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright (c) 2018 Datto Inc.
+ * Copyright (c) 2025, Klara, Inc.
*/
/* Portions Copyright 2010 Robert Milkowski */
@@ -103,6 +104,7 @@ static zil_kstat_values_t zil_stats = {
{ "zil_commit_error_count", KSTAT_DATA_UINT64 },
{ "zil_commit_stall_count", KSTAT_DATA_UINT64 },
{ "zil_commit_suspend_count", KSTAT_DATA_UINT64 },
+ { "zil_commit_crash_count", KSTAT_DATA_UINT64 },
{ "zil_itx_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 },
@@ -145,7 +147,7 @@ static uint64_t zil_slog_bulk = 64 * 1024 * 1024;
static kmem_cache_t *zil_lwb_cache;
static kmem_cache_t *zil_zcw_cache;
-static void zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx);
+static int zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx);
static itx_t *zil_itx_clone(itx_t *oitx);
static uint64_t zil_max_waste_space(zilog_t *zilog);
@@ -367,6 +369,7 @@ zil_sums_init(zil_sums_t *zs)
wmsum_init(&zs->zil_commit_error_count, 0);
wmsum_init(&zs->zil_commit_stall_count, 0);
wmsum_init(&zs->zil_commit_suspend_count, 0);
+ wmsum_init(&zs->zil_commit_crash_count, 0);
wmsum_init(&zs->zil_itx_count, 0);
wmsum_init(&zs->zil_itx_indirect_count, 0);
wmsum_init(&zs->zil_itx_indirect_bytes, 0);
@@ -392,6 +395,7 @@ zil_sums_fini(zil_sums_t *zs)
wmsum_fini(&zs->zil_commit_error_count);
wmsum_fini(&zs->zil_commit_stall_count);
wmsum_fini(&zs->zil_commit_suspend_count);
+ wmsum_fini(&zs->zil_commit_crash_count);
wmsum_fini(&zs->zil_itx_count);
wmsum_fini(&zs->zil_itx_indirect_count);
wmsum_fini(&zs->zil_itx_indirect_bytes);
@@ -422,6 +426,8 @@ zil_kstat_values_update(zil_kstat_values_t *zs, zil_sums_t *zil_sums)
wmsum_value(&zil_sums->zil_commit_stall_count);
zs->zil_commit_suspend_count.value.ui64 =
wmsum_value(&zil_sums->zil_commit_suspend_count);
+ zs->zil_commit_crash_count.value.ui64 =
+ wmsum_value(&zil_sums->zil_commit_crash_count);
zs->zil_itx_count.value.ui64 =
wmsum_value(&zil_sums->zil_itx_count);
zs->zil_itx_indirect_count.value.ui64 =
@@ -864,9 +870,9 @@ zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
ASSERT(MUTEX_HELD(&zilog->zl_lock));
ASSERT(lwb->lwb_state == LWB_STATE_NEW ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE);
- ASSERT3P(lwb->lwb_child_zio, ==, NULL);
- ASSERT3P(lwb->lwb_write_zio, ==, NULL);
- ASSERT3P(lwb->lwb_root_zio, ==, NULL);
+ ASSERT0P(lwb->lwb_child_zio);
+ ASSERT0P(lwb->lwb_write_zio);
+ ASSERT0P(lwb->lwb_root_zio);
ASSERT3U(lwb->lwb_alloc_txg, <=, spa_syncing_txg(zilog->zl_spa));
ASSERT3U(lwb->lwb_max_txg, <=, spa_syncing_txg(zilog->zl_spa));
VERIFY(list_is_empty(&lwb->lwb_itxs));
@@ -991,8 +997,8 @@ zil_create(zilog_t *zilog)
*/
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
- ASSERT(zh->zh_claim_txg == 0);
- ASSERT(zh->zh_replay_seq == 0);
+ ASSERT0(zh->zh_claim_txg);
+ ASSERT0(zh->zh_replay_seq);
blk = zh->zh_log;
@@ -1104,7 +1110,7 @@ zil_destroy(zilog_t *zilog, boolean_t keep_first)
zilog->zl_keep_first = keep_first;
if (!list_is_empty(&zilog->zl_lwb_list)) {
- ASSERT(zh->zh_claim_txg == 0);
+ ASSERT0(zh->zh_claim_txg);
VERIFY(!keep_first);
while ((lwb = list_remove_head(&zilog->zl_lwb_list)) != NULL) {
if (lwb->lwb_buf != NULL)
@@ -1250,7 +1256,7 @@ zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
blkptr_t *bp;
int error;
- ASSERT(tx == NULL);
+ ASSERT0P(tx);
error = dmu_objset_from_ds(ds, &os);
if (error != 0) {
@@ -1351,7 +1357,7 @@ zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb)
ASSERT(!list_link_active(&zcw->zcw_node));
list_insert_tail(&lwb->lwb_waiters, zcw);
- ASSERT3P(zcw->zcw_lwb, ==, NULL);
+ ASSERT0P(zcw->zcw_lwb);
zcw->zcw_lwb = lwb;
}
@@ -1365,7 +1371,7 @@ zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb)
{
ASSERT(!list_link_active(&zcw->zcw_node));
list_insert_tail(nolwb, zcw);
- ASSERT3P(zcw->zcw_lwb, ==, NULL);
+ ASSERT0P(zcw->zcw_lwb);
}
void
@@ -1482,7 +1488,7 @@ zil_lwb_flush_vdevs_done(zio_t *zio)
}
while ((itx = list_remove_head(&lwb->lwb_itxs)) != NULL)
- zil_itx_destroy(itx);
+ zil_itx_destroy(itx, 0);
while ((zcw = list_remove_head(&lwb->lwb_waiters)) != NULL) {
mutex_enter(&zcw->zcw_lock);
@@ -1895,7 +1901,7 @@ zil_lwb_write_close(zilog_t *zilog, lwb_t *lwb, lwb_state_t state)
/*
* Finalize previously closed block and issue the write zio.
*/
-static void
+static int
zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
{
spa_t *spa = zilog->zl_spa;
@@ -1909,8 +1915,13 @@ zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
/* Actually fill the lwb with the data. */
for (itx_t *itx = list_head(&lwb->lwb_itxs); itx;
- itx = list_next(&lwb->lwb_itxs, itx))
- zil_lwb_commit(zilog, lwb, itx);
+ itx = list_next(&lwb->lwb_itxs, itx)) {
+ error = zil_lwb_commit(zilog, lwb, itx);
+ if (error != 0) {
+ ASSERT3U(error, ==, ESHUTDOWN);
+ return (error);
+ }
+ }
lwb->lwb_nused = lwb->lwb_nfilled;
ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_nmax);
@@ -1928,7 +1939,7 @@ zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
lwb->lwb_state = LWB_STATE_READY;
if (BP_IS_HOLE(&lwb->lwb_blk) && lwb->lwb_error == 0) {
mutex_exit(&zilog->zl_lock);
- return;
+ return (0);
}
mutex_exit(&zilog->zl_lock);
@@ -2065,6 +2076,8 @@ next_lwb:
lwb = nlwb;
if (lwb)
goto next_lwb;
+
+ return (0);
}
/*
@@ -2308,11 +2321,13 @@ cont:
return (lwb);
}
+static void zil_crash(zilog_t *zilog);
+
/*
* Fill the actual transaction data into the lwb, following zil_lwb_assign().
* Does not require locking.
*/
-static void
+static int
zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx)
{
lr_t *lr, *lrb;
@@ -2324,7 +2339,7 @@ zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx)
lrw = (lr_write_t *)lr;
if (lr->lrc_txtype == TX_COMMIT)
- return;
+ return (0);
reclen = lr->lrc_reclen;
dlen = zil_itx_data_size(itx);
@@ -2410,16 +2425,35 @@ zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx)
". Falling back to txg_wait_synced().",
error);
zfs_fallthrough;
- case EIO:
- txg_wait_synced(zilog->zl_dmu_pool,
- lr->lrc_txg);
+ case EIO: {
+ int error = txg_wait_synced_flags(
+ zilog->zl_dmu_pool,
+ lr->lrc_txg, TXG_WAIT_SUSPEND);
+ if (error != 0) {
+ ASSERT3U(error, ==, ESHUTDOWN);
+ /*
+ * zil_lwb_commit() is called from a
+ * loop over a list of itxs at the
+ * top of zil_lwb_write_issue(), which
+ * itself is called from a loop over a
+ * list of lwbs in various places.
+ * zil_crash() will free those itxs
+ * and sometimes the lwbs, so they
+ * are invalid when zil_crash() returns.
+ * Callers must pretty much abort
+ * immediately.
+ */
+ zil_crash(zilog);
+ return (error);
+ }
zfs_fallthrough;
+ }
case ENOENT:
zfs_fallthrough;
case EEXIST:
zfs_fallthrough;
case EALREADY:
- return;
+ return (0);
}
}
}
@@ -2427,6 +2461,8 @@ zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx)
lwb->lwb_nfilled += reclen + dlen;
ASSERT3S(lwb->lwb_nfilled, <=, lwb->lwb_nused);
ASSERT0(P2PHASE(lwb->lwb_nfilled, sizeof (uint64_t)));
+
+ return (0);
}
itx_t *
@@ -2468,7 +2504,7 @@ zil_itx_clone(itx_t *oitx)
}
void
-zil_itx_destroy(itx_t *itx)
+zil_itx_destroy(itx_t *itx, int err)
{
ASSERT3U(itx->itx_size, >=, sizeof (itx_t));
ASSERT3U(itx->itx_lr.lrc_reclen, ==,
@@ -2477,7 +2513,7 @@ zil_itx_destroy(itx_t *itx)
IMPLY(itx->itx_callback != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
if (itx->itx_callback != NULL)
- itx->itx_callback(itx->itx_callback_data);
+ itx->itx_callback(itx->itx_callback_data, err);
zio_data_buf_free(itx, itx->itx_size);
}
@@ -2520,7 +2556,7 @@ zil_itxg_clean(void *arg)
if (itx->itx_lr.lrc_txtype == TX_COMMIT)
zil_commit_waiter_skip(itx->itx_private);
- zil_itx_destroy(itx);
+ zil_itx_destroy(itx, 0);
}
cookie = NULL;
@@ -2530,7 +2566,7 @@ zil_itxg_clean(void *arg)
while ((itx = list_remove_head(list)) != NULL) {
/* commit itxs should never be on the async lists. */
ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
- zil_itx_destroy(itx);
+ zil_itx_destroy(itx, 0);
}
list_destroy(list);
kmem_free(ian, sizeof (itx_async_node_t));
@@ -2592,7 +2628,7 @@ zil_remove_async(zilog_t *zilog, uint64_t oid)
while ((itx = list_remove_head(&clean_list)) != NULL) {
/* commit itxs should never be on the async lists. */
ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
- zil_itx_destroy(itx);
+ zil_itx_destroy(itx, 0);
}
list_destroy(&clean_list);
}
@@ -2677,6 +2713,67 @@ zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
}
/*
+ * Post-crash cleanup. This is called from zil_clean() because it needs to
+ * do cleanup after every txg until the ZIL is restarted, and zilog_dirty()
+ * can arrange that easily, unlike zil_sync(), which is harder to get
+ * called without actual dirty data.
+ */
+static void
+zil_crash_clean(zilog_t *zilog, uint64_t synced_txg)
+{
+ ASSERT(MUTEX_HELD(&zilog->zl_lock));
+ ASSERT3U(zilog->zl_restart_txg, >, 0);
+
+ /* Clean up anything on the crash list from earlier txgs */
+ lwb_t *lwb;
+ while ((lwb = list_head(&zilog->zl_lwb_crash_list)) != NULL) {
+ if (lwb->lwb_alloc_txg >= synced_txg ||
+ lwb->lwb_max_txg >= synced_txg) {
+ /*
+ * This lwb was allocated or updated on this txg, or
+ * in the future. We stop processing here, to avoid
+ * the strange situation of freeing a ZIL block on the
+ * same or earlier txg than what it was
+ * allocated for.
+ *
+ * We'll take care of it on the next txg.
+ */
+ break;
+ }
+
+ /* This LWB is from the past, so we can clean it up now. */
+ list_remove(&zilog->zl_lwb_crash_list, lwb);
+ if (lwb->lwb_buf != NULL)
+ zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
+ if (!BP_IS_HOLE(&lwb->lwb_blk))
+ /*
+ * Free on the next txg, since zil_clean() is called
+ * once synced_txg has already been completed.
+ */
+ zio_free(zilog->zl_spa, synced_txg+1, &lwb->lwb_blk);
+ zil_free_lwb(zilog, lwb);
+ }
+
+ if (zilog->zl_restart_txg > synced_txg) {
+ /*
+ * Not reached the restart txg yet, so mark the ZIL dirty for
+ * the next txg and we'll consider it all again then.
+ */
+ zilog_dirty(zilog, synced_txg+1);
+ return;
+ }
+
+ /*
+ * Reached the restart txg, so we can allow new calls to zil_commit().
+ * All ZIL txgs have long since passed, so there should be no IO waiting.
+ */
+ ASSERT(list_is_empty(&zilog->zl_lwb_list));
+ ASSERT(list_is_empty(&zilog->zl_lwb_crash_list));
+
+ zilog->zl_restart_txg = 0;
+}
+
+/*
* If there are any in-memory intent log transactions which have now been
* synced then start up a taskq to free them. We should only do this after we
* have written out the uberblocks (i.e. txg has been committed) so that
@@ -2691,6 +2788,15 @@ zil_clean(zilog_t *zilog, uint64_t synced_txg)
ASSERT3U(synced_txg, <, ZILTEST_TXG);
+ /* Do cleanup and restart after crash. */
+ if (zilog->zl_restart_txg > 0) {
+ mutex_enter(&zilog->zl_lock);
+ /* Make sure we didn't lose a race. */
+ if (zilog->zl_restart_txg > 0)
+ zil_crash_clean(zilog, synced_txg);
+ mutex_exit(&zilog->zl_lock);
+ }
+
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
mutex_exit(&itxg->itxg_lock);
@@ -2883,13 +2989,13 @@ zil_prune_commit_list(zilog_t *zilog)
mutex_exit(&zilog->zl_lock);
list_remove(&zilog->zl_itx_commit_list, itx);
- zil_itx_destroy(itx);
+ zil_itx_destroy(itx, 0);
}
IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
}
-static void
+static int
zil_commit_writer_stall(zilog_t *zilog)
{
/*
@@ -2914,8 +3020,22 @@ zil_commit_writer_stall(zilog_t *zilog)
*/
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ZIL_STAT_BUMP(zilog, zil_commit_stall_count);
- txg_wait_synced(zilog->zl_dmu_pool, 0);
+
+ int err = txg_wait_synced_flags(zilog->zl_dmu_pool, 0,
+ TXG_WAIT_SUSPEND);
+ if (err != 0) {
+ ASSERT3U(err, ==, ESHUTDOWN);
+ zil_crash(zilog);
+ }
+
+ /*
+ * Either zil_sync() has been called to wait for and clean up any
+ * in-flight LWBs, or zil_crash() has emptied out the list and arranged
+ * for them to be cleaned up later.
+ */
ASSERT(list_is_empty(&zilog->zl_lwb_list));
+
+ return (err);
}
static void
@@ -3082,7 +3202,7 @@ zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
} else {
ASSERT3S(lrc->lrc_txtype, !=, TX_COMMIT);
zilog->zl_cur_left -= zil_itx_full_size(itx);
- zil_itx_destroy(itx);
+ zil_itx_destroy(itx, 0);
}
}
@@ -3093,9 +3213,14 @@ zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
* the ZIL write pipeline; see the comment within
* zil_commit_writer_stall() for more details.
*/
- while ((lwb = list_remove_head(ilwbs)) != NULL)
- zil_lwb_write_issue(zilog, lwb);
- zil_commit_writer_stall(zilog);
+ int err = 0;
+ while ((lwb = list_remove_head(ilwbs)) != NULL) {
+ err = zil_lwb_write_issue(zilog, lwb);
+ if (err != 0)
+ break;
+ }
+ if (err == 0)
+ err = zil_commit_writer_stall(zilog);
/*
* Additionally, we have to signal and mark the "nolwb"
@@ -3113,7 +3238,7 @@ zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
* the itx's callback if one exists for the itx.
*/
while ((itx = list_remove_head(&nolwb_itxs)) != NULL)
- zil_itx_destroy(itx);
+ zil_itx_destroy(itx, 0);
} else {
ASSERT(list_is_empty(&nolwb_waiters));
ASSERT3P(lwb, !=, NULL);
@@ -3169,9 +3294,15 @@ zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
list_insert_tail(ilwbs, lwb);
lwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_NEW);
if (lwb == NULL) {
- while ((lwb = list_remove_head(ilwbs)) != NULL)
- zil_lwb_write_issue(zilog, lwb);
- zil_commit_writer_stall(zilog);
+ int err = 0;
+ while ((lwb =
+ list_remove_head(ilwbs)) != NULL) {
+ err = zil_lwb_write_issue(zilog, lwb);
+ if (err != 0)
+ break;
+ }
+ if (err == 0)
+ zil_commit_writer_stall(zilog);
}
}
}
@@ -3230,10 +3361,23 @@ zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw)
zil_prune_commit_list(zilog);
zil_process_commit_list(zilog, zcw, &ilwbs);
+ /*
+ * If the ZIL failed somewhere inside zil_process_commit_list(), it
+ * will be because a fallback to txg_wait_synced_flags() happened at
+ * some point (eg zil_commit_writer_stall()). All those cases issue and
+ * empty ilwbs, so there will be nothing to do in the issue loop below.
+ * That's why we don't have to plumb the error value back from
+ * zil_process_commit_list(), and don't have to skip it.
+ */
+ IMPLY(zilog->zl_restart_txg > 0, list_is_empty(&ilwbs));
+
out:
mutex_exit(&zilog->zl_issuer_lock);
- while ((lwb = list_remove_head(&ilwbs)) != NULL)
- zil_lwb_write_issue(zilog, lwb);
+ int err = 0;
+ while ((lwb = list_remove_head(&ilwbs)) != NULL) {
+ if (err == 0)
+ err = zil_lwb_write_issue(zilog, lwb);
+ }
list_destroy(&ilwbs);
return (wtxg);
}
@@ -3489,7 +3633,7 @@ static void
zil_free_commit_waiter(zil_commit_waiter_t *zcw)
{
ASSERT(!list_link_active(&zcw->zcw_node));
- ASSERT3P(zcw->zcw_lwb, ==, NULL);
+ ASSERT0P(zcw->zcw_lwb);
ASSERT3B(zcw->zcw_done, ==, B_TRUE);
mutex_destroy(&zcw->zcw_lock);
cv_destroy(&zcw->zcw_cv);
@@ -3526,6 +3670,96 @@ zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
}
/*
+ * Crash the ZIL. This is something like suspending, but abandons the ZIL
+ * without further IO until the wanted txg completes. No effort is made to
+ * close the on-disk chain or do any other on-disk work, as the pool may
+ * have suspended. zil_sync() will handle cleanup as normal and restart the
+ * ZIL once enough txgs have passed.
+ */
+static void
+zil_crash(zilog_t *zilog)
+{
+ mutex_enter(&zilog->zl_lock);
+
+ uint64_t txg = spa_syncing_txg(zilog->zl_spa);
+ uint64_t restart_txg =
+ spa_syncing_txg(zilog->zl_spa) + TXG_CONCURRENT_STATES;
+
+ if (zilog->zl_restart_txg > 0) {
+ /*
+ * If the ZIL is already crashed, it's almost certainly because
+ * we lost a race involving multiple callers from
+ * zil_commit_impl().
+ */
+
+ /*
+ * This sanity check is to support my understanding that in the
+ * event of multiple callers to zil_crash(), only one of them
+ * can possibly be in the codepath to issue lwbs; the rest
+ * should be calling from zil_commit_impl() after their waiters
+ * have completed. As I understand it, a second thread trying
+ * to issue will eventually wait on zl_issuer_lock, and then
+ * have no work to do and leave.
+ *
+ * If more lwbs had been created and issued between zil_crash()
+ * calls, then we probably just need to take those too, add
+ * them to the crash list and clean them up, but it complicates
+ * this function and I don't think it can happen.
+ */
+ ASSERT(list_is_empty(&zilog->zl_lwb_list));
+
+ mutex_exit(&zilog->zl_lock);
+ return;
+ }
+
+ zilog->zl_restart_txg = restart_txg;
+
+ /*
+ * Capture any live LWBs. Depending on the state of the pool they may
+ * represent in-flight IO that won't return for some time, and we want
+ * to make sure they don't get in the way of normal ZIL operation.
+ */
+ ASSERT(list_is_empty(&zilog->zl_lwb_crash_list));
+ list_move_tail(&zilog->zl_lwb_crash_list, &zilog->zl_lwb_list);
+
+ /*
+ * Run through the LWB list; erroring all itxes and signalling error
+ * to all waiters.
+ */
+ for (lwb_t *lwb = list_head(&zilog->zl_lwb_crash_list); lwb != NULL;
+ lwb = list_next(&zilog->zl_lwb_crash_list, lwb)) {
+ itx_t *itx;
+ while ((itx = list_remove_head(&lwb->lwb_itxs)) != NULL)
+ zil_itx_destroy(itx, EIO);
+
+ zil_commit_waiter_t *zcw;
+ while ((zcw = list_remove_head(&lwb->lwb_waiters)) != NULL) {
+ mutex_enter(&zcw->zcw_lock);
+ zcw->zcw_lwb = NULL;
+ zcw->zcw_zio_error = EIO;
+ zcw->zcw_done = B_TRUE;
+ cv_broadcast(&zcw->zcw_cv);
+ mutex_exit(&zcw->zcw_lock);
+ }
+ }
+
+ /*
+ * Zero the ZIL header bp after the ZIL restarts. We'll free it in
+ * zil_clean() when we clean up the lwbs.
+ */
+ zil_header_t *zh = zil_header_in_syncing_context(zilog);
+ BP_ZERO(&zh->zh_log);
+
+ /*
+ * Mark this ZIL dirty on the next txg, so that zil_clean() will be
+ * called for cleanup.
+ */
+ zilog_dirty(zilog, txg+1);
+
+ mutex_exit(&zilog->zl_lock);
+}
+
+/*
* Commit ZFS Intent Log transactions (itxs) to stable storage.
*
* When writing ZIL transactions to the on-disk representation of the
@@ -3640,9 +3874,17 @@ zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
* but the order in which they complete will be the same order in
* which they were created.
*/
-void
+static int zil_commit_impl(zilog_t *zilog, uint64_t foid);
+
+int
zil_commit(zilog_t *zilog, uint64_t foid)
{
+ return (zil_commit_flags(zilog, foid, ZIL_COMMIT_FAILMODE));
+}
+
+int
+zil_commit_flags(zilog_t *zilog, uint64_t foid, zil_commit_flag_t flags)
+{
/*
* We should never attempt to call zil_commit on a snapshot for
* a couple of reasons:
@@ -3659,7 +3901,7 @@ zil_commit(zilog_t *zilog, uint64_t foid)
ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE);
if (zilog->zl_sync == ZFS_SYNC_DISABLED)
- return;
+ return (0);
if (!spa_writeable(zilog->zl_spa)) {
/*
@@ -3670,10 +3912,23 @@ zil_commit(zilog_t *zilog, uint64_t foid)
* verifying that truth before we return to the caller.
*/
ASSERT(list_is_empty(&zilog->zl_lwb_list));
- ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
+ ASSERT0P(zilog->zl_last_lwb_opened);
for (int i = 0; i < TXG_SIZE; i++)
- ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL);
- return;
+ ASSERT0P(zilog->zl_itxg[i].itxg_itxs);
+ return (0);
+ }
+
+ int err = 0;
+
+ /*
+ * If the ZIL crashed, bypass it entirely, and rely on
+ * txg_wait_synced_flags() to get the data out to disk.
+ */
+ if (zilog->zl_restart_txg > 0) {
+ ZIL_STAT_BUMP(zilog, zil_commit_crash_count);
+ err = txg_wait_synced_flags(zilog->zl_dmu_pool, 0,
+ TXG_WAIT_SUSPEND);
+ goto out;
}
/*
@@ -3685,14 +3940,43 @@ zil_commit(zilog_t *zilog, uint64_t foid)
*/
if (zilog->zl_suspend > 0) {
ZIL_STAT_BUMP(zilog, zil_commit_suspend_count);
- txg_wait_synced(zilog->zl_dmu_pool, 0);
- return;
+ err = txg_wait_synced_flags(zilog->zl_dmu_pool, 0,
+ TXG_WAIT_SUSPEND);
+ if (err != 0) {
+ ASSERT3U(err, ==, ESHUTDOWN);
+ zil_crash(zilog);
+ }
+ goto out;
}
- zil_commit_impl(zilog, foid);
+ err = zil_commit_impl(zilog, foid);
+
+out:
+ if (err == 0)
+ return (0);
+
+ /*
+ * The ZIL write failed and the pool is suspended. There's nothing else
+ * we can do except return or block.
+ */
+ ASSERT3U(err, ==, ESHUTDOWN);
+
+ /*
+ * Return the error if failmode=continue or the caller will handle it
+ * directly.
+ */
+ if (!(flags & ZIL_COMMIT_FAILMODE) ||
+ spa_get_failmode(zilog->zl_spa) == ZIO_FAILURE_MODE_CONTINUE)
+ return (SET_ERROR(EIO));
+
+ /*
+ * Block until the pool returns. We assume that the data will make
+ * it out to disk in the end, and so return success.
+ */
+ txg_wait_synced(zilog->zl_dmu_pool, 0);
+ return (0);
}
-void
+static int
zil_commit_impl(zilog_t *zilog, uint64_t foid)
{
ZIL_STAT_BUMP(zilog, zil_commit_count);
@@ -3729,6 +4013,7 @@ zil_commit_impl(zilog_t *zilog, uint64_t foid)
uint64_t wtxg = zil_commit_writer(zilog, zcw);
zil_commit_waiter(zilog, zcw);
+ int err = 0;
if (zcw->zcw_zio_error != 0) {
/*
* If there was an error writing out the ZIL blocks that
@@ -3741,13 +4026,29 @@ zil_commit_impl(zilog_t *zilog, uint64_t foid)
ZIL_STAT_BUMP(zilog, zil_commit_error_count);
DTRACE_PROBE2(zil__commit__io__error,
zilog_t *, zilog, zil_commit_waiter_t *, zcw);
- txg_wait_synced(zilog->zl_dmu_pool, 0);
+ err = txg_wait_synced_flags(zilog->zl_dmu_pool, 0,
+ TXG_WAIT_SUSPEND);
} else if (wtxg != 0) {
ZIL_STAT_BUMP(zilog, zil_commit_suspend_count);
- txg_wait_synced(zilog->zl_dmu_pool, wtxg);
+ err = txg_wait_synced_flags(zilog->zl_dmu_pool, wtxg,
+ TXG_WAIT_SUSPEND);
}
zil_free_commit_waiter(zcw);
+
+ if (err == 0)
+ return (0);
+
+ /*
+ * The ZIL write failed, and the pool also failed in the fallback to
+ * txg_wait_synced_flags(). We don't know whether the data reached
+ * disk, and the pool is probably suspended, so we don't know when it
+ * will come back. All we can do is shut down and return the error to
+ * the caller.
+ */
+ ASSERT3U(err, ==, ESHUTDOWN);
+ zil_crash(zilog);
+ return (err);
}
/*
@@ -3773,7 +4074,7 @@ zil_sync(zilog_t *zilog, dmu_tx_t *tx)
mutex_enter(&zilog->zl_lock);
- ASSERT(zilog->zl_stop_sync == 0);
+ ASSERT0(zilog->zl_stop_sync);
if (*replayed_seq != 0) {
ASSERT(zh->zh_replay_seq < *replayed_seq);
@@ -3943,6 +4244,8 @@ zil_alloc(objset_t *os, zil_header_t *zh_phys)
list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
offsetof(lwb_t, lwb_node));
+ list_create(&zilog->zl_lwb_crash_list, sizeof (lwb_t),
+ offsetof(lwb_t, lwb_node));
list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
@@ -3967,9 +4270,12 @@ zil_free(zilog_t *zilog)
ASSERT0(zilog->zl_suspend);
ASSERT0(zilog->zl_suspending);
+ ASSERT0(zilog->zl_restart_txg);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
list_destroy(&zilog->zl_lwb_list);
+ ASSERT(list_is_empty(&zilog->zl_lwb_crash_list));
+ list_destroy(&zilog->zl_lwb_crash_list);
ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
list_destroy(&zilog->zl_itx_commit_list);
@@ -4005,8 +4311,8 @@ zil_open(objset_t *os, zil_get_data_t *get_data, zil_sums_t *zil_sums)
{
zilog_t *zilog = dmu_objset_zil(os);
- ASSERT3P(zilog->zl_get_data, ==, NULL);
- ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
+ ASSERT0P(zilog->zl_get_data);
+ ASSERT0P(zilog->zl_last_lwb_opened);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
zilog->zl_get_data = get_data;
@@ -4025,7 +4331,8 @@ zil_close(zilog_t *zilog)
uint64_t txg;
if (!dmu_objset_is_snapshot(zilog->zl_os)) {
- zil_commit(zilog, 0);
+ if (zil_commit_flags(zilog, 0, ZIL_COMMIT_NOW) != 0)
+ txg_wait_synced(zilog->zl_dmu_pool, 0);
} else {
ASSERT(list_is_empty(&zilog->zl_lwb_list));
ASSERT0(zilog->zl_dirty_max_txg);
@@ -4126,6 +4433,17 @@ zil_suspend(const char *osname, void **cookiep)
return (SET_ERROR(EBUSY));
}
+ if (zilog->zl_restart_txg > 0) {
+ /*
+ * ZIL crashed. It effectively _is_ suspended, but callers
+ * are usually trying to make sure it's empty on-disk, which
+ * we can't guarantee right now.
+ */
+ mutex_exit(&zilog->zl_lock);
+ dmu_objset_rele(os, suspend_tag);
+ return (SET_ERROR(EBUSY));
+ }
+
/*
* Don't put a long hold in the cases where we can avoid it. This
* is when there is no cookie so we are doing a suspend & resume
@@ -4158,6 +4476,11 @@ zil_suspend(const char *osname, void **cookiep)
zil_resume(os);
else
*cookiep = os;
+
+ if (zilog->zl_restart_txg > 0)
+ /* ZIL crashed while we were waiting. */
+ return (SET_ERROR(EBUSY));
+
return (0);
}
@@ -4199,17 +4522,34 @@ zil_suspend(const char *osname, void **cookiep)
* would just call txg_wait_synced(), because zl_suspend is set.
* txg_wait_synced() doesn't wait for these lwb's to be
* LWB_STATE_FLUSH_DONE before returning.
+ *
+ * However, zil_commit_impl() itself can return an error if any of the
+ * lwbs fail, or the pool suspends in the fallback
+ * txg_wait_synced_flags(), which affects what we do next, so we
+ * capture that error.
*/
- zil_commit_impl(zilog, 0);
+ error = zil_commit_impl(zilog, 0);
+ if (error == ESHUTDOWN)
+ /* zil_commit_impl() has called zil_crash() already */
+ error = SET_ERROR(EBUSY);
/*
* Now that we've ensured all lwb's are LWB_STATE_FLUSH_DONE, we
* use txg_wait_synced() to ensure the data from the zilog has
* migrated to the main pool before calling zil_destroy().
*/
- txg_wait_synced(zilog->zl_dmu_pool, 0);
+ if (error == 0) {
+ error = txg_wait_synced_flags(zilog->zl_dmu_pool, 0,
+ TXG_WAIT_SUSPEND);
+ if (error != 0) {
+ ASSERT3U(error, ==, ESHUTDOWN);
+ zil_crash(zilog);
+ error = SET_ERROR(EBUSY);
+ }
+ }
- zil_destroy(zilog, B_FALSE);
+ if (error == 0)
+ zil_destroy(zilog, B_FALSE);
mutex_enter(&zilog->zl_lock);
zilog->zl_suspending = B_FALSE;
@@ -4223,7 +4563,8 @@ zil_suspend(const char *osname, void **cookiep)
zil_resume(os);
else
*cookiep = os;
- return (0);
+
+ return (error);
}
void
@@ -4386,7 +4727,7 @@ zil_replay(objset_t *os, void *arg,
zilog->zl_replay = B_TRUE;
zilog->zl_replay_time = ddi_get_lbolt();
- ASSERT(zilog->zl_replay_blks == 0);
+ ASSERT0(zilog->zl_replay_blks);
(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
zh->zh_claim_txg, B_TRUE);
vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
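
The zil.c changes split the commit entry points: zil_commit() keeps honouring the pool failmode (blocking under failmode=wait and reporting success once the pool returns), while zil_commit_flags(..., ZIL_COMMIT_NOW) hands the error straight back so callers that cannot block forever, like zil_close() above, can fall back to a plain txg wait. A mock of that caller-side fallback; the function bodies are simplified stand-ins, not the in-tree logic:

#include <errno.h>
#include <stdio.h>

#define ZIL_COMMIT_NOW	0x1	/* illustrative flag value only */

/* Stand-in: a commit attempt against a crashed ZIL. */
static int
mock_zil_commit_flags(int flags)
{
	(void) flags;
	return (EIO);
}

/* Stand-in for txg_wait_synced(): eventually gets the data on disk. */
static void
mock_txg_wait_synced(void)
{
	printf("falling back to txg sync\n");
}

/*
 * Teardown-style caller: it cannot surface an error, so a failed ZIL
 * commit just means waiting for the open txg to sync instead.
 */
static void
close_path(void)
{
	if (mock_zil_commit_flags(ZIL_COMMIT_NOW) != 0)
		mock_txg_wait_synced();
}

int
main(void)
{
	close_path();
	return (0);
}
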
diff --git a/sys/contrib/openzfs/module/zfs/zio.c b/sys/contrib/openzfs/module/zfs/zio.c
index 218aec6093e2..3f0ddb63249d 100644
--- a/sys/contrib/openzfs/module/zfs/zio.c
+++ b/sys/contrib/openzfs/module/zfs/zio.c
@@ -339,8 +339,8 @@ zio_fini(void)
}
for (size_t i = 0; i < n; i++) {
- VERIFY3P(zio_buf_cache[i], ==, NULL);
- VERIFY3P(zio_data_buf_cache[i], ==, NULL);
+ VERIFY0P(zio_buf_cache[i]);
+ VERIFY0P(zio_data_buf_cache[i]);
}
if (zio_ksp != NULL) {
@@ -771,7 +771,7 @@ zio_add_child_impl(zio_t *pio, zio_t *cio, boolean_t first)
else
mutex_enter(&cio->io_lock);
- ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
+ ASSERT0(pio->io_state[ZIO_WAIT_DONE]);
uint64_t *countp = pio->io_children[cio->io_child_type];
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
@@ -821,7 +821,7 @@ zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
boolean_t waiting = B_FALSE;
mutex_enter(&zio->io_lock);
- ASSERT(zio->io_stall == NULL);
+ ASSERT0P(zio->io_stall);
for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
continue;
@@ -955,8 +955,8 @@ zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
zio_t *zio;
IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
- ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
- ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);
+ ASSERT0(P2PHASE(psize, SPA_MINBLOCKSIZE));
+ ASSERT0(P2PHASE(offset, SPA_MINBLOCKSIZE));
ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
@@ -1451,7 +1451,7 @@ zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
metaslab_check_free(spa, bp);
bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
} else {
- VERIFY3P(zio_free_sync(NULL, spa, txg, bp, 0), ==, NULL);
+ VERIFY0P(zio_free_sync(NULL, spa, txg, bp, 0));
}
}
@@ -1559,7 +1559,7 @@ zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
{
zio_t *zio;
- ASSERT(vd->vdev_children == 0);
+ ASSERT0(vd->vdev_children);
ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
ASSERT3U(offset + size, <=, vd->vdev_psize);
@@ -1580,7 +1580,7 @@ zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
{
zio_t *zio;
- ASSERT(vd->vdev_children == 0);
+ ASSERT0(vd->vdev_children);
ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
ASSERT3U(offset + size, <=, vd->vdev_psize);
@@ -1747,7 +1747,7 @@ zio_flush(zio_t *pio, vdev_t *vd)
void
zio_shrink(zio_t *zio, uint64_t size)
{
- ASSERT3P(zio->io_executor, ==, NULL);
+ ASSERT0P(zio->io_executor);
ASSERT3U(zio->io_orig_size, ==, zio->io_size);
ASSERT3U(size, <=, zio->io_size);
@@ -1941,7 +1941,7 @@ zio_write_compress(zio_t *zio)
}
ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
- ASSERT(zio->io_bp_override == NULL);
+ ASSERT0P(zio->io_bp_override);
if (!BP_IS_HOLE(bp) && BP_GET_BIRTH(bp) == zio->io_txg) {
/*
@@ -2436,7 +2436,7 @@ __zio_execute(zio_t *zio)
ASSERT(!MUTEX_HELD(&zio->io_lock));
ASSERT(ISP2(stage));
- ASSERT(zio->io_stall == NULL);
+ ASSERT0P(zio->io_stall);
do {
stage <<= 1;
@@ -2509,7 +2509,7 @@ zio_wait(zio_t *zio)
int error;
ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN);
- ASSERT3P(zio->io_executor, ==, NULL);
+ ASSERT0P(zio->io_executor);
zio->io_waiter = curthread;
ASSERT0(zio->io_queued_timestamp);
@@ -2551,7 +2551,7 @@ zio_nowait(zio_t *zio)
if (zio == NULL)
return;
- ASSERT3P(zio->io_executor, ==, NULL);
+ ASSERT0P(zio->io_executor);
if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
list_is_empty(&zio->io_parent_list)) {
@@ -2590,8 +2590,8 @@ zio_reexecute(void *arg)
ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
- ASSERT(pio->io_gang_leader == NULL);
- ASSERT(pio->io_gang_tree == NULL);
+ ASSERT0P(pio->io_gang_leader);
+ ASSERT0P(pio->io_gang_tree);
mutex_enter(&pio->io_lock);
pio->io_flags = pio->io_orig_flags;
@@ -2689,7 +2689,7 @@ zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
ASSERT(zio != spa->spa_suspend_zio_root);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
- ASSERT(zio_unique_parent(zio) == NULL);
+ ASSERT0P(zio_unique_parent(zio));
ASSERT(zio->io_stage == ZIO_STAGE_DONE);
zio_add_child(spa->spa_suspend_zio_root, zio);
}
@@ -2908,7 +2908,7 @@ zio_gang_node_alloc(zio_gang_node_t **gnpp, uint64_t gangblocksize)
{
zio_gang_node_t *gn;
- ASSERT(*gnpp == NULL);
+ ASSERT0P(*gnpp);
gn = kmem_zalloc(sizeof (*gn) +
(gbh_nblkptrs(gangblocksize) * sizeof (gn)), KM_SLEEP);
@@ -2925,7 +2925,7 @@ zio_gang_node_free(zio_gang_node_t **gnpp)
zio_gang_node_t *gn = *gnpp;
for (int g = 0; g < gbh_nblkptrs(gn->gn_allocsize); g++)
- ASSERT(gn->gn_child[g] == NULL);
+ ASSERT0P(gn->gn_child[g]);
zio_buf_free(gn->gn_gbh, gn->gn_allocsize);
kmem_free(gn, sizeof (*gn) +
@@ -3362,11 +3362,11 @@ zio_nop_write(zio_t *zio)
zio_prop_t *zp = &zio->io_prop;
ASSERT(BP_IS_HOLE(bp));
- ASSERT(BP_GET_LEVEL(bp) == 0);
+ ASSERT0(BP_GET_LEVEL(bp));
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(zp->zp_nopwrite);
ASSERT(!zp->zp_dedup);
- ASSERT(zio->io_bp_override == NULL);
+ ASSERT0P(zio->io_bp_override);
ASSERT(IO_IS_ALLOCATING(zio));
/*
@@ -3495,7 +3495,7 @@ zio_ddt_read_start(zio_t *zio)
ddt_univ_phys_t *ddp = dde->dde_phys;
blkptr_t blk;
- ASSERT(zio->io_vsd == NULL);
+ ASSERT0P(zio->io_vsd);
zio->io_vsd = dde;
if (v_self == DDT_PHYS_NONE)
@@ -3560,7 +3560,7 @@ zio_ddt_read_done(zio_t *zio)
zio->io_vsd = NULL;
}
- ASSERT(zio->io_vsd == NULL);
+ ASSERT0P(zio->io_vsd);
return (zio);
}
@@ -4415,7 +4415,7 @@ static void
zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
{
ASSERT(BP_GET_BIRTH(bp) == zio->io_txg || BP_IS_HOLE(bp));
- ASSERT(zio->io_bp_override == NULL);
+ ASSERT0P(zio->io_bp_override);
if (!BP_IS_HOLE(bp)) {
metaslab_free(zio->io_spa, bp, BP_GET_BIRTH(bp), B_TRUE);
@@ -4559,8 +4559,8 @@ zio_vdev_io_start(zio_t *zio)
zio->io_delay = 0;
- ASSERT(zio->io_error == 0);
- ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);
+ ASSERT0(zio->io_error);
+ ASSERT0(zio->io_child_error[ZIO_CHILD_VDEV]);
if (vd == NULL) {
if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
@@ -4751,7 +4751,7 @@ zio_vdev_io_done(zio_t *zio)
ops->vdev_op_io_done(zio);
if (unexpected_error && vd->vdev_remove_wanted == B_FALSE)
- VERIFY(vdev_probe(vd, zio) == NULL);
+ VERIFY0P(vdev_probe(vd, zio));
return (zio);
}
@@ -4903,7 +4903,7 @@ void
zio_vdev_io_reissue(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
- ASSERT(zio->io_error == 0);
+ ASSERT0(zio->io_error);
zio->io_stage >>= 1;
}
@@ -4920,7 +4920,7 @@ void
zio_vdev_io_bypass(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
- ASSERT(zio->io_error == 0);
+ ASSERT0(zio->io_error);
zio->io_flags |= ZIO_FLAG_IO_BYPASS;
zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
@@ -5298,7 +5298,7 @@ zio_ready(zio_t *zio)
ASSERT(IO_IS_ALLOCATING(zio));
ASSERT(BP_GET_BIRTH(bp) == zio->io_txg ||
BP_IS_HOLE(bp) || (zio->io_flags & ZIO_FLAG_NOPWRITE));
- ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);
+ ASSERT0(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY]);
zio->io_ready(zio);
}
@@ -5448,7 +5448,7 @@ zio_done(zio_t *zio)
for (int c = 0; c < ZIO_CHILD_TYPES; c++)
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
- ASSERT(zio->io_children[c][w] == 0);
+ ASSERT0(zio->io_children[c][w]);
if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy,
diff --git a/sys/contrib/openzfs/module/zfs/zio_checksum.c b/sys/contrib/openzfs/module/zfs/zio_checksum.c
index 63d0c6dadd46..1d0646a61185 100644
--- a/sys/contrib/openzfs/module/zfs/zio_checksum.c
+++ b/sys/contrib/openzfs/module/zfs/zio_checksum.c
@@ -215,7 +215,7 @@ zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS] = {
spa_feature_t
zio_checksum_to_feature(enum zio_checksum cksum)
{
- VERIFY((cksum & ~ZIO_CHECKSUM_MASK) == 0);
+ VERIFY0((cksum & ~ZIO_CHECKSUM_MASK));
switch (cksum) {
case ZIO_CHECKSUM_BLAKE3:
diff --git a/sys/contrib/openzfs/module/zfs/zio_compress.c b/sys/contrib/openzfs/module/zfs/zio_compress.c
index 9f0ac1b63146..89ceeb58ad91 100644
--- a/sys/contrib/openzfs/module/zfs/zio_compress.c
+++ b/sys/contrib/openzfs/module/zfs/zio_compress.c
@@ -38,12 +38,6 @@
#include <sys/zstd/zstd.h>
/*
- * If nonzero, every 1/X decompression attempts will fail, simulating
- * an undetected memory error.
- */
-static unsigned long zio_decompress_fail_fraction = 0;
-
-/*
* Compression vectors.
*/
zio_compress_info_t zio_compress_table[ZIO_COMPRESS_FUNCTIONS] = {
@@ -171,15 +165,6 @@ zio_decompress_data(enum zio_compress c, abd_t *src, abd_t *dst,
else
err = ci->ci_decompress(src, dst, s_len, d_len, ci->ci_level);
- /*
- * Decompression shouldn't fail, because we've already verified
- * the checksum. However, for extra protection (e.g. against bitflips
- * in non-ECC RAM), we handle this error (and test it).
- */
- if (zio_decompress_fail_fraction != 0 &&
- random_in_range(zio_decompress_fail_fraction) == 0)
- err = SET_ERROR(EINVAL);
-
return (err);
}
diff --git a/sys/contrib/openzfs/module/zfs/zio_inject.c b/sys/contrib/openzfs/module/zfs/zio_inject.c
index df7b01ba879e..981a1be4847c 100644
--- a/sys/contrib/openzfs/module/zfs/zio_inject.c
+++ b/sys/contrib/openzfs/module/zfs/zio_inject.c
@@ -1119,7 +1119,7 @@ zio_clear_fault(int id)
kmem_free(handler->zi_lanes, sizeof (*handler->zi_lanes) *
handler->zi_record.zi_nlanes);
} else {
- ASSERT3P(handler->zi_lanes, ==, NULL);
+ ASSERT0P(handler->zi_lanes);
}
if (handler->zi_spa_name != NULL)
diff --git a/sys/contrib/openzfs/module/zfs/zrlock.c b/sys/contrib/openzfs/module/zfs/zrlock.c
index 3c0f1b7bbbc1..09c110945c97 100644
--- a/sys/contrib/openzfs/module/zfs/zrlock.c
+++ b/sys/contrib/openzfs/module/zfs/zrlock.c
@@ -129,7 +129,7 @@ zrl_tryenter(zrlock_t *zrl)
(uint32_t *)&zrl->zr_refcount, 0, ZRL_LOCKED);
if (cas == 0) {
#ifdef ZFS_DEBUG
- ASSERT3P(zrl->zr_owner, ==, NULL);
+ ASSERT0P(zrl->zr_owner);
zrl->zr_owner = curthread;
#endif
return (1);
diff --git a/sys/contrib/openzfs/module/zfs/zthr.c b/sys/contrib/openzfs/module/zfs/zthr.c
index 597a510528ea..d245ce4946e0 100644
--- a/sys/contrib/openzfs/module/zfs/zthr.c
+++ b/sys/contrib/openzfs/module/zfs/zthr.c
@@ -316,7 +316,7 @@ zthr_destroy(zthr_t *t)
{
ASSERT(!MUTEX_HELD(&t->zthr_state_lock));
ASSERT(!MUTEX_HELD(&t->zthr_request_lock));
- VERIFY3P(t->zthr_thread, ==, NULL);
+ VERIFY0P(t->zthr_thread);
mutex_destroy(&t->zthr_request_lock);
mutex_destroy(&t->zthr_state_lock);
cv_destroy(&t->zthr_cv);
diff --git a/sys/contrib/openzfs/module/zfs/zvol.c b/sys/contrib/openzfs/module/zfs/zvol.c
index 7e264f308cf2..29f51e230a37 100644
--- a/sys/contrib/openzfs/module/zfs/zvol.c
+++ b/sys/contrib/openzfs/module/zfs/zvol.c
@@ -215,8 +215,8 @@ zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
int error;
uint64_t volblocksize, volsize;
- VERIFY(nvlist_lookup_uint64(nvprops,
- zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
+ VERIFY0(nvlist_lookup_uint64(nvprops,
+ zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize));
if (nvlist_lookup_uint64(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
@@ -225,21 +225,20 @@ zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
* These properties must be removed from the list so the generic
* property setting step won't apply to them.
*/
- VERIFY(nvlist_remove_all(nvprops,
- zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
+ VERIFY0(nvlist_remove_all(nvprops, zfs_prop_to_name(ZFS_PROP_VOLSIZE)));
(void) nvlist_remove_all(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
DMU_OT_NONE, 0, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
DMU_OT_NONE, 0, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
- ASSERT(error == 0);
+ ASSERT0(error);
}
/*
@@ -254,7 +253,7 @@ zvol_get_stats(objset_t *os, nvlist_t *nv)
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
if (error)
- return (SET_ERROR(error));
+ return (error);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
@@ -267,7 +266,7 @@ zvol_get_stats(objset_t *os, nvlist_t *nv)
kmem_free(doi, sizeof (dmu_object_info_t));
- return (SET_ERROR(error));
+ return (error);
}
/*
@@ -305,7 +304,7 @@ zvol_update_volsize(uint64_t volsize, objset_t *os)
error = dmu_tx_assign(tx, DMU_TX_WAIT);
if (error) {
dmu_tx_abort(tx);
- return (SET_ERROR(error));
+ return (error);
}
txg = dmu_tx_get_txg(tx);
@@ -337,7 +336,7 @@ zvol_set_volsize(const char *name, uint64_t volsize)
error = dsl_prop_get_integer(name,
zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
if (error != 0)
- return (SET_ERROR(error));
+ return (error);
if (readonly)
return (SET_ERROR(EROFS));
@@ -353,7 +352,7 @@ zvol_set_volsize(const char *name, uint64_t volsize)
FTAG, &os)) != 0) {
if (zv != NULL)
mutex_exit(&zv->zv_state_lock);
- return (SET_ERROR(error));
+ return (error);
}
owned = B_TRUE;
if (zv != NULL)
@@ -390,7 +389,7 @@ out:
if (error == 0 && zv != NULL)
zvol_os_update_volsize(zv, volsize);
- return (SET_ERROR(error));
+ return (error);
}
/*
@@ -401,7 +400,7 @@ zvol_set_volthreading(const char *name, boolean_t value)
{
zvol_state_t *zv = zvol_find_by_name(name, RW_NONE);
if (zv == NULL)
- return (ENOENT);
+ return (SET_ERROR(ENOENT));
zv->zv_threading = value;
mutex_exit(&zv->zv_state_lock);
return (0);
@@ -450,8 +449,10 @@ zvol_check_volblocksize(const char *name, uint64_t volblocksize)
* We don't allow setting the property above 1MB,
* unless the tunable has been changed.
*/
- if (volblocksize > zfs_max_recordsize)
+ if (volblocksize > zfs_max_recordsize) {
+ spa_close(spa, FTAG);
return (SET_ERROR(EDOM));
+ }
spa_close(spa, FTAG);
}
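The zvol_check_volblocksize() hunk fixes a leak: the EDOM early return used to skip the spa_close() that balances the hold taken on the spa earlier in the function. A generic sketch of that bug class, with hypothetical acquire_thing()/release_thing() standing in for the open/close pair:

/*
 * Generic sketch of the bug class fixed above: an early return that skips
 * releasing a hold taken earlier in the function. acquire_thing() and
 * release_thing() are hypothetical stand-ins for the open/close pair.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct thing { int dummy; };

static int
acquire_thing(struct thing **tp)
{
    *tp = calloc(1, sizeof (**tp));
    return (*tp == NULL ? ENOMEM : 0);
}

static void
release_thing(struct thing *t)
{
    free(t);
}

static int
check_limit(unsigned long value, unsigned long limit)
{
    struct thing *t;
    int error = acquire_thing(&t);

    if (error != 0)
        return (error);
    if (value > limit) {
        release_thing(t);   /* the fix: drop the hold on this path too */
        return (EDOM);
    }
    release_thing(t);
    return (0);
}

int
main(void)
{
    printf("%d %d\n", check_limit(1, 2), check_limit(3, 2));
    return (0);
}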
@@ -618,7 +619,7 @@ zvol_clone_range(zvol_state_t *zv_src, uint64_t inoff, zvol_state_t *zv_dst,
dmu_tx_t *tx;
blkptr_t *bps;
size_t maxblocks;
- int error = EINVAL;
+ int error = 0;
rw_enter(&zv_dst->zv_suspend_lock, RW_READER);
if (zv_dst->zv_zilog == NULL) {
@@ -644,23 +645,22 @@ zvol_clone_range(zvol_state_t *zv_src, uint64_t inoff, zvol_state_t *zv_dst,
*/
if (!spa_feature_is_enabled(dmu_objset_spa(outos),
SPA_FEATURE_BLOCK_CLONING)) {
- error = EOPNOTSUPP;
+ error = SET_ERROR(EOPNOTSUPP);
goto out;
}
if (dmu_objset_spa(inos) != dmu_objset_spa(outos)) {
- error = EXDEV;
+ error = SET_ERROR(EXDEV);
goto out;
}
if (inos->os_encrypted != outos->os_encrypted) {
- error = EXDEV;
+ error = SET_ERROR(EXDEV);
goto out;
}
if (zv_src->zv_volblocksize != zv_dst->zv_volblocksize) {
- error = EINVAL;
+ error = SET_ERROR(EINVAL);
goto out;
}
if (inoff >= zv_src->zv_volsize || outoff >= zv_dst->zv_volsize) {
- error = 0;
goto out;
}
@@ -671,17 +671,15 @@ zvol_clone_range(zvol_state_t *zv_src, uint64_t inoff, zvol_state_t *zv_dst,
len = zv_src->zv_volsize - inoff;
if (len > zv_dst->zv_volsize - outoff)
len = zv_dst->zv_volsize - outoff;
- if (len == 0) {
- error = 0;
+ if (len == 0)
goto out;
- }
/*
* No overlapping if we are cloning within the same file
*/
if (zv_src == zv_dst) {
if (inoff < outoff + len && outoff < inoff + len) {
- error = EINVAL;
+ error = SET_ERROR(EINVAL);
goto out;
}
}
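For the same-volume case, the clone is rejected when the source and destination byte ranges intersect: two equal-length half-open ranges [inoff, inoff + len) and [outoff, outoff + len) overlap exactly when each starts before the other ends, which is the test shown above. A small self-contained check of that arithmetic:

/*
 * The arithmetic behind the same-volume overlap test above: [a, a + len)
 * and [b, b + len) intersect exactly when each starts before the other ends.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool
ranges_overlap(uint64_t a, uint64_t b, uint64_t len)
{
    return (a < b + len && b < a + len);
}

int
main(void)
{
    assert(!ranges_overlap(0, 4096, 4096)); /* adjacent: no overlap */
    assert(ranges_overlap(0, 2048, 4096));  /* half-shifted: overlap */
    assert(!ranges_overlap(8192, 0, 4096)); /* disjoint: no overlap */
    return (0);
}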
@@ -691,7 +689,7 @@ zvol_clone_range(zvol_state_t *zv_src, uint64_t inoff, zvol_state_t *zv_dst,
*/
if ((inoff % zv_src->zv_volblocksize) != 0 ||
(outoff % zv_dst->zv_volblocksize) != 0) {
- error = EINVAL;
+ error = SET_ERROR(EINVAL);
goto out;
}
@@ -699,7 +697,7 @@ zvol_clone_range(zvol_state_t *zv_src, uint64_t inoff, zvol_state_t *zv_dst,
* Length must be multiple of block size
*/
if ((len % zv_src->zv_volblocksize) != 0) {
- error = EINVAL;
+ error = SET_ERROR(EINVAL);
goto out;
}
@@ -771,13 +769,13 @@ zvol_clone_range(zvol_state_t *zv_src, uint64_t inoff, zvol_state_t *zv_dst,
zfs_rangelock_exit(outlr);
zfs_rangelock_exit(inlr);
if (error == 0 && zv_dst->zv_objset->os_sync == ZFS_SYNC_ALWAYS) {
- zil_commit(zilog_dst, ZVOL_OBJ);
+ error = zil_commit(zilog_dst, ZVOL_OBJ);
}
out:
if (zv_src != zv_dst)
rw_exit(&zv_src->zv_suspend_lock);
rw_exit(&zv_dst->zv_suspend_lock);
- return (SET_ERROR(error));
+ return (error);
}
/*
@@ -897,7 +895,7 @@ zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
if (wr_state == WR_COPIED &&
dmu_read_by_dnode(zv->zv_dn, offset, len, lr + 1,
DMU_READ_NO_PREFETCH | DMU_KEEP_CACHING) != 0) {
- zil_itx_destroy(itx);
+ zil_itx_destroy(itx, 0);
itx = zil_itx_create(TX_WRITE, sizeof (*lr));
lr = (lr_write_t *)&itx->itx_lr;
wr_state = WR_NEED_COPY;
@@ -916,7 +914,7 @@ zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
itx->itx_private = zv;
- (void) zil_itx_assign(zilog, itx, tx);
+ zil_itx_assign(zilog, itx, tx);
offset += len;
size -= len;
@@ -1026,7 +1024,7 @@ zvol_get_data(void *arg, uint64_t arg2, lr_write_t *lr, char *buf,
zvol_get_done(zgd, error);
- return (SET_ERROR(error));
+ return (error);
}
/*
@@ -1071,15 +1069,15 @@ zvol_setup_zv(zvol_state_t *zv)
error = dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL);
if (error)
- return (SET_ERROR(error));
+ return (error);
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
if (error)
- return (SET_ERROR(error));
+ return (error);
error = dnode_hold(os, ZVOL_OBJ, zv, &zv->zv_dn);
if (error)
- return (SET_ERROR(error));
+ return (error);
zvol_os_set_capacity(zv, volsize >> 9);
zv->zv_volsize = volsize;
@@ -1121,7 +1119,7 @@ zvol_shutdown_zv(zvol_state_t *zv)
*/
if (zv->zv_flags & ZVOL_WRITTEN_TO)
txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
- (void) dmu_objset_evict_dbufs(zv->zv_objset);
+ dmu_objset_evict_dbufs(zv->zv_objset);
}
/*
@@ -1198,7 +1196,7 @@ zvol_resume(zvol_state_t *zv)
if (zv->zv_flags & ZVOL_REMOVING)
cv_broadcast(&zv->zv_removing_cv);
- return (SET_ERROR(error));
+ return (error);
}
int
@@ -1214,7 +1212,7 @@ zvol_first_open(zvol_state_t *zv, boolean_t readonly)
boolean_t ro = (readonly || (strchr(zv->zv_name, '@') != NULL));
error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, ro, B_TRUE, zv, &os);
if (error)
- return (SET_ERROR(error));
+ return (error);
zv->zv_objset = os;
@@ -1440,41 +1438,32 @@ zvol_task_update_status(zvol_task_t *task, uint64_t total, uint64_t done,
}
}
-static const char *
-zvol_task_op_msg(zvol_async_op_t op)
-{
- switch (op) {
- case ZVOL_ASYNC_CREATE_MINORS:
- return ("create");
- case ZVOL_ASYNC_REMOVE_MINORS:
- return ("remove");
- case ZVOL_ASYNC_RENAME_MINORS:
- return ("rename");
- case ZVOL_ASYNC_SET_SNAPDEV:
- case ZVOL_ASYNC_SET_VOLMODE:
- return ("set property");
- default:
- return ("unknown");
- }
-
- __builtin_unreachable();
- return (NULL);
-}
-
static void
zvol_task_report_status(zvol_task_t *task)
{
+#ifdef ZFS_DEBUG
+ static const char *const msg[] = {
+ "create",
+ "remove",
+ "rename",
+ "set snapdev",
+ "set volmode",
+ "unknown",
+ };
if (task->zt_status == 0)
return;
+ zvol_async_op_t op = MIN(task->zt_op, ZVOL_ASYNC_MAX);
if (task->zt_error) {
dprintf("The %s minors zvol task was not ok, last error %d\n",
- zvol_task_op_msg(task->zt_op), task->zt_error);
+ msg[op], task->zt_error);
} else {
- dprintf("The %s minors zvol task was not ok\n",
- zvol_task_op_msg(task->zt_op));
+ dprintf("The %s minors zvol task was not ok\n", msg[op]);
}
+#else
+ (void) task;
+#endif
}
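The switch-based zvol_task_op_msg() helper is folded into a static message table: the index is clamped with MIN() against ZVOL_ASYNC_MAX so an unexpected op lands on the final entry (presumably the "unknown" fallback), and the reporting body only exists in ZFS_DEBUG builds. An illustrative clamped lookup in the same shape (the msg[] strings are taken from the patch; MY_MIN() and the rest are scaffolding):

/*
 * Illustrative clamped table lookup mirroring the msg[]/MIN() pattern above:
 * any out-of-range op index falls back to the last, "unknown", entry.
 */
#include <stdio.h>

#define MY_MIN(a, b)    ((a) < (b) ? (a) : (b))
#define LAST_ENTRY      5   /* index of the "unknown" fallback */

static const char *const msg[] = {
    "create", "remove", "rename", "set snapdev", "set volmode", "unknown",
};

int
main(void)
{
    unsigned int ops[] = { 0, 3, 42 };  /* 42 is out of range */

    for (unsigned int i = 0; i < sizeof (ops) / sizeof (ops[0]); i++)
        printf("op %u -> %s\n", ops[i], msg[MY_MIN(ops[i], LAST_ENTRY)]);
    return (0);
}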
/*
@@ -1733,7 +1722,7 @@ zvol_remove_minor_impl(const char *name)
if (zv == NULL) {
rw_exit(&zvol_state_lock);
- return (ENOENT);
+ return (SET_ERROR(ENOENT));
}
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -2212,7 +2201,7 @@ zvol_fini_impl(void)
rw_destroy(&zvol_state_lock);
if (ztqs->tqs_taskq == NULL) {
- ASSERT3U(ztqs->tqs_cnt, ==, 0);
+ ASSERT0(ztqs->tqs_cnt);
} else {
for (uint_t i = 0; i < ztqs->tqs_cnt; i++) {
ASSERT3P(ztqs->tqs_taskq[i], !=, NULL);