Diffstat (limited to 'sys/contrib/openzfs/module')
-rw-r--r--  sys/contrib/openzfs/module/Kbuild.in | 3
-rw-r--r--  sys/contrib/openzfs/module/icp/algs/modes/gcm.c | 371
-rw-r--r--  sys/contrib/openzfs/module/icp/algs/modes/modes.c | 2
-rw-r--r--  sys/contrib/openzfs/module/icp/algs/sha2/sha256_impl.c | 3
-rw-r--r--  sys/contrib/openzfs/module/icp/algs/sha2/sha512_impl.c | 3
-rw-r--r--  sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl | 253
-rw-r--r--  sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl.descrip | 1
-rw-r--r--  sys/contrib/openzfs/module/icp/asm-x86_64/modes/aesni-gcm-avx2-vaes.S | 1323
-rw-r--r--  sys/contrib/openzfs/module/icp/include/modes/modes.h | 13
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/kmod_core.c | 7
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c | 244
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c | 2
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c | 44
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c | 3
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c | 31
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode_os.c | 6
-rw-r--r--  sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c | 210
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zfs_file_os.c | 3
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c | 55
-rw-r--r--  sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c | 153
-rw-r--r--  sys/contrib/openzfs/module/zcommon/simd_stat.c | 4
-rw-r--r--  sys/contrib/openzfs/module/zcommon/zfs_deleg.c | 1
-rw-r--r--  sys/contrib/openzfs/module/zcommon/zpool_prop.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/arc.c | 32
-rw-r--r--  sys/contrib/openzfs/module/zfs/dbuf.c | 28
-rw-r--r--  sys/contrib/openzfs/module/zfs/ddt.c | 8
-rw-r--r--  sys/contrib/openzfs/module/zfs/ddt_log.c | 30
-rw-r--r--  sys/contrib/openzfs/module/zfs/dmu.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/dmu_objset.c | 6
-rw-r--r--  sys/contrib/openzfs/module/zfs/dmu_zfetch.c | 10
-rw-r--r--  sys/contrib/openzfs/module/zfs/dnode.c | 103
-rw-r--r--  sys/contrib/openzfs/module/zfs/dsl_deadlist.c | 3
-rw-r--r--  sys/contrib/openzfs/module/zfs/multilist.c | 4
-rw-r--r--  sys/contrib/openzfs/module/zfs/spa_config.c | 115
-rw-r--r--  sys/contrib/openzfs/module/zfs/spa_misc.c | 14
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev.c | 119
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_draid.c | 28
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_file.c | 3
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_queue.c | 2
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_raidz.c | 345
-rw-r--r--  sys/contrib/openzfs/module/zfs/vdev_removal.c | 4
-rw-r--r--  sys/contrib/openzfs/module/zfs/zfeature.c | 5
-rw-r--r--  sys/contrib/openzfs/module/zfs/zfs_crrd.c | 7
-rw-r--r--  sys/contrib/openzfs/module/zfs/zfs_ioctl.c | 23
-rw-r--r--  sys/contrib/openzfs/module/zfs/zil.c | 349
-rw-r--r--  sys/contrib/openzfs/module/zfs/zio.c | 51
-rw-r--r--  sys/contrib/openzfs/module/zfs/zvol.c | 304
-rw-r--r--  sys/contrib/openzfs/module/zstd/zfs_zstd.c | 8
48 files changed, 3230 insertions, 1118 deletions
diff --git a/sys/contrib/openzfs/module/Kbuild.in b/sys/contrib/openzfs/module/Kbuild.in
index 3d6f288fa5da..58a80dc4402c 100644
--- a/sys/contrib/openzfs/module/Kbuild.in
+++ b/sys/contrib/openzfs/module/Kbuild.in
@@ -4,7 +4,7 @@
ZFS_MODULE_CFLAGS += -std=gnu99 -Wno-declaration-after-statement
ZFS_MODULE_CFLAGS += -Wmissing-prototypes
-ZFS_MODULE_CFLAGS += @KERNEL_DEBUG_CFLAGS@ @NO_FORMAT_ZERO_LENGTH@
+ZFS_MODULE_CFLAGS += @KERNEL_DEBUG_CFLAGS@ @KERNEL_NO_FORMAT_ZERO_LENGTH@
ifneq ($(KBUILD_EXTMOD),)
zfs_include = @abs_top_srcdir@/include
@@ -135,6 +135,7 @@ ICP_OBJS_X86_64 := \
asm-x86_64/sha2/sha256-x86_64.o \
asm-x86_64/sha2/sha512-x86_64.o \
asm-x86_64/modes/aesni-gcm-x86_64.o \
+ asm-x86_64/modes/aesni-gcm-avx2-vaes.o \
asm-x86_64/modes/gcm_pclmulqdq.o \
asm-x86_64/modes/ghash-x86_64.o
diff --git a/sys/contrib/openzfs/module/icp/algs/modes/gcm.c b/sys/contrib/openzfs/module/icp/algs/modes/gcm.c
index c2a982b5a376..3cfa5b8165ce 100644
--- a/sys/contrib/openzfs/module/icp/algs/modes/gcm.c
+++ b/sys/contrib/openzfs/module/icp/algs/modes/gcm.c
@@ -46,6 +46,9 @@
#define IMPL_CYCLE (UINT32_MAX-1)
#ifdef CAN_USE_GCM_ASM
#define IMPL_AVX (UINT32_MAX-2)
+#if CAN_USE_GCM_ASM >= 2
+#define IMPL_AVX2 (UINT32_MAX-3)
+#endif
#endif
#define GCM_IMPL_READ(i) (*(volatile uint32_t *) &(i))
static uint32_t icp_gcm_impl = IMPL_FASTEST;
@@ -56,17 +59,16 @@ static uint32_t user_sel_impl = IMPL_FASTEST;
boolean_t gcm_avx_can_use_movbe = B_FALSE;
/*
* Whether to use the optimized openssl gcm and ghash implementations.
- * Set to true if module parameter icp_gcm_impl == "avx".
*/
-static boolean_t gcm_use_avx = B_FALSE;
-#define GCM_IMPL_USE_AVX (*(volatile boolean_t *)&gcm_use_avx)
+static gcm_impl gcm_impl_used = GCM_IMPL_GENERIC;
+#define GCM_IMPL_USED (*(volatile gcm_impl *)&gcm_impl_used)
extern boolean_t ASMABI atomic_toggle_boolean_nv(volatile boolean_t *);
static inline boolean_t gcm_avx_will_work(void);
-static inline void gcm_set_avx(boolean_t);
-static inline boolean_t gcm_toggle_avx(void);
-static inline size_t gcm_simd_get_htab_size(boolean_t);
+static inline boolean_t gcm_avx2_will_work(void);
+static inline void gcm_use_impl(gcm_impl impl);
+static inline gcm_impl gcm_toggle_impl(void);
static int gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *, char *, size_t,
crypto_data_t *, size_t);
@@ -89,7 +91,7 @@ gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
void (*xor_block)(uint8_t *, uint8_t *))
{
#ifdef CAN_USE_GCM_ASM
- if (ctx->gcm_use_avx == B_TRUE)
+ if (ctx->impl != GCM_IMPL_GENERIC)
return (gcm_mode_encrypt_contiguous_blocks_avx(
ctx, data, length, out, block_size));
#endif
@@ -208,7 +210,7 @@ gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
{
(void) copy_block;
#ifdef CAN_USE_GCM_ASM
- if (ctx->gcm_use_avx == B_TRUE)
+ if (ctx->impl != GCM_IMPL_GENERIC)
return (gcm_encrypt_final_avx(ctx, out, block_size));
#endif
@@ -374,7 +376,7 @@ gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
void (*xor_block)(uint8_t *, uint8_t *))
{
#ifdef CAN_USE_GCM_ASM
- if (ctx->gcm_use_avx == B_TRUE)
+ if (ctx->impl != GCM_IMPL_GENERIC)
return (gcm_decrypt_final_avx(ctx, out, block_size));
#endif
@@ -631,23 +633,23 @@ gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param,
((aes_key_t *)gcm_ctx->gcm_keysched)->ops->needs_byteswap;
if (GCM_IMPL_READ(icp_gcm_impl) != IMPL_CYCLE) {
- gcm_ctx->gcm_use_avx = GCM_IMPL_USE_AVX;
+ gcm_ctx->impl = GCM_IMPL_USED;
} else {
/*
- * Handle the "cycle" implementation by creating avx and
- * non-avx contexts alternately.
+ * Handle the "cycle" implementation by creating different
+ * contexts, one per implementation.
*/
- gcm_ctx->gcm_use_avx = gcm_toggle_avx();
+ gcm_ctx->impl = gcm_toggle_impl();
- /* The avx impl. doesn't handle byte swapped key schedules. */
- if (gcm_ctx->gcm_use_avx == B_TRUE && needs_bswap == B_TRUE) {
- gcm_ctx->gcm_use_avx = B_FALSE;
+ /* The AVX impl. doesn't handle byte swapped key schedules. */
+ if (needs_bswap == B_TRUE) {
+ gcm_ctx->impl = GCM_IMPL_GENERIC;
}
/*
- * If this is a GCM context, use the MOVBE and the BSWAP
+ * If this is an AVX context, use the MOVBE and the BSWAP
* variants alternately.
*/
- if (gcm_ctx->gcm_use_avx == B_TRUE &&
+ if (gcm_ctx->impl == GCM_IMPL_AVX &&
zfs_movbe_available() == B_TRUE) {
(void) atomic_toggle_boolean_nv(
(volatile boolean_t *)&gcm_avx_can_use_movbe);
@@ -658,12 +660,13 @@ gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param,
* still they could be created by the aes generic implementation.
* Make sure not to use them since we'll corrupt data if we do.
*/
- if (gcm_ctx->gcm_use_avx == B_TRUE && needs_bswap == B_TRUE) {
- gcm_ctx->gcm_use_avx = B_FALSE;
+ if (gcm_ctx->impl != GCM_IMPL_GENERIC && needs_bswap == B_TRUE) {
+ gcm_ctx->impl = GCM_IMPL_GENERIC;
cmn_err_once(CE_WARN,
"ICP: Can't use the aes generic or cycle implementations "
- "in combination with the gcm avx implementation!");
+ "in combination with the gcm avx or avx2-vaes "
+ "implementation!");
cmn_err_once(CE_WARN,
"ICP: Falling back to a compatible implementation, "
"aes-gcm performance will likely be degraded.");
@@ -672,36 +675,20 @@ gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param,
"restore performance.");
}
- /* Allocate Htab memory as needed. */
- if (gcm_ctx->gcm_use_avx == B_TRUE) {
- size_t htab_len = gcm_simd_get_htab_size(gcm_ctx->gcm_use_avx);
-
- if (htab_len == 0) {
- return (CRYPTO_MECHANISM_PARAM_INVALID);
- }
- gcm_ctx->gcm_htab_len = htab_len;
- gcm_ctx->gcm_Htable =
- kmem_alloc(htab_len, KM_SLEEP);
-
- if (gcm_ctx->gcm_Htable == NULL) {
- return (CRYPTO_HOST_MEMORY);
- }
+ /*
+ * AVX implementations use Htable with sizes depending on
+ * implementation.
+ */
+ if (gcm_ctx->impl != GCM_IMPL_GENERIC) {
+ rv = gcm_init_avx(gcm_ctx, iv, iv_len, aad, aad_len,
+ block_size);
}
- /* Avx and non avx context initialization differs from here on. */
- if (gcm_ctx->gcm_use_avx == B_FALSE) {
+ else
#endif /* ifdef CAN_USE_GCM_ASM */
- if (gcm_init(gcm_ctx, iv, iv_len, aad, aad_len, block_size,
- encrypt_block, copy_block, xor_block) != CRYPTO_SUCCESS) {
- rv = CRYPTO_MECHANISM_PARAM_INVALID;
- }
-#ifdef CAN_USE_GCM_ASM
- } else {
- if (gcm_init_avx(gcm_ctx, iv, iv_len, aad, aad_len,
- block_size) != CRYPTO_SUCCESS) {
- rv = CRYPTO_MECHANISM_PARAM_INVALID;
- }
+ if (gcm_init(gcm_ctx, iv, iv_len, aad, aad_len, block_size,
+ encrypt_block, copy_block, xor_block) != CRYPTO_SUCCESS) {
+ rv = CRYPTO_MECHANISM_PARAM_INVALID;
}
-#endif /* ifdef CAN_USE_GCM_ASM */
return (rv);
}
@@ -767,6 +754,9 @@ gcm_impl_get_ops(void)
break;
#ifdef CAN_USE_GCM_ASM
case IMPL_AVX:
+#if CAN_USE_GCM_ASM >= 2
+ case IMPL_AVX2:
+#endif
/*
* Make sure that we return a valid implementation while
* switching to the avx implementation since there still
@@ -828,6 +818,13 @@ gcm_impl_init(void)
* Use the avx implementation if it's available and the implementation
* hasn't changed from its default value of fastest on module load.
*/
+#if CAN_USE_GCM_ASM >= 2
+ if (gcm_avx2_will_work()) {
+ if (GCM_IMPL_READ(user_sel_impl) == IMPL_FASTEST) {
+ gcm_use_impl(GCM_IMPL_AVX2);
+ }
+ } else
+#endif
if (gcm_avx_will_work()) {
#ifdef HAVE_MOVBE
if (zfs_movbe_available() == B_TRUE) {
@@ -835,7 +832,7 @@ gcm_impl_init(void)
}
#endif
if (GCM_IMPL_READ(user_sel_impl) == IMPL_FASTEST) {
- gcm_set_avx(B_TRUE);
+ gcm_use_impl(GCM_IMPL_AVX);
}
}
#endif
@@ -852,6 +849,7 @@ static const struct {
{ "fastest", IMPL_FASTEST },
#ifdef CAN_USE_GCM_ASM
{ "avx", IMPL_AVX },
+ { "avx2-vaes", IMPL_AVX2 },
#endif
};
@@ -887,7 +885,13 @@ gcm_impl_set(const char *val)
/* Check mandatory options */
for (i = 0; i < ARRAY_SIZE(gcm_impl_opts); i++) {
#ifdef CAN_USE_GCM_ASM
+#if CAN_USE_GCM_ASM >= 2
/* Ignore avx implementation if it won't work. */
+ if (gcm_impl_opts[i].sel == IMPL_AVX2 &&
+ !gcm_avx2_will_work()) {
+ continue;
+ }
+#endif
if (gcm_impl_opts[i].sel == IMPL_AVX && !gcm_avx_will_work()) {
continue;
}
@@ -915,11 +919,17 @@ gcm_impl_set(const char *val)
* Use the avx implementation if available and the requested one is
* avx or fastest.
*/
+#if CAN_USE_GCM_ASM >= 2
+ if (gcm_avx2_will_work() == B_TRUE &&
+ (impl == IMPL_AVX2 || impl == IMPL_FASTEST)) {
+ gcm_use_impl(GCM_IMPL_AVX2);
+ } else
+#endif
if (gcm_avx_will_work() == B_TRUE &&
(impl == IMPL_AVX || impl == IMPL_FASTEST)) {
- gcm_set_avx(B_TRUE);
+ gcm_use_impl(GCM_IMPL_AVX);
} else {
- gcm_set_avx(B_FALSE);
+ gcm_use_impl(GCM_IMPL_GENERIC);
}
#endif
@@ -952,6 +962,12 @@ icp_gcm_impl_get(char *buffer, zfs_kernel_param_t *kp)
for (i = 0; i < ARRAY_SIZE(gcm_impl_opts); i++) {
#ifdef CAN_USE_GCM_ASM
/* Ignore avx implementation if it won't work. */
+#if CAN_USE_GCM_ASM >= 2
+ if (gcm_impl_opts[i].sel == IMPL_AVX2 &&
+ !gcm_avx2_will_work()) {
+ continue;
+ }
+#endif
if (gcm_impl_opts[i].sel == IMPL_AVX && !gcm_avx_will_work()) {
continue;
}
@@ -993,9 +1009,6 @@ MODULE_PARM_DESC(icp_gcm_impl, "Select gcm implementation.");
/* Clear the FPU registers since they hold sensitive internal state. */
#define clear_fpu_regs() clear_fpu_regs_avx()
-#define GHASH_AVX(ctx, in, len) \
- gcm_ghash_avx((ctx)->gcm_ghash, (const uint64_t *)(ctx)->gcm_Htable, \
- in, len)
#define gcm_incr_counter_block(ctx) gcm_incr_counter_block_by(ctx, 1)
@@ -1010,20 +1023,77 @@ MODULE_PARM_DESC(icp_gcm_impl, "Select gcm implementation.");
static uint32_t gcm_avx_chunk_size =
((32 * 1024) / GCM_AVX_MIN_DECRYPT_BYTES) * GCM_AVX_MIN_DECRYPT_BYTES;
+/*
+ * GCM definitions: uint128_t is copied from include/crypto/modes.h
+ * Avoiding u128 because it is already defined in kernel sources.
+ */
+typedef struct {
+ uint64_t hi, lo;
+} uint128_t;
+
extern void ASMABI clear_fpu_regs_avx(void);
extern void ASMABI gcm_xor_avx(const uint8_t *src, uint8_t *dst);
extern void ASMABI aes_encrypt_intel(const uint32_t rk[], int nr,
const uint32_t pt[4], uint32_t ct[4]);
extern void ASMABI gcm_init_htab_avx(uint64_t *Htable, const uint64_t H[2]);
+#if CAN_USE_GCM_ASM >= 2
+extern void ASMABI gcm_init_vpclmulqdq_avx2(uint128_t Htable[16],
+ const uint64_t H[2]);
+#endif
extern void ASMABI gcm_ghash_avx(uint64_t ghash[2], const uint64_t *Htable,
const uint8_t *in, size_t len);
+#if CAN_USE_GCM_ASM >= 2
+extern void ASMABI gcm_ghash_vpclmulqdq_avx2(uint64_t ghash[2],
+ const uint64_t *Htable, const uint8_t *in, size_t len);
+#endif
+static inline void GHASH_AVX(gcm_ctx_t *ctx, const uint8_t *in, size_t len)
+{
+ switch (ctx->impl) {
+#if CAN_USE_GCM_ASM >= 2
+ case GCM_IMPL_AVX2:
+ gcm_ghash_vpclmulqdq_avx2(ctx->gcm_ghash,
+ (const uint64_t *)ctx->gcm_Htable, in, len);
+ break;
+#endif
+
+ case GCM_IMPL_AVX:
+ gcm_ghash_avx(ctx->gcm_ghash,
+ (const uint64_t *)ctx->gcm_Htable, in, len);
+ break;
+
+ default:
+ VERIFY(B_FALSE);
+ }
+}
+typedef size_t ASMABI aesni_gcm_encrypt_impl(const uint8_t *, uint8_t *,
+ size_t, const void *, uint64_t *, const uint64_t *Htable, uint64_t *);
extern size_t ASMABI aesni_gcm_encrypt(const uint8_t *, uint8_t *, size_t,
const void *, uint64_t *, uint64_t *);
+#if CAN_USE_GCM_ASM >= 2
+extern void ASMABI aes_gcm_enc_update_vaes_avx2(const uint8_t *in,
+ uint8_t *out, size_t len, const void *key, const uint8_t ivec[16],
+ const uint128_t Htable[16], uint8_t Xi[16]);
+#endif
+typedef size_t ASMABI aesni_gcm_decrypt_impl(const uint8_t *, uint8_t *,
+ size_t, const void *, uint64_t *, const uint64_t *Htable, uint64_t *);
extern size_t ASMABI aesni_gcm_decrypt(const uint8_t *, uint8_t *, size_t,
const void *, uint64_t *, uint64_t *);
+#if CAN_USE_GCM_ASM >= 2
+extern void ASMABI aes_gcm_dec_update_vaes_avx2(const uint8_t *in,
+ uint8_t *out, size_t len, const void *key, const uint8_t ivec[16],
+ const uint128_t Htable[16], uint8_t Xi[16]);
+#endif
+
+static inline boolean_t
+gcm_avx2_will_work(void)
+{
+ return (kfpu_allowed() &&
+ zfs_avx2_available() && zfs_vaes_available() &&
+ zfs_vpclmulqdq_available());
+}
static inline boolean_t
gcm_avx_will_work(void)
@@ -1035,33 +1105,67 @@ gcm_avx_will_work(void)
}
static inline void
-gcm_set_avx(boolean_t val)
+gcm_use_impl(gcm_impl impl)
{
- if (gcm_avx_will_work() == B_TRUE) {
- atomic_swap_32(&gcm_use_avx, val);
+ switch (impl) {
+#if CAN_USE_GCM_ASM >= 2
+ case GCM_IMPL_AVX2:
+ if (gcm_avx2_will_work() == B_TRUE) {
+ atomic_swap_32(&gcm_impl_used, impl);
+ return;
+ }
+
+ zfs_fallthrough;
+#endif
+
+ case GCM_IMPL_AVX:
+ if (gcm_avx_will_work() == B_TRUE) {
+ atomic_swap_32(&gcm_impl_used, impl);
+ return;
+ }
+
+ zfs_fallthrough;
+
+ default:
+ atomic_swap_32(&gcm_impl_used, GCM_IMPL_GENERIC);
}
}
static inline boolean_t
-gcm_toggle_avx(void)
+gcm_impl_will_work(gcm_impl impl)
{
- if (gcm_avx_will_work() == B_TRUE) {
- return (atomic_toggle_boolean_nv(&GCM_IMPL_USE_AVX));
- } else {
- return (B_FALSE);
+ switch (impl) {
+#if CAN_USE_GCM_ASM >= 2
+ case GCM_IMPL_AVX2:
+ return (gcm_avx2_will_work());
+#endif
+
+ case GCM_IMPL_AVX:
+ return (gcm_avx_will_work());
+
+ default:
+ return (B_TRUE);
}
}
-static inline size_t
-gcm_simd_get_htab_size(boolean_t simd_mode)
+static inline gcm_impl
+gcm_toggle_impl(void)
{
- switch (simd_mode) {
- case B_TRUE:
- return (2 * 6 * 2 * sizeof (uint64_t));
+ gcm_impl current_impl, new_impl;
+ do { /* handle races */
+ current_impl = atomic_load_32(&gcm_impl_used);
+ new_impl = current_impl;
+ while (B_TRUE) { /* handle incompatible implementations */
+ new_impl = (new_impl + 1) % GCM_IMPL_MAX;
+ if (gcm_impl_will_work(new_impl)) {
+ break;
+ }
+ }
- default:
- return (0);
- }
+ } while (atomic_cas_32(&gcm_impl_used, current_impl, new_impl) !=
+ current_impl);
+
+ return (new_impl);
}
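
The toggle used by the "cycle" selector above is a small lock-free rotate: load the current value, advance past any implementation that cannot run on this CPU, and publish the result with a compare-and-swap, retrying if another thread won the race. A minimal userland sketch of the same pattern, using C11 atomics and made-up implementation names (the kernel code itself relies on the SPL's atomic_load_32()/atomic_cas_32() and the real gcm_impl enum):

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical names, for illustration only. */
enum impl { IMPL_GENERIC, IMPL_A, IMPL_B, IMPL_MAX };
static _Atomic unsigned cur_impl = IMPL_GENERIC;

static bool
impl_will_work(unsigned impl)
{
	return (impl != IMPL_B);	/* pretend IMPL_B lacks CPU support */
}

static unsigned
toggle_impl(void)
{
	unsigned old, next;

	do {
		old = atomic_load(&cur_impl);
		next = old;
		do {			/* skip unusable implementations */
			next = (next + 1) % IMPL_MAX;
		} while (!impl_will_work(next));
		/* publish; retry if another thread rotated it meanwhile */
	} while (!atomic_compare_exchange_weak(&cur_impl, &old, next));
	return (next);
}
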
@@ -1077,6 +1181,50 @@ gcm_incr_counter_block_by(gcm_ctx_t *ctx, int n)
ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;
}
+static size_t aesni_gcm_encrypt_avx(const uint8_t *in, uint8_t *out,
+ size_t len, const void *key, uint64_t *iv, const uint64_t *Htable,
+ uint64_t *Xip)
+{
+ (void) Htable;
+ return (aesni_gcm_encrypt(in, out, len, key, iv, Xip));
+}
+
+#if CAN_USE_GCM_ASM >= 2
+// kSizeTWithoutLower4Bits is a mask that can be used to zero the lower four
+// bits of a |size_t|.
+// This is from boringssl/crypto/fipsmodule/aes/gcm.cc.inc
+static const size_t kSizeTWithoutLower4Bits = (size_t)-16;
+
+/* The following CRYPTO methods are from boringssl/crypto/internal.h */
+static inline uint32_t CRYPTO_bswap4(uint32_t x) {
+ return (__builtin_bswap32(x));
+}
+
+static inline uint32_t CRYPTO_load_u32_be(const void *in) {
+ uint32_t v;
+ memcpy(&v, in, sizeof (v));
+ return (CRYPTO_bswap4(v));
+}
+
+static inline void CRYPTO_store_u32_be(void *out, uint32_t v) {
+ v = CRYPTO_bswap4(v);
+ memcpy(out, &v, sizeof (v));
+}
+
+static size_t aesni_gcm_encrypt_avx2(const uint8_t *in, uint8_t *out,
+ size_t len, const void *key, uint64_t *iv, const uint64_t *Htable,
+ uint64_t *Xip)
+{
+ uint8_t *ivec = (uint8_t *)iv;
+ len &= kSizeTWithoutLower4Bits;
+ aes_gcm_enc_update_vaes_avx2(in, out, len, key, ivec,
+ (const uint128_t *)Htable, (uint8_t *)Xip);
+ CRYPTO_store_u32_be(&ivec[12],
+ CRYPTO_load_u32_be(&ivec[12]) + len / 16);
+ return (len);
+}
+#endif /* if CAN_USE_GCM_ASM >= 2 */
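
The BoringSSL bulk routines only consume whole 16-byte blocks and do not advance the caller's counter block, which is why the wrapper masks the length with kSizeTWithoutLower4Bits and then bumps the 32-bit big-endian block counter held in bytes 12..15 of the counter block by len / 16. A standalone sketch of just that counter bookkeeping, with a hypothetical ctr_advance() helper (little-endian host assumed, as on any CPU that can run this x86-64/VAES path):

#include <stdint.h>
#include <string.h>

/* Hypothetical helper, not part of the patch. */
void
ctr_advance(uint8_t cb[16], size_t len)
{
	uint32_t be;

	len &= (size_t)-16;			/* whole 16-byte blocks only */
	memcpy(&be, &cb[12], sizeof (be));	/* big-endian bytes from memory */
	be = __builtin_bswap32(__builtin_bswap32(be) + (uint32_t)(len / 16));
	memcpy(&cb[12], &be, sizeof (be));	/* store back in big-endian */
}
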
+
/*
* Encrypt multiple blocks of data in GCM mode.
* This is done in gcm_avx_chunk_size chunks, utilizing AVX assembler routines
@@ -1091,8 +1239,15 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data,
size_t done = 0;
uint8_t *datap = (uint8_t *)data;
size_t chunk_size = (size_t)GCM_CHUNK_SIZE_READ;
+ aesni_gcm_encrypt_impl *encrypt_blocks =
+#if CAN_USE_GCM_ASM >= 2
+ ctx->impl == GCM_IMPL_AVX2 ?
+ aesni_gcm_encrypt_avx2 :
+#endif
+ aesni_gcm_encrypt_avx;
const aes_key_t *key = ((aes_key_t *)ctx->gcm_keysched);
uint64_t *ghash = ctx->gcm_ghash;
+ uint64_t *htable = ctx->gcm_Htable;
uint64_t *cb = ctx->gcm_cb;
uint8_t *ct_buf = NULL;
uint8_t *tmp = (uint8_t *)ctx->gcm_tmp;
@@ -1156,8 +1311,8 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data,
/* Do the bulk encryption in chunk_size blocks. */
for (; bleft >= chunk_size; bleft -= chunk_size) {
kfpu_begin();
- done = aesni_gcm_encrypt(
- datap, ct_buf, chunk_size, key, cb, ghash);
+ done = encrypt_blocks(
+ datap, ct_buf, chunk_size, key, cb, htable, ghash);
clear_fpu_regs();
kfpu_end();
@@ -1180,7 +1335,8 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data,
/* Bulk encrypt the remaining data. */
kfpu_begin();
if (bleft >= GCM_AVX_MIN_ENCRYPT_BYTES) {
- done = aesni_gcm_encrypt(datap, ct_buf, bleft, key, cb, ghash);
+ done = encrypt_blocks(datap, ct_buf, bleft, key, cb, htable,
+ ghash);
if (done == 0) {
rv = CRYPTO_FAILED;
goto out;
@@ -1293,6 +1449,29 @@ gcm_encrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
return (CRYPTO_SUCCESS);
}
+static size_t aesni_gcm_decrypt_avx(const uint8_t *in, uint8_t *out,
+ size_t len, const void *key, uint64_t *iv, const uint64_t *Htable,
+ uint64_t *Xip)
+{
+ (void) Htable;
+ return (aesni_gcm_decrypt(in, out, len, key, iv, Xip));
+}
+
+#if CAN_USE_GCM_ASM >= 2
+static size_t aesni_gcm_decrypt_avx2(const uint8_t *in, uint8_t *out,
+ size_t len, const void *key, uint64_t *iv, const uint64_t *Htable,
+ uint64_t *Xip)
+{
+ uint8_t *ivec = (uint8_t *)iv;
+ len &= kSizeTWithoutLower4Bits;
+ aes_gcm_dec_update_vaes_avx2(in, out, len, key, ivec,
+ (const uint128_t *)Htable, (uint8_t *)Xip);
+ CRYPTO_store_u32_be(&ivec[12],
+ CRYPTO_load_u32_be(&ivec[12]) + len / 16);
+ return (len);
+}
+#endif /* if CAN_USE_GCM_ASM >= 2 */
+
/*
* Finalize decryption: We just have accumulated crypto text, so now we
* decrypt it here inplace.
@@ -1306,10 +1485,17 @@ gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
B_FALSE);
size_t chunk_size = (size_t)GCM_CHUNK_SIZE_READ;
+ aesni_gcm_decrypt_impl *decrypt_blocks =
+#if CAN_USE_GCM_ASM >= 2
+ ctx->impl == GCM_IMPL_AVX2 ?
+ aesni_gcm_decrypt_avx2 :
+#endif
+ aesni_gcm_decrypt_avx;
size_t pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len;
uint8_t *datap = ctx->gcm_pt_buf;
const aes_key_t *key = ((aes_key_t *)ctx->gcm_keysched);
uint32_t *cb = (uint32_t *)ctx->gcm_cb;
+ uint64_t *htable = ctx->gcm_Htable;
uint64_t *ghash = ctx->gcm_ghash;
uint32_t *tmp = (uint32_t *)ctx->gcm_tmp;
int rv = CRYPTO_SUCCESS;
@@ -1322,8 +1508,8 @@ gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
*/
for (bleft = pt_len; bleft >= chunk_size; bleft -= chunk_size) {
kfpu_begin();
- done = aesni_gcm_decrypt(datap, datap, chunk_size,
- (const void *)key, ctx->gcm_cb, ghash);
+ done = decrypt_blocks(datap, datap, chunk_size,
+ (const void *)key, ctx->gcm_cb, htable, ghash);
clear_fpu_regs();
kfpu_end();
if (done != chunk_size) {
@@ -1334,8 +1520,8 @@ gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
/* Decrypt remainder, which is less than chunk size, in one go. */
kfpu_begin();
if (bleft >= GCM_AVX_MIN_DECRYPT_BYTES) {
- done = aesni_gcm_decrypt(datap, datap, bleft,
- (const void *)key, ctx->gcm_cb, ghash);
+ done = decrypt_blocks(datap, datap, bleft,
+ (const void *)key, ctx->gcm_cb, htable, ghash);
if (done == 0) {
clear_fpu_regs();
kfpu_end();
@@ -1424,13 +1610,42 @@ gcm_init_avx(gcm_ctx_t *ctx, const uint8_t *iv, size_t iv_len,
ASSERT3S(((aes_key_t *)ctx->gcm_keysched)->ops->needs_byteswap, ==,
B_FALSE);
+ size_t htab_len = 0;
+#if CAN_USE_GCM_ASM >= 2
+ if (ctx->impl == GCM_IMPL_AVX2) {
+ /*
+ * BoringSSL's API specifies uint128_t[16] for htab; but only
+ * uint128_t[12] are used.
+ * See https://github.com/google/boringssl/blob/
+ * 813840dd094f9e9c1b00a7368aa25e656554221f1/crypto/fipsmodule/
+ * modes/asm/aes-gcm-avx2-x86_64.pl#L198-L200
+ */
+ htab_len = (2 * 8 * sizeof (uint128_t));
+ } else
+#endif /* CAN_USE_GCM_ASM >= 2 */
+ {
+ htab_len = (2 * 6 * sizeof (uint128_t));
+ }
+
+ ctx->gcm_Htable = kmem_alloc(htab_len, KM_SLEEP);
+ if (ctx->gcm_Htable == NULL) {
+ return (CRYPTO_HOST_MEMORY);
+ }
+
/* Init H (encrypt zero block) and create the initial counter block. */
memset(H, 0, sizeof (ctx->gcm_H));
kfpu_begin();
aes_encrypt_intel(keysched, aes_rounds,
(const uint32_t *)H, (uint32_t *)H);
- gcm_init_htab_avx(ctx->gcm_Htable, H);
+#if CAN_USE_GCM_ASM >= 2
+ if (ctx->impl == GCM_IMPL_AVX2) {
+ gcm_init_vpclmulqdq_avx2((uint128_t *)ctx->gcm_Htable, H);
+ } else
+#endif /* if CAN_USE_GCM_ASM >= 2 */
+ {
+ gcm_init_htab_avx(ctx->gcm_Htable, H);
+ }
if (iv_len == 12) {
memcpy(cb, iv, 12);
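
For reference, the two allocation sizes chosen in gcm_init_avx() above work out to 256 bytes for the AVX2/VAES path and 192 bytes for the original AVX path; the latter equals what the removed gcm_simd_get_htab_size() used to return. A throwaway check of that arithmetic (illustrative only):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct { uint64_t hi, lo; } uint128_t;	/* 16 bytes on x86-64 */

int
main(void)
{
	size_t avx2_len = 2 * 8 * sizeof (uint128_t);	/* 256 bytes */
	size_t avx_len  = 2 * 6 * sizeof (uint128_t);	/* 192 bytes */

	/* AVX2/VAES: room for the uint128_t[16] table the BoringSSL code
	 * indexes, of which only 12 entries are actually written. */
	assert(avx2_len == 16 * sizeof (uint128_t));
	/* AVX: unchanged from the removed gcm_simd_get_htab_size(), which
	 * returned 2 * 6 * 2 * sizeof (uint64_t) for the same 12 entries. */
	assert(avx_len == 2 * 6 * 2 * sizeof (uint64_t));
	return (0);
}
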
diff --git a/sys/contrib/openzfs/module/icp/algs/modes/modes.c b/sys/contrib/openzfs/module/icp/algs/modes/modes.c
index 343591cd9691..ef3c1806e4b6 100644
--- a/sys/contrib/openzfs/module/icp/algs/modes/modes.c
+++ b/sys/contrib/openzfs/module/icp/algs/modes/modes.c
@@ -171,7 +171,7 @@ gcm_clear_ctx(gcm_ctx_t *ctx)
explicit_memset(ctx->gcm_remainder, 0, sizeof (ctx->gcm_remainder));
explicit_memset(ctx->gcm_H, 0, sizeof (ctx->gcm_H));
#if defined(CAN_USE_GCM_ASM)
- if (ctx->gcm_use_avx == B_TRUE) {
+ if (ctx->impl != GCM_IMPL_GENERIC) {
ASSERT3P(ctx->gcm_Htable, !=, NULL);
explicit_memset(ctx->gcm_Htable, 0, ctx->gcm_htab_len);
kmem_free(ctx->gcm_Htable, ctx->gcm_htab_len);
diff --git a/sys/contrib/openzfs/module/icp/algs/sha2/sha256_impl.c b/sys/contrib/openzfs/module/icp/algs/sha2/sha256_impl.c
index 6d3bcca9f995..dcb0a391dda4 100644
--- a/sys/contrib/openzfs/module/icp/algs/sha2/sha256_impl.c
+++ b/sys/contrib/openzfs/module/icp/algs/sha2/sha256_impl.c
@@ -38,11 +38,14 @@
kfpu_begin(); E(s, d, b); kfpu_end(); \
}
+#if defined(__x86_64) || defined(__aarch64__) || defined(__arm__) || \
+ defined(__PPC64__)
/* some implementation is always okay */
static inline boolean_t sha2_is_supported(void)
{
return (B_TRUE);
}
+#endif
#if defined(__x86_64)
diff --git a/sys/contrib/openzfs/module/icp/algs/sha2/sha512_impl.c b/sys/contrib/openzfs/module/icp/algs/sha2/sha512_impl.c
index 2efd9fcf4c99..a85a71a83df4 100644
--- a/sys/contrib/openzfs/module/icp/algs/sha2/sha512_impl.c
+++ b/sys/contrib/openzfs/module/icp/algs/sha2/sha512_impl.c
@@ -38,11 +38,14 @@
kfpu_begin(); E(s, d, b); kfpu_end(); \
}
+#if defined(__x86_64) || defined(__aarch64__) || defined(__arm__) || \
+ defined(__PPC64__)
/* some implementation is always okay */
static inline boolean_t sha2_is_supported(void)
{
return (B_TRUE);
}
+#endif
#if defined(__x86_64)
diff --git a/sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl b/sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl
new file mode 100644
index 000000000000..04c03a37e0cb
--- /dev/null
+++ b/sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl
@@ -0,0 +1,253 @@
+BoringSSL is a fork of OpenSSL. As such, large parts of it fall under OpenSSL
+licensing. Files that are completely new have a Google copyright and an ISC
+license. This license is reproduced at the bottom of this file.
+
+Contributors to BoringSSL are required to follow the CLA rules for Chromium:
+https://cla.developers.google.com/clas
+
+Files in third_party/ have their own licenses, as described therein. The MIT
+license, for third_party/fiat, which, unlike other third_party directories, is
+compiled into non-test libraries, is included below.
+
+The OpenSSL toolkit stays under a dual license, i.e. both the conditions of the
+OpenSSL License and the original SSLeay license apply to the toolkit. See below
+for the actual license texts. Actually both licenses are BSD-style Open Source
+licenses. In case of any license issues related to OpenSSL please contact
+openssl-core@openssl.org.
+
+The following are Google-internal bug numbers where explicit permission from
+some authors is recorded for use of their work. (This is purely for our own
+record keeping.)
+ 27287199
+ 27287880
+ 27287883
+ 263291445
+
+
+ OpenSSL License
+ ---------------
+
+/* ====================================================================
+ * Copyright (c) 1998-2011 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+ Original SSLeay License
+ -----------------------
+
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+
+ISC license used for completely new code in BoringSSL:
+
+/* Copyright 2015 The BoringSSL Authors
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
+
+
+The code in third_party/fiat carries the MIT license:
+
+Copyright (c) 2015-2016 the fiat-crypto authors (see
+https://github.com/mit-plv/fiat-crypto/blob/master/AUTHORS).
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+Licenses for support code
+-------------------------
+
+Parts of the TLS test suite are under the Go license. This code is not included
+in BoringSSL (i.e. libcrypto and libssl) when compiled, however, so
+distributing code linked against BoringSSL does not trigger this license:
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+BoringSSL uses the Chromium test infrastructure to run a continuous build,
+trybots etc. The scripts which manage this, and the script for generating build
+metadata, are under the Chromium license. Distributing code linked against
+BoringSSL does not trigger this license.
+
+Copyright 2015 The Chromium Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl.descrip b/sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl.descrip
new file mode 100644
index 000000000000..f63a67a4d2ae
--- /dev/null
+++ b/sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl.descrip
@@ -0,0 +1 @@
+PORTIONS OF AES GCM and GHASH FUNCTIONALITY
diff --git a/sys/contrib/openzfs/module/icp/asm-x86_64/modes/aesni-gcm-avx2-vaes.S b/sys/contrib/openzfs/module/icp/asm-x86_64/modes/aesni-gcm-avx2-vaes.S
new file mode 100644
index 000000000000..3d1b045127e2
--- /dev/null
+++ b/sys/contrib/openzfs/module/icp/asm-x86_64/modes/aesni-gcm-avx2-vaes.S
@@ -0,0 +1,1323 @@
+// SPDX-License-Identifier: Apache-2.0
+// This file is generated from a similarly-named Perl script in the BoringSSL
+// source tree. Do not edit by hand.
+
+#if defined(__x86_64__) && defined(HAVE_AVX) && \
+ defined(HAVE_VAES) && defined(HAVE_VPCLMULQDQ)
+
+#define _ASM
+#include <sys/asm_linkage.h>
+
+/* Windows userland links with OpenSSL */
+#if !defined (_WIN32) || defined (_KERNEL)
+
+.section .rodata
+.balign 16
+
+
+.Lbswap_mask:
+.quad 0x08090a0b0c0d0e0f, 0x0001020304050607
+
+
+
+
+
+
+
+
+.Lgfpoly:
+.quad 1, 0xc200000000000000
+
+
+.Lgfpoly_and_internal_carrybit:
+.quad 1, 0xc200000000000001
+
+.balign 32
+
+.Lctr_pattern:
+.quad 0, 0
+.quad 1, 0
+.Linc_2blocks:
+.quad 2, 0
+.quad 2, 0
+
+ENTRY_ALIGN(gcm_init_vpclmulqdq_avx2, 32)
+.cfi_startproc
+
+ENDBR
+
+
+
+
+
+ vmovdqu (%rsi),%xmm3
+ // KCF/ICP stores H in network byte order with the hi qword first
+ // so we need to swap all bytes, not the 2 qwords.
+ vmovdqu .Lbswap_mask(%rip),%xmm4
+ vpshufb %xmm4,%xmm3,%xmm3
+
+
+
+
+
+ vpshufd $0xd3,%xmm3,%xmm0
+ vpsrad $31,%xmm0,%xmm0
+ vpaddq %xmm3,%xmm3,%xmm3
+ vpand .Lgfpoly_and_internal_carrybit(%rip),%xmm0,%xmm0
+ vpxor %xmm0,%xmm3,%xmm3
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm6
+
+
+ vpclmulqdq $0x00,%xmm3,%xmm3,%xmm0
+ vpclmulqdq $0x11,%xmm3,%xmm3,%xmm5
+ vpclmulqdq $0x01,%xmm0,%xmm6,%xmm1
+ vpshufd $0x4e,%xmm0,%xmm0
+ vpxor %xmm0,%xmm1,%xmm1
+ vpclmulqdq $0x01,%xmm1,%xmm6,%xmm0
+ vpshufd $0x4e,%xmm1,%xmm1
+ vpxor %xmm1,%xmm5,%xmm5
+ vpxor %xmm0,%xmm5,%xmm5
+
+
+
+ vinserti128 $1,%xmm3,%ymm5,%ymm3
+ vinserti128 $1,%xmm5,%ymm5,%ymm5
+
+
+ vpclmulqdq $0x00,%ymm5,%ymm3,%ymm0
+ vpclmulqdq $0x01,%ymm5,%ymm3,%ymm1
+ vpclmulqdq $0x10,%ymm5,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2
+ vpshufd $0x4e,%ymm0,%ymm0
+ vpxor %ymm0,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x11,%ymm5,%ymm3,%ymm4
+ vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0
+ vpshufd $0x4e,%ymm1,%ymm1
+ vpxor %ymm1,%ymm4,%ymm4
+ vpxor %ymm0,%ymm4,%ymm4
+
+
+
+ vmovdqu %ymm3,96(%rdi)
+ vmovdqu %ymm4,64(%rdi)
+
+
+
+ vpunpcklqdq %ymm3,%ymm4,%ymm0
+ vpunpckhqdq %ymm3,%ymm4,%ymm1
+ vpxor %ymm1,%ymm0,%ymm0
+ vmovdqu %ymm0,128+32(%rdi)
+
+
+ vpclmulqdq $0x00,%ymm5,%ymm4,%ymm0
+ vpclmulqdq $0x01,%ymm5,%ymm4,%ymm1
+ vpclmulqdq $0x10,%ymm5,%ymm4,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2
+ vpshufd $0x4e,%ymm0,%ymm0
+ vpxor %ymm0,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x11,%ymm5,%ymm4,%ymm3
+ vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0
+ vpshufd $0x4e,%ymm1,%ymm1
+ vpxor %ymm1,%ymm3,%ymm3
+ vpxor %ymm0,%ymm3,%ymm3
+
+ vpclmulqdq $0x00,%ymm5,%ymm3,%ymm0
+ vpclmulqdq $0x01,%ymm5,%ymm3,%ymm1
+ vpclmulqdq $0x10,%ymm5,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2
+ vpshufd $0x4e,%ymm0,%ymm0
+ vpxor %ymm0,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x11,%ymm5,%ymm3,%ymm4
+ vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0
+ vpshufd $0x4e,%ymm1,%ymm1
+ vpxor %ymm1,%ymm4,%ymm4
+ vpxor %ymm0,%ymm4,%ymm4
+
+ vmovdqu %ymm3,32(%rdi)
+ vmovdqu %ymm4,0(%rdi)
+
+
+
+ vpunpcklqdq %ymm3,%ymm4,%ymm0
+ vpunpckhqdq %ymm3,%ymm4,%ymm1
+ vpxor %ymm1,%ymm0,%ymm0
+ vmovdqu %ymm0,128(%rdi)
+
+ vzeroupper
+ RET
+
+.cfi_endproc
+SET_SIZE(gcm_init_vpclmulqdq_avx2)
+ENTRY_ALIGN(gcm_gmult_vpclmulqdq_avx2, 32)
+.cfi_startproc
+
+ENDBR
+
+
+
+ vmovdqu (%rdi),%xmm0
+ vmovdqu .Lbswap_mask(%rip),%xmm1
+ vmovdqu 128-16(%rsi),%xmm2
+ vmovdqu .Lgfpoly(%rip),%xmm3
+ vpshufb %xmm1,%xmm0,%xmm0
+
+ vpclmulqdq $0x00,%xmm2,%xmm0,%xmm4
+ vpclmulqdq $0x01,%xmm2,%xmm0,%xmm5
+ vpclmulqdq $0x10,%xmm2,%xmm0,%xmm6
+ vpxor %xmm6,%xmm5,%xmm5
+ vpclmulqdq $0x01,%xmm4,%xmm3,%xmm6
+ vpshufd $0x4e,%xmm4,%xmm4
+ vpxor %xmm4,%xmm5,%xmm5
+ vpxor %xmm6,%xmm5,%xmm5
+ vpclmulqdq $0x11,%xmm2,%xmm0,%xmm0
+ vpclmulqdq $0x01,%xmm5,%xmm3,%xmm4
+ vpshufd $0x4e,%xmm5,%xmm5
+ vpxor %xmm5,%xmm0,%xmm0
+ vpxor %xmm4,%xmm0,%xmm0
+
+
+ vpshufb %xmm1,%xmm0,%xmm0
+ vmovdqu %xmm0,(%rdi)
+
+
+ RET
+
+.cfi_endproc
+SET_SIZE(gcm_gmult_vpclmulqdq_avx2)
+ENTRY_ALIGN(gcm_ghash_vpclmulqdq_avx2, 32)
+.cfi_startproc
+
+ENDBR
+
+
+
+
+
+
+ vmovdqu .Lbswap_mask(%rip),%xmm6
+ vmovdqu .Lgfpoly(%rip),%xmm7
+
+
+ vmovdqu (%rdi),%xmm5
+ vpshufb %xmm6,%xmm5,%xmm5
+
+
+ cmpq $32,%rcx
+ jb .Lghash_lastblock
+
+
+
+ vinserti128 $1,%xmm6,%ymm6,%ymm6
+ vinserti128 $1,%xmm7,%ymm7,%ymm7
+
+ cmpq $127,%rcx
+ jbe .Lghash_loop_1x
+
+
+ vmovdqu 128(%rsi),%ymm8
+ vmovdqu 128+32(%rsi),%ymm9
+.Lghash_loop_4x:
+
+ vmovdqu 0(%rdx),%ymm1
+ vpshufb %ymm6,%ymm1,%ymm1
+ vmovdqu 0(%rsi),%ymm2
+ vpxor %ymm5,%ymm1,%ymm1
+ vpclmulqdq $0x00,%ymm2,%ymm1,%ymm3
+ vpclmulqdq $0x11,%ymm2,%ymm1,%ymm5
+ vpunpckhqdq %ymm1,%ymm1,%ymm0
+ vpxor %ymm1,%ymm0,%ymm0
+ vpclmulqdq $0x00,%ymm8,%ymm0,%ymm4
+
+ vmovdqu 32(%rdx),%ymm1
+ vpshufb %ymm6,%ymm1,%ymm1
+ vmovdqu 32(%rsi),%ymm2
+ vpclmulqdq $0x00,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpclmulqdq $0x11,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm5,%ymm5
+ vpunpckhqdq %ymm1,%ymm1,%ymm0
+ vpxor %ymm1,%ymm0,%ymm0
+ vpclmulqdq $0x10,%ymm8,%ymm0,%ymm0
+ vpxor %ymm0,%ymm4,%ymm4
+
+ vmovdqu 64(%rdx),%ymm1
+ vpshufb %ymm6,%ymm1,%ymm1
+ vmovdqu 64(%rsi),%ymm2
+ vpclmulqdq $0x00,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpclmulqdq $0x11,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm5,%ymm5
+ vpunpckhqdq %ymm1,%ymm1,%ymm0
+ vpxor %ymm1,%ymm0,%ymm0
+ vpclmulqdq $0x00,%ymm9,%ymm0,%ymm0
+ vpxor %ymm0,%ymm4,%ymm4
+
+
+ vmovdqu 96(%rdx),%ymm1
+ vpshufb %ymm6,%ymm1,%ymm1
+ vmovdqu 96(%rsi),%ymm2
+ vpclmulqdq $0x00,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpclmulqdq $0x11,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm5,%ymm5
+ vpunpckhqdq %ymm1,%ymm1,%ymm0
+ vpxor %ymm1,%ymm0,%ymm0
+ vpclmulqdq $0x10,%ymm9,%ymm0,%ymm0
+ vpxor %ymm0,%ymm4,%ymm4
+
+ vpxor %ymm3,%ymm4,%ymm4
+ vpxor %ymm5,%ymm4,%ymm4
+
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm2
+ vpclmulqdq $0x01,%ymm3,%ymm2,%ymm0
+ vpshufd $0x4e,%ymm3,%ymm3
+ vpxor %ymm3,%ymm4,%ymm4
+ vpxor %ymm0,%ymm4,%ymm4
+
+ vpclmulqdq $0x01,%ymm4,%ymm2,%ymm0
+ vpshufd $0x4e,%ymm4,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpxor %ymm0,%ymm5,%ymm5
+ vextracti128 $1,%ymm5,%xmm0
+ vpxor %xmm0,%xmm5,%xmm5
+
+ subq $-128,%rdx
+ addq $-128,%rcx
+ cmpq $127,%rcx
+ ja .Lghash_loop_4x
+
+
+ cmpq $32,%rcx
+ jb .Lghash_loop_1x_done
+.Lghash_loop_1x:
+ vmovdqu (%rdx),%ymm0
+ vpshufb %ymm6,%ymm0,%ymm0
+ vpxor %ymm0,%ymm5,%ymm5
+ vmovdqu 128-32(%rsi),%ymm0
+ vpclmulqdq $0x00,%ymm0,%ymm5,%ymm1
+ vpclmulqdq $0x01,%ymm0,%ymm5,%ymm2
+ vpclmulqdq $0x10,%ymm0,%ymm5,%ymm3
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x01,%ymm1,%ymm7,%ymm3
+ vpshufd $0x4e,%ymm1,%ymm1
+ vpxor %ymm1,%ymm2,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x11,%ymm0,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm7,%ymm1
+ vpshufd $0x4e,%ymm2,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpxor %ymm1,%ymm5,%ymm5
+
+ vextracti128 $1,%ymm5,%xmm0
+ vpxor %xmm0,%xmm5,%xmm5
+ addq $32,%rdx
+ subq $32,%rcx
+ cmpq $32,%rcx
+ jae .Lghash_loop_1x
+.Lghash_loop_1x_done:
+
+
+.Lghash_lastblock:
+ testq %rcx,%rcx
+ jz .Lghash_done
+ vmovdqu (%rdx),%xmm0
+ vpshufb %xmm6,%xmm0,%xmm0
+ vpxor %xmm0,%xmm5,%xmm5
+ vmovdqu 128-16(%rsi),%xmm0
+ vpclmulqdq $0x00,%xmm0,%xmm5,%xmm1
+ vpclmulqdq $0x01,%xmm0,%xmm5,%xmm2
+ vpclmulqdq $0x10,%xmm0,%xmm5,%xmm3
+ vpxor %xmm3,%xmm2,%xmm2
+ vpclmulqdq $0x01,%xmm1,%xmm7,%xmm3
+ vpshufd $0x4e,%xmm1,%xmm1
+ vpxor %xmm1,%xmm2,%xmm2
+ vpxor %xmm3,%xmm2,%xmm2
+ vpclmulqdq $0x11,%xmm0,%xmm5,%xmm5
+ vpclmulqdq $0x01,%xmm2,%xmm7,%xmm1
+ vpshufd $0x4e,%xmm2,%xmm2
+ vpxor %xmm2,%xmm5,%xmm5
+ vpxor %xmm1,%xmm5,%xmm5
+
+
+.Lghash_done:
+
+ vpshufb %xmm6,%xmm5,%xmm5
+ vmovdqu %xmm5,(%rdi)
+
+ vzeroupper
+ RET
+
+.cfi_endproc
+SET_SIZE(gcm_ghash_vpclmulqdq_avx2)
+ENTRY_ALIGN(aes_gcm_enc_update_vaes_avx2, 32)
+.cfi_startproc
+
+ENDBR
+ pushq %r12
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r12,-16
+
+ movq 16(%rsp),%r12
+#ifdef BORINGSSL_DISPATCH_TEST
+.extern BORINGSSL_function_hit
+.hidden BORINGSSL_function_hit
+ movb $1,BORINGSSL_function_hit+6(%rip)
+#endif
+ vbroadcasti128 .Lbswap_mask(%rip),%ymm0
+
+
+
+ vmovdqu (%r12),%xmm1
+ vpshufb %xmm0,%xmm1,%xmm1
+ vbroadcasti128 (%r8),%ymm11
+ vpshufb %ymm0,%ymm11,%ymm11
+
+
+
+ movl 504(%rcx),%r10d // ICP has a larger offset for rounds.
+ leal -24(,%r10,4),%r10d // ICP uses 10,12,14 not 9,11,13 for rounds.
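+ // After the adjustment %r10d is 16, 24 or 32 for AES-128/192/256,
+ // which the cmpl $24 checks further down use to pick how many extra
+ // vaesenc rounds to run; 96(%rcx,%r10,4) = %rcx + 16*rounds is then
+ // the address of the last round key, used by vaesenclast.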
+
+
+
+
+ leaq 96(%rcx,%r10,4),%r11
+ vbroadcasti128 (%rcx),%ymm9
+ vbroadcasti128 (%r11),%ymm10
+
+
+ vpaddd .Lctr_pattern(%rip),%ymm11,%ymm11
+
+
+
+ cmpq $127,%rdx
+ jbe .Lcrypt_loop_4x_done__func1
+
+ vmovdqu 128(%r9),%ymm7
+ vmovdqu 128+32(%r9),%ymm8
+
+
+
+ vmovdqu .Linc_2blocks(%rip),%ymm2
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm14
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm15
+ vpaddd %ymm2,%ymm11,%ymm11
+
+
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ vpxor %ymm9,%ymm14,%ymm14
+ vpxor %ymm9,%ymm15,%ymm15
+
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_first_4_vecs__func1:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_first_4_vecs__func1
+ vpxor 0(%rdi),%ymm10,%ymm2
+ vpxor 32(%rdi),%ymm10,%ymm3
+ vpxor 64(%rdi),%ymm10,%ymm5
+ vpxor 96(%rdi),%ymm10,%ymm6
+ vaesenclast %ymm2,%ymm12,%ymm12
+ vaesenclast %ymm3,%ymm13,%ymm13
+ vaesenclast %ymm5,%ymm14,%ymm14
+ vaesenclast %ymm6,%ymm15,%ymm15
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+ vmovdqu %ymm14,64(%rsi)
+ vmovdqu %ymm15,96(%rsi)
+
+ subq $-128,%rdi
+ addq $-128,%rdx
+ cmpq $127,%rdx
+ jbe .Lghash_last_ciphertext_4x__func1
+.balign 16
+.Lcrypt_loop_4x__func1:
+
+
+
+
+ vmovdqu .Linc_2blocks(%rip),%ymm2
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm14
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm15
+ vpaddd %ymm2,%ymm11,%ymm11
+
+
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ vpxor %ymm9,%ymm14,%ymm14
+ vpxor %ymm9,%ymm15,%ymm15
+
+ cmpl $24,%r10d
+ jl .Laes128__func1
+ je .Laes192__func1
+
+ vbroadcasti128 -208(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vbroadcasti128 -192(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+.Laes192__func1:
+ vbroadcasti128 -176(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vbroadcasti128 -160(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+.Laes128__func1:
+ prefetcht0 512(%rdi)
+ prefetcht0 512+64(%rdi)
+
+ vmovdqu 0(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 0(%r9),%ymm4
+ vpxor %ymm1,%ymm3,%ymm3
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6
+
+ vbroadcasti128 -144(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vbroadcasti128 -128(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vmovdqu 32(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 32(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -112(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vmovdqu 64(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 64(%r9),%ymm4
+
+ vbroadcasti128 -96(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+
+ vbroadcasti128 -80(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+
+ vmovdqu 96(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+
+ vbroadcasti128 -64(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vmovdqu 96(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -48(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm1,%ymm6,%ymm6
+
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm4
+ vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -32(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+
+ vbroadcasti128 -16(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vextracti128 $1,%ymm1,%xmm2
+ vpxor %xmm2,%xmm1,%xmm1
+
+
+ subq $-128,%rsi
+ vpxor 0(%rdi),%ymm10,%ymm2
+ vpxor 32(%rdi),%ymm10,%ymm3
+ vpxor 64(%rdi),%ymm10,%ymm5
+ vpxor 96(%rdi),%ymm10,%ymm6
+ vaesenclast %ymm2,%ymm12,%ymm12
+ vaesenclast %ymm3,%ymm13,%ymm13
+ vaesenclast %ymm5,%ymm14,%ymm14
+ vaesenclast %ymm6,%ymm15,%ymm15
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+ vmovdqu %ymm14,64(%rsi)
+ vmovdqu %ymm15,96(%rsi)
+
+ subq $-128,%rdi
+
+ addq $-128,%rdx
+ cmpq $127,%rdx
+ ja .Lcrypt_loop_4x__func1
+.Lghash_last_ciphertext_4x__func1:
+
+ vmovdqu 0(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 0(%r9),%ymm4
+ vpxor %ymm1,%ymm3,%ymm3
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6
+
+ vmovdqu 32(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 32(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vmovdqu 64(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 64(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+
+ vmovdqu 96(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 96(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm1,%ymm6,%ymm6
+
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm4
+ vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+ vextracti128 $1,%ymm1,%xmm2
+ vpxor %xmm2,%xmm1,%xmm1
+
+ subq $-128,%rsi
+.Lcrypt_loop_4x_done__func1:
+
+ testq %rdx,%rdx
+ jz .Ldone__func1
+
+
+
+
+
+ leaq 128(%r9),%r8
+ subq %rdx,%r8
+
+
+ vpxor %xmm5,%xmm5,%xmm5
+ vpxor %xmm6,%xmm6,%xmm6
+ vpxor %xmm7,%xmm7,%xmm7
+
+ cmpq $64,%rdx
+ jb .Llessthan64bytes__func1
+
+
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_tail_1__func1:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_tail_1__func1
+ vaesenclast %ymm10,%ymm12,%ymm12
+ vaesenclast %ymm10,%ymm13,%ymm13
+
+
+ vmovdqu 0(%rdi),%ymm2
+ vmovdqu 32(%rdi),%ymm3
+ vpxor %ymm2,%ymm12,%ymm12
+ vpxor %ymm3,%ymm13,%ymm13
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+
+
+ vpshufb %ymm0,%ymm12,%ymm12
+ vpshufb %ymm0,%ymm13,%ymm13
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ vmovdqu 32(%r8),%ymm3
+ vpclmulqdq $0x00,%ymm2,%ymm12,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm12,%ymm6
+ vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm2,%ymm12,%ymm7
+ vpclmulqdq $0x00,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+
+ addq $64,%r8
+ addq $64,%rdi
+ addq $64,%rsi
+ subq $64,%rdx
+ jz .Lreduce__func1
+
+ vpxor %xmm1,%xmm1,%xmm1
+
+
+.Llessthan64bytes__func1:
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_tail_2__func1:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_tail_2__func1
+ vaesenclast %ymm10,%ymm12,%ymm12
+ vaesenclast %ymm10,%ymm13,%ymm13
+
+
+
+
+ cmpq $32,%rdx
+ jb .Lxor_one_block__func1
+ je .Lxor_two_blocks__func1
+
+.Lxor_three_blocks__func1:
+ vmovdqu 0(%rdi),%ymm2
+ vmovdqu 32(%rdi),%xmm3
+ vpxor %ymm2,%ymm12,%ymm12
+ vpxor %xmm3,%xmm13,%xmm13
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %xmm13,32(%rsi)
+
+ vpshufb %ymm0,%ymm12,%ymm12
+ vpshufb %xmm0,%xmm13,%xmm13
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ vmovdqu 32(%r8),%xmm3
+ vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm7,%ymm7
+ jmp .Lghash_mul_one_vec_unreduced__func1
+
+.Lxor_two_blocks__func1:
+ vmovdqu (%rdi),%ymm2
+ vpxor %ymm2,%ymm12,%ymm12
+ vmovdqu %ymm12,(%rsi)
+ vpshufb %ymm0,%ymm12,%ymm12
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ jmp .Lghash_mul_one_vec_unreduced__func1
+
+.Lxor_one_block__func1:
+ vmovdqu (%rdi),%xmm2
+ vpxor %xmm2,%xmm12,%xmm12
+ vmovdqu %xmm12,(%rsi)
+ vpshufb %xmm0,%xmm12,%xmm12
+ vpxor %xmm1,%xmm12,%xmm12
+ vmovdqu (%r8),%xmm2
+
+.Lghash_mul_one_vec_unreduced__func1:
+ vpclmulqdq $0x00,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+
+.Lreduce__func1:
+
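+ // Reduce the unreduced accumulator (%ymm5, %ymm6, %ymm7) modulo the GCM
+ // polynomial and fold the result into the 128-bit GHASH state in %xmm1.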
+ vbroadcasti128 .Lgfpoly(%rip),%ymm2
+ vpclmulqdq $0x01,%ymm5,%ymm2,%ymm3
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm3,%ymm6,%ymm6
+ vpclmulqdq $0x01,%ymm6,%ymm2,%ymm3
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm7,%ymm7
+ vpxor %ymm3,%ymm7,%ymm7
+ vextracti128 $1,%ymm7,%xmm1
+ vpxor %xmm7,%xmm1,%xmm1
+
+.Ldone__func1:
+
+ vpshufb %xmm0,%xmm1,%xmm1
+ vmovdqu %xmm1,(%r12)
+
+ vzeroupper
+ popq %r12
+.cfi_adjust_cfa_offset -8
+.cfi_restore %r12
+ RET
+
+.cfi_endproc
+SET_SIZE(aes_gcm_enc_update_vaes_avx2)
+ENTRY_ALIGN(aes_gcm_dec_update_vaes_avx2, 32)
+.cfi_startproc
+
+ENDBR
+ pushq %r12
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r12,-16
+
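+ // Decryption: same structure as aes_gcm_enc_update_vaes_avx2, except
+ // GHASH is computed over the ciphertext read from the source buffer
+ // rather than over the newly produced output.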
+ movq 16(%rsp),%r12
+ vbroadcasti128 .Lbswap_mask(%rip),%ymm0
+
+
+
+ vmovdqu (%r12),%xmm1
+ vpshufb %xmm0,%xmm1,%xmm1
+ vbroadcasti128 (%r8),%ymm11
+ vpshufb %ymm0,%ymm11,%ymm11
+
+
+
+ movl 504(%rcx),%r10d // ICP has a larger offset for rounds.
+ leal -24(,%r10,4),%r10d // ICP uses 10,12,14 not 9,11,13 for rounds.
+
+
+
+
+ leaq 96(%rcx,%r10,4),%r11
+ vbroadcasti128 (%rcx),%ymm9
+ vbroadcasti128 (%r11),%ymm10
+
+
+ vpaddd .Lctr_pattern(%rip),%ymm11,%ymm11
+
+
+
+ cmpq $127,%rdx
+ jbe .Lcrypt_loop_4x_done__func2
+
+ vmovdqu 128(%r9),%ymm7
+ vmovdqu 128+32(%r9),%ymm8
+.balign 16
+.Lcrypt_loop_4x__func2:
+
+
+
+
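+ // Generate four vectors of counter blocks (eight 16-byte blocks) and
+ // XOR in the first round key.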
+ vmovdqu .Linc_2blocks(%rip),%ymm2
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm14
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm15
+ vpaddd %ymm2,%ymm11,%ymm11
+
+
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ vpxor %ymm9,%ymm14,%ymm14
+ vpxor %ymm9,%ymm15,%ymm15
+
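+ // %r10d = 4*rounds - 24 (16, 24, or 32 for AES-128/192/256); skip the
+ // extra leading rounds for the shorter key sizes.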
+ cmpl $24,%r10d
+ jl .Laes128__func2
+ je .Laes192__func2
+
+ vbroadcasti128 -208(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vbroadcasti128 -192(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+.Laes192__func2:
+ vbroadcasti128 -176(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vbroadcasti128 -160(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+.Laes128__func2:
+ prefetcht0 512(%rdi)
+ prefetcht0 512+64(%rdi)
+
+ vmovdqu 0(%rdi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 0(%r9),%ymm4
+ vpxor %ymm1,%ymm3,%ymm3
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6
+
+ vbroadcasti128 -144(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vbroadcasti128 -128(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vmovdqu 32(%rdi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 32(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -112(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vmovdqu 64(%rdi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 64(%r9),%ymm4
+
+ vbroadcasti128 -96(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+
+ vbroadcasti128 -80(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+
+ vmovdqu 96(%rdi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+
+ vbroadcasti128 -64(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vmovdqu 96(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -48(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm1,%ymm6,%ymm6
+
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm4
+ vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -32(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+
+ vbroadcasti128 -16(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vextracti128 $1,%ymm1,%xmm2
+ vpxor %xmm2,%xmm1,%xmm1
+
+
+
+ vpxor 0(%rdi),%ymm10,%ymm2
+ vpxor 32(%rdi),%ymm10,%ymm3
+ vpxor 64(%rdi),%ymm10,%ymm5
+ vpxor 96(%rdi),%ymm10,%ymm6
+ vaesenclast %ymm2,%ymm12,%ymm12
+ vaesenclast %ymm3,%ymm13,%ymm13
+ vaesenclast %ymm5,%ymm14,%ymm14
+ vaesenclast %ymm6,%ymm15,%ymm15
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+ vmovdqu %ymm14,64(%rsi)
+ vmovdqu %ymm15,96(%rsi)
+
+ subq $-128,%rdi
+ subq $-128,%rsi
+ addq $-128,%rdx
+ cmpq $127,%rdx
+ ja .Lcrypt_loop_4x__func2
+.Lcrypt_loop_4x_done__func2:
+
+ testq %rdx,%rdx
+ jz .Ldone__func2
+
+
+
+
+
+ leaq 128(%r9),%r8
+ subq %rdx,%r8
+
+
+ vpxor %xmm5,%xmm5,%xmm5
+ vpxor %xmm6,%xmm6,%xmm6
+ vpxor %xmm7,%xmm7,%xmm7
+
+ cmpq $64,%rdx
+ jb .Llessthan64bytes__func2
+
+
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_tail_1__func2:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_tail_1__func2
+ vaesenclast %ymm10,%ymm12,%ymm12
+ vaesenclast %ymm10,%ymm13,%ymm13
+
+
+ vmovdqu 0(%rdi),%ymm2
+ vmovdqu 32(%rdi),%ymm3
+ vpxor %ymm2,%ymm12,%ymm12
+ vpxor %ymm3,%ymm13,%ymm13
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+
+
+ vpshufb %ymm0,%ymm2,%ymm12
+ vpshufb %ymm0,%ymm3,%ymm13
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ vmovdqu 32(%r8),%ymm3
+ vpclmulqdq $0x00,%ymm2,%ymm12,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm12,%ymm6
+ vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm2,%ymm12,%ymm7
+ vpclmulqdq $0x00,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+
+ addq $64,%r8
+ addq $64,%rdi
+ addq $64,%rsi
+ subq $64,%rdx
+ jz .Lreduce__func2
+
+ vpxor %xmm1,%xmm1,%xmm1
+
+
+.Llessthan64bytes__func2:
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_tail_2__func2:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_tail_2__func2
+ vaesenclast %ymm10,%ymm12,%ymm12
+ vaesenclast %ymm10,%ymm13,%ymm13
+
+
+
+
+ cmpq $32,%rdx
+ jb .Lxor_one_block__func2
+ je .Lxor_two_blocks__func2
+
+.Lxor_three_blocks__func2:
+ vmovdqu 0(%rdi),%ymm2
+ vmovdqu 32(%rdi),%xmm3
+ vpxor %ymm2,%ymm12,%ymm12
+ vpxor %xmm3,%xmm13,%xmm13
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %xmm13,32(%rsi)
+
+ vpshufb %ymm0,%ymm2,%ymm12
+ vpshufb %xmm0,%xmm3,%xmm13
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ vmovdqu 32(%r8),%xmm3
+ vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm7,%ymm7
+ jmp .Lghash_mul_one_vec_unreduced__func2
+
+.Lxor_two_blocks__func2:
+ vmovdqu (%rdi),%ymm2
+ vpxor %ymm2,%ymm12,%ymm12
+ vmovdqu %ymm12,(%rsi)
+ vpshufb %ymm0,%ymm2,%ymm12
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ jmp .Lghash_mul_one_vec_unreduced__func2
+
+.Lxor_one_block__func2:
+ vmovdqu (%rdi),%xmm2
+ vpxor %xmm2,%xmm12,%xmm12
+ vmovdqu %xmm12,(%rsi)
+ vpshufb %xmm0,%xmm2,%xmm12
+ vpxor %xmm1,%xmm12,%xmm12
+ vmovdqu (%r8),%xmm2
+
+.Lghash_mul_one_vec_unreduced__func2:
+ vpclmulqdq $0x00,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+
+.Lreduce__func2:
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm2
+ vpclmulqdq $0x01,%ymm5,%ymm2,%ymm3
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm3,%ymm6,%ymm6
+ vpclmulqdq $0x01,%ymm6,%ymm2,%ymm3
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm7,%ymm7
+ vpxor %ymm3,%ymm7,%ymm7
+ vextracti128 $1,%ymm7,%xmm1
+ vpxor %xmm7,%xmm1,%xmm1
+
+.Ldone__func2:
+
+ vpshufb %xmm0,%xmm1,%xmm1
+ vmovdqu %xmm1,(%r12)
+
+ vzeroupper
+ popq %r12
+.cfi_adjust_cfa_offset -8
+.cfi_restore %r12
+ RET
+
+.cfi_endproc
+SET_SIZE(aes_gcm_dec_update_vaes_avx2)
+
+#endif /* !_WIN32 || _KERNEL */
+
+/* Mark the stack non-executable. */
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
+#endif /* defined(__x86_64__) && defined(HAVE_AVX) && defined(HAVE_AES) ... */
diff --git a/sys/contrib/openzfs/module/icp/include/modes/modes.h b/sys/contrib/openzfs/module/icp/include/modes/modes.h
index ca734cf4f045..de11d9eafafb 100644
--- a/sys/contrib/openzfs/module/icp/include/modes/modes.h
+++ b/sys/contrib/openzfs/module/icp/include/modes/modes.h
@@ -42,7 +42,7 @@ extern "C" {
*/
#if defined(__x86_64__) && defined(HAVE_AVX) && \
defined(HAVE_AES) && defined(HAVE_PCLMULQDQ)
-#define CAN_USE_GCM_ASM
+#define CAN_USE_GCM_ASM (HAVE_VAES && HAVE_VPCLMULQDQ ? 2 : 1)
extern boolean_t gcm_avx_can_use_movbe;
#endif
@@ -129,6 +129,15 @@ typedef struct ccm_ctx {
#define ccm_copy_to ccm_common.cc_copy_to
#define ccm_flags ccm_common.cc_flags
+#ifdef CAN_USE_GCM_ASM
+typedef enum gcm_impl {
+ GCM_IMPL_GENERIC = 0,
+ GCM_IMPL_AVX,
+ GCM_IMPL_AVX2,
+ GCM_IMPL_MAX,
+} gcm_impl;
+#endif
+
/*
* gcm_tag_len: Length of authentication tag.
*
@@ -174,7 +183,7 @@ typedef struct gcm_ctx {
uint64_t gcm_len_a_len_c[2];
uint8_t *gcm_pt_buf;
#ifdef CAN_USE_GCM_ASM
- boolean_t gcm_use_avx;
+ enum gcm_impl impl;
#endif
} gcm_ctx_t;
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/kmod_core.c b/sys/contrib/openzfs/module/os/freebsd/zfs/kmod_core.c
index c114db14a916..b218c0da8125 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/kmod_core.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/kmod_core.c
@@ -112,7 +112,6 @@ static int zfs__fini(void);
static void zfs_shutdown(void *, int);
static eventhandler_tag zfs_shutdown_event_tag;
-static eventhandler_tag zfs_mountroot_event_tag;
#define ZFS_MIN_KSTACK_PAGES 4
@@ -311,9 +310,6 @@ zfs_modevent(module_t mod, int type, void *unused __unused)
zfs_shutdown_event_tag = EVENTHANDLER_REGISTER(
shutdown_post_sync, zfs_shutdown, NULL,
SHUTDOWN_PRI_FIRST);
- zfs_mountroot_event_tag = EVENTHANDLER_REGISTER(
- mountroot, spa_boot_init, NULL,
- SI_ORDER_ANY);
}
return (err);
case MOD_UNLOAD:
@@ -322,9 +318,6 @@ zfs_modevent(module_t mod, int type, void *unused __unused)
if (zfs_shutdown_event_tag != NULL)
EVENTHANDLER_DEREGISTER(shutdown_post_sync,
zfs_shutdown_event_tag);
- if (zfs_mountroot_event_tag != NULL)
- EVENTHANDLER_DEREGISTER(mountroot,
- zfs_mountroot_event_tag);
}
return (err);
case MOD_SHUTDOWN:
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
index ace2360c032d..393bfaa65ff5 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c
@@ -188,11 +188,6 @@ param_set_arc_max(SYSCTL_HANDLER_ARGS)
return (0);
}
-SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_max,
- CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
- NULL, 0, param_set_arc_max, "LU",
- "Maximum ARC size in bytes (LEGACY)");
-
int
param_set_arc_min(SYSCTL_HANDLER_ARGS)
{
@@ -217,11 +212,6 @@ param_set_arc_min(SYSCTL_HANDLER_ARGS)
return (0);
}
-SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_min,
- CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
- NULL, 0, param_set_arc_min, "LU",
- "Minimum ARC size in bytes (LEGACY)");
-
extern uint_t zfs_arc_free_target;
int
@@ -245,16 +235,6 @@ param_set_arc_free_target(SYSCTL_HANDLER_ARGS)
return (0);
}
-/*
- * NOTE: This sysctl is CTLFLAG_RW not CTLFLAG_RWTUN due to its dependency on
- * pagedaemon initialization.
- */
-SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target,
- CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
- NULL, 0, param_set_arc_free_target, "IU",
- "Desired number of free pages below which ARC triggers reclaim"
- " (LEGACY)");
-
int
param_set_arc_no_grow_shift(SYSCTL_HANDLER_ARGS)
{
@@ -273,187 +253,6 @@ param_set_arc_no_grow_shift(SYSCTL_HANDLER_ARGS)
return (0);
}
-SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_no_grow_shift,
- CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
- NULL, 0, param_set_arc_no_grow_shift, "I",
- "log2(fraction of ARC which must be free to allow growing) (LEGACY)");
-
-extern uint64_t l2arc_write_max;
-
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max,
- CTLFLAG_RWTUN, &l2arc_write_max, 0,
- "Max write bytes per interval (LEGACY)");
-
-extern uint64_t l2arc_write_boost;
-
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost,
- CTLFLAG_RWTUN, &l2arc_write_boost, 0,
- "Extra write bytes during device warmup (LEGACY)");
-
-extern uint64_t l2arc_headroom;
-
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom,
- CTLFLAG_RWTUN, &l2arc_headroom, 0,
- "Number of max device writes to precache (LEGACY)");
-
-extern uint64_t l2arc_headroom_boost;
-
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom_boost,
- CTLFLAG_RWTUN, &l2arc_headroom_boost, 0,
- "Compressed l2arc_headroom multiplier (LEGACY)");
-
-extern uint64_t l2arc_feed_secs;
-
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs,
- CTLFLAG_RWTUN, &l2arc_feed_secs, 0,
- "Seconds between L2ARC writing (LEGACY)");
-
-extern uint64_t l2arc_feed_min_ms;
-
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms,
- CTLFLAG_RWTUN, &l2arc_feed_min_ms, 0,
- "Min feed interval in milliseconds (LEGACY)");
-
-extern int l2arc_noprefetch;
-
-SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch,
- CTLFLAG_RWTUN, &l2arc_noprefetch, 0,
- "Skip caching prefetched buffers (LEGACY)");
-
-extern int l2arc_feed_again;
-
-SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again,
- CTLFLAG_RWTUN, &l2arc_feed_again, 0,
- "Turbo L2ARC warmup (LEGACY)");
-
-extern int l2arc_norw;
-
-SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw,
- CTLFLAG_RWTUN, &l2arc_norw, 0,
- "No reads during writes (LEGACY)");
-
-static int
-param_get_arc_state_size(SYSCTL_HANDLER_ARGS)
-{
- arc_state_t *state = (arc_state_t *)arg1;
- int64_t val;
-
- val = zfs_refcount_count(&state->arcs_size[ARC_BUFC_DATA]) +
- zfs_refcount_count(&state->arcs_size[ARC_BUFC_METADATA]);
- return (sysctl_handle_64(oidp, &val, 0, req));
-}
-
-extern arc_state_t ARC_anon;
-
-SYSCTL_PROC(_vfs_zfs, OID_AUTO, anon_size,
- CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
- &ARC_anon, 0, param_get_arc_state_size, "Q",
- "size of anonymous state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_esize, CTLFLAG_RD,
- &ARC_anon.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
- "size of evictable metadata in anonymous state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_esize, CTLFLAG_RD,
- &ARC_anon.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
- "size of evictable data in anonymous state");
-
-extern arc_state_t ARC_mru;
-
-SYSCTL_PROC(_vfs_zfs, OID_AUTO, mru_size,
- CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
- &ARC_mru, 0, param_get_arc_state_size, "Q",
- "size of mru state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_esize, CTLFLAG_RD,
- &ARC_mru.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
- "size of evictable metadata in mru state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_esize, CTLFLAG_RD,
- &ARC_mru.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
- "size of evictable data in mru state");
-
-extern arc_state_t ARC_mru_ghost;
-
-SYSCTL_PROC(_vfs_zfs, OID_AUTO, mru_ghost_size,
- CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
- &ARC_mru_ghost, 0, param_get_arc_state_size, "Q",
- "size of mru ghost state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_esize, CTLFLAG_RD,
- &ARC_mru_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
- "size of evictable metadata in mru ghost state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_esize, CTLFLAG_RD,
- &ARC_mru_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
- "size of evictable data in mru ghost state");
-
-extern arc_state_t ARC_mfu;
-
-SYSCTL_PROC(_vfs_zfs, OID_AUTO, mfu_size,
- CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
- &ARC_mfu, 0, param_get_arc_state_size, "Q",
- "size of mfu state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_esize, CTLFLAG_RD,
- &ARC_mfu.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
- "size of evictable metadata in mfu state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_esize, CTLFLAG_RD,
- &ARC_mfu.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
- "size of evictable data in mfu state");
-
-extern arc_state_t ARC_mfu_ghost;
-
-SYSCTL_PROC(_vfs_zfs, OID_AUTO, mfu_ghost_size,
- CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
- &ARC_mfu_ghost, 0, param_get_arc_state_size, "Q",
- "size of mfu ghost state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_esize, CTLFLAG_RD,
- &ARC_mfu_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
- "size of evictable metadata in mfu ghost state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_esize, CTLFLAG_RD,
- &ARC_mfu_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
- "size of evictable data in mfu ghost state");
-
-extern arc_state_t ARC_uncached;
-
-SYSCTL_PROC(_vfs_zfs, OID_AUTO, uncached_size,
- CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
- &ARC_uncached, 0, param_get_arc_state_size, "Q",
- "size of uncached state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, uncached_metadata_esize, CTLFLAG_RD,
- &ARC_uncached.arcs_esize[ARC_BUFC_METADATA].rc_count, 0,
- "size of evictable metadata in uncached state");
-SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, uncached_data_esize, CTLFLAG_RD,
- &ARC_uncached.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
- "size of evictable data in uncached state");
-
-extern arc_state_t ARC_l2c_only;
-
-SYSCTL_PROC(_vfs_zfs, OID_AUTO, l2c_only_size,
- CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
- &ARC_l2c_only, 0, param_get_arc_state_size, "Q",
- "size of l2c_only state");
-
-/* dbuf.c */
-
-/* dmu.c */
-
-/* dmu_zfetch.c */
-
-SYSCTL_NODE(_vfs_zfs, OID_AUTO, zfetch, CTLFLAG_RW, 0, "ZFS ZFETCH (LEGACY)");
-
-extern uint32_t zfetch_max_distance;
-
-SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_distance,
- CTLFLAG_RWTUN, &zfetch_max_distance, 0,
- "Max bytes to prefetch per stream (LEGACY)");
-
-extern uint32_t zfetch_max_idistance;
-
-SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_idistance,
- CTLFLAG_RWTUN, &zfetch_max_idistance, 0,
- "Max bytes to prefetch indirects for per stream (LEGACY)");
-
-/* dsl_pool.c */
-
-/* dnode.c */
-
-/* dsl_scan.c */
-
/* metaslab.c */
int
@@ -514,19 +313,6 @@ SYSCTL_UINT(_vfs_zfs, OID_AUTO, condense_pct,
"Condense on-disk spacemap when it is more than this many percents"
" of in-memory counterpart");
-extern uint_t zfs_remove_max_segment;
-
-SYSCTL_UINT(_vfs_zfs, OID_AUTO, remove_max_segment,
- CTLFLAG_RWTUN, &zfs_remove_max_segment, 0,
- "Largest contiguous segment ZFS will attempt to allocate when removing"
- " a device");
-
-extern int zfs_removal_suspend_progress;
-
-SYSCTL_INT(_vfs_zfs, OID_AUTO, removal_suspend_progress,
- CTLFLAG_RWTUN, &zfs_removal_suspend_progress, 0,
- "Ensures certain actions can happen while in the middle of a removal");
-
/*
* Minimum size which forces the dynamic allocator to change
* it's allocation strategy. Once the space map cannot satisfy
@@ -749,12 +535,6 @@ param_set_min_auto_ashift(SYSCTL_HANDLER_ARGS)
return (0);
}
-SYSCTL_PROC(_vfs_zfs, OID_AUTO, min_auto_ashift,
- CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
- &zfs_vdev_min_auto_ashift, sizeof (zfs_vdev_min_auto_ashift),
- param_set_min_auto_ashift, "IU",
- "Min ashift used when creating new top-level vdev. (LEGACY)");
-
int
param_set_max_auto_ashift(SYSCTL_HANDLER_ARGS)
{
@@ -774,13 +554,6 @@ param_set_max_auto_ashift(SYSCTL_HANDLER_ARGS)
return (0);
}
-SYSCTL_PROC(_vfs_zfs, OID_AUTO, max_auto_ashift,
- CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
- &zfs_vdev_max_auto_ashift, sizeof (zfs_vdev_max_auto_ashift),
- param_set_max_auto_ashift, "IU",
- "Max ashift used when optimizing for logical -> physical sector size on"
- " new top-level vdevs. (LEGACY)");
-
/*
* Since the DTL space map of a vdev is not expected to have a lot of
* entries, we default its block size to 4K.
@@ -802,23 +575,6 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, standard_sm_blksz,
CTLFLAG_RDTUN, &zfs_vdev_standard_sm_blksz, 0,
"Block size for standard space map. Power of 2 greater than 4096.");
-extern int vdev_validate_skip;
-
-SYSCTL_INT(_vfs_zfs, OID_AUTO, validate_skip,
- CTLFLAG_RDTUN, &vdev_validate_skip, 0,
- "Enable to bypass vdev_validate().");
-
-/* vdev_mirror.c */
-
-/* vdev_queue.c */
-
-extern uint_t zfs_vdev_max_active;
-
-SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight,
- CTLFLAG_RWTUN, &zfs_vdev_max_active, 0,
- "The maximum number of I/Os of all types active for each device."
- " (LEGACY)");
-
/* zio.c */
SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, exclude_metadata,
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c
index b15a3e6e38c0..cb5787269db2 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c
@@ -1175,7 +1175,7 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
int count = 0;
zfs_acl_phys_t acl_phys;
- if (zp->z_zfsvfs->z_replay == B_FALSE) {
+ if (ZTOV(zp) != NULL && zp->z_zfsvfs->z_replay == B_FALSE) {
ASSERT_VOP_IN_SEQC(ZTOV(zp));
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
index 61d0bb26d1e5..a222c5de4a2a 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
@@ -494,7 +494,7 @@ zfsctl_common_getattr(vnode_t *vp, vattr_t *vap)
vap->va_uid = 0;
vap->va_gid = 0;
- vap->va_rdev = 0;
+ vap->va_rdev = NODEV;
/*
* We are a purely virtual object, so we have no
* blocksize or allocated blocks.
@@ -674,6 +674,7 @@ zfsctl_root_readdir(struct vop_readdir_args *ap)
zfs_uio_t uio;
int *eofp = ap->a_eofflag;
off_t dots_offset;
+ ssize_t orig_resid;
int error;
zfs_uio_init(&uio, ap->a_uio);
@@ -688,16 +689,16 @@ zfsctl_root_readdir(struct vop_readdir_args *ap)
* count to return is 0.
*/
if (zfs_uio_offset(&uio) == 3 * sizeof (entry)) {
+ if (eofp != NULL)
+ *eofp = 1;
return (0);
}
+ orig_resid = zfs_uio_resid(&uio);
error = sfs_readdir_common(zfsvfs->z_root, ZFSCTL_INO_ROOT, ap, &uio,
&dots_offset);
- if (error != 0) {
- if (error == ENAMETOOLONG) /* ran out of destination space */
- error = 0;
- return (error);
- }
+ if (error != 0)
+ goto err;
if (zfs_uio_offset(&uio) != dots_offset)
return (SET_ERROR(EINVAL));
@@ -710,8 +711,11 @@ zfsctl_root_readdir(struct vop_readdir_args *ap)
entry.d_reclen = sizeof (entry);
error = vfs_read_dirent(ap, &entry, zfs_uio_offset(&uio));
if (error != 0) {
- if (error == ENAMETOOLONG)
- error = 0;
+err:
+ if (error == ENAMETOOLONG) {
+ error = orig_resid == zfs_uio_resid(&uio) ?
+ EINVAL : 0;
+ }
return (SET_ERROR(error));
}
if (eofp != NULL)
@@ -1056,17 +1060,21 @@ zfsctl_snapdir_readdir(struct vop_readdir_args *ap)
zfs_uio_t uio;
int *eofp = ap->a_eofflag;
off_t dots_offset;
+ ssize_t orig_resid;
int error;
zfs_uio_init(&uio, ap->a_uio);
+ orig_resid = zfs_uio_resid(&uio);
ASSERT3S(vp->v_type, ==, VDIR);
error = sfs_readdir_common(ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, ap,
&uio, &dots_offset);
if (error != 0) {
- if (error == ENAMETOOLONG) /* ran out of destination space */
- error = 0;
+ if (error == ENAMETOOLONG) { /* ran out of destination space */
+ error = orig_resid == zfs_uio_resid(&uio) ?
+ EINVAL : 0;
+ }
return (error);
}
@@ -1084,9 +1092,13 @@ zfsctl_snapdir_readdir(struct vop_readdir_args *ap)
dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG);
if (error != 0) {
if (error == ENOENT) {
- if (eofp != NULL)
- *eofp = 1;
- error = 0;
+ if (orig_resid == zfs_uio_resid(&uio)) {
+ error = EINVAL;
+ } else {
+ error = 0;
+ if (eofp != NULL)
+ *eofp = 1;
+ }
}
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -1099,8 +1111,10 @@ zfsctl_snapdir_readdir(struct vop_readdir_args *ap)
entry.d_reclen = sizeof (entry);
error = vfs_read_dirent(ap, &entry, zfs_uio_offset(&uio));
if (error != 0) {
- if (error == ENAMETOOLONG)
- error = 0;
+ if (error == ENAMETOOLONG) {
+ error = orig_resid == zfs_uio_resid(&uio) ?
+ EINVAL : 0;
+ }
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(error));
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c
index 21e5f7938f9f..ca13569a1235 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c
@@ -164,8 +164,9 @@ zfs_file_write(zfs_file_t *fp, const void *buf, size_t count, ssize_t *resid)
int
zfs_file_pwrite(zfs_file_t *fp, const void *buf, size_t count, loff_t off,
- ssize_t *resid)
+ uint8_t ashift, ssize_t *resid)
{
+ (void) ashift;
return (zfs_file_write_impl(fp, buf, count, &off, resid));
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
index 1813c411b013..8dce97baba66 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
@@ -61,6 +61,7 @@
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
+#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
@@ -388,7 +389,9 @@ zfs_ioctl(vnode_t *vp, ulong_t com, intptr_t data, int flag, cred_t *cred,
error = vn_lock(vp, LK_EXCLUSIVE);
if (error)
return (error);
+ vn_seqc_write_begin(vp);
error = zfs_ioctl_setxattr(vp, fsx, cred);
+ vn_seqc_write_end(vp);
VOP_UNLOCK(vp);
return (error);
}
@@ -1695,6 +1698,7 @@ zfs_readdir(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, int *eofp,
objset_t *os;
caddr_t outbuf;
size_t bufsize;
+ ssize_t orig_resid;
zap_cursor_t zc;
zap_attribute_t *zap;
uint_t bytes_wanted;
@@ -1735,7 +1739,7 @@ zfs_readdir(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, int *eofp,
/*
* Quit if directory has been removed (posix)
*/
- if ((*eofp = zp->z_unlinked) != 0) {
+ if ((*eofp = (zp->z_unlinked != 0)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (0);
}
@@ -1743,6 +1747,7 @@ zfs_readdir(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, int *eofp,
error = 0;
os = zfsvfs->z_os;
offset = zfs_uio_offset(uio);
+ orig_resid = zfs_uio_resid(uio);
prefetch = zp->z_zn_prefetch;
zap = zap_attribute_long_alloc();
@@ -1922,7 +1927,7 @@ update:
kmem_free(outbuf, bufsize);
if (error == ENOENT)
- error = 0;
+ error = orig_resid == zfs_uio_resid(uio) ? EINVAL : 0;
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
@@ -2013,7 +2018,7 @@ zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr)
if (vp->v_type == VBLK || vp->v_type == VCHR)
vap->va_rdev = zfs_cmpldev(rdev);
else
- vap->va_rdev = 0;
+ vap->va_rdev = NODEV;
vap->va_gen = zp->z_gen;
vap->va_flags = 0; /* FreeBSD: Reset chflags(2) flags. */
vap->va_filerev = zp->z_seq;
@@ -2203,6 +2208,7 @@ zfs_setattr_dir(znode_t *dzp)
if (err)
break;
+ vn_seqc_write_begin(ZTOV(zp));
mutex_enter(&dzp->z_lock);
if (zp->z_uid != dzp->z_uid) {
@@ -2252,6 +2258,7 @@ sa_add_projid_err:
dmu_tx_abort(tx);
}
tx = NULL;
+ vn_seqc_write_end(ZTOV(zp));
if (err != 0 && err != ENOENT)
break;
@@ -5727,6 +5734,9 @@ zfs_freebsd_pathconf(struct vop_pathconf_args *ap)
{
ulong_t val;
int error;
+#ifdef _PC_CLONE_BLKSIZE
+ zfsvfs_t *zfsvfs;
+#endif
error = zfs_pathconf(ap->a_vp, ap->a_name, &val,
curthread->td_ucred, NULL);
@@ -5773,6 +5783,21 @@ zfs_freebsd_pathconf(struct vop_pathconf_args *ap)
*ap->a_retval = 1;
return (0);
#endif
+#ifdef _PC_CLONE_BLKSIZE
+ case _PC_CLONE_BLKSIZE:
+ zfsvfs = (zfsvfs_t *)ap->a_vp->v_mount->mnt_data;
+ if (zfs_bclone_enabled &&
+ spa_feature_is_enabled(dmu_objset_spa(zfsvfs->z_os),
+ SPA_FEATURE_BLOCK_CLONING))
+ *ap->a_retval = dsl_dataset_feature_is_active(
+ zfsvfs->z_os->os_dsl_dataset,
+ SPA_FEATURE_LARGE_BLOCKS) ?
+ SPA_MAXBLOCKSIZE :
+ SPA_OLD_MAXBLOCKSIZE;
+ else
+ *ap->a_retval = 0;
+ return (0);
+#endif
default:
return (vop_stdpathconf(ap));
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode_os.c
index 7cd0a153577c..649022ab5bcb 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode_os.c
@@ -817,6 +817,10 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
(*zpp)->z_dnodesize = dnodesize;
(*zpp)->z_projid = projid;
+ vnode_t *vp = ZTOV(*zpp);
+ if (!(flag & IS_ROOT_NODE))
+ vn_seqc_write_begin(vp);
+
if (vap->va_mask & AT_XVATTR)
zfs_xvattr_set(*zpp, (xvattr_t *)vap, tx);
@@ -825,7 +829,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
VERIFY0(zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx));
}
if (!(flag & IS_ROOT_NODE)) {
- vnode_t *vp = ZTOV(*zpp);
+ vn_seqc_write_end(vp);
vp->v_vflag |= VV_FORCEINSMQ;
int err = insmntque(vp, zfsvfs->z_vfs);
vp->v_vflag &= ~VV_FORCEINSMQ;
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
index 265dfd55fc4d..0dd2ecd7fd8d 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
@@ -31,7 +31,7 @@
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
- * Copyright (c) 2024, Klara, Inc.
+ * Copyright (c) 2024, 2025, Klara, Inc.
*/
/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */
@@ -196,7 +196,6 @@ DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);
static int zvol_geom_open(struct g_provider *pp, int flag, int count);
static int zvol_geom_close(struct g_provider *pp, int flag, int count);
-static void zvol_geom_destroy(zvol_state_t *zv);
static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
static void zvol_geom_bio_start(struct bio *bp);
static int zvol_geom_bio_getattr(struct bio *bp);
@@ -226,25 +225,14 @@ zvol_geom_open(struct g_provider *pp, int flag, int count)
}
retry:
- rw_enter(&zvol_state_lock, ZVOL_RW_READER);
- /*
- * Obtain a copy of private under zvol_state_lock to make sure either
- * the result of zvol free code setting private to NULL is observed,
- * or the zv is protected from being freed because of the positive
- * zv_open_count.
- */
- zv = pp->private;
- if (zv == NULL) {
- rw_exit(&zvol_state_lock);
- err = SET_ERROR(ENXIO);
- goto out_locked;
- }
+ zv = atomic_load_ptr(&pp->private);
+ if (zv == NULL)
+ return (SET_ERROR(ENXIO));
mutex_enter(&zv->zv_state_lock);
if (zv->zv_zso->zso_dying || zv->zv_flags & ZVOL_REMOVING) {
- rw_exit(&zvol_state_lock);
err = SET_ERROR(ENXIO);
- goto out_zv_locked;
+ goto out_locked;
}
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
@@ -257,8 +245,24 @@ retry:
drop_suspend = B_TRUE;
if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
mutex_exit(&zv->zv_state_lock);
+
+ /*
+ * Removal may happen while the locks are down, so
+ * we can't trust zv any longer; we have to start over.
+ */
+ zv = atomic_load_ptr(&pp->private);
+ if (zv == NULL)
+ return (SET_ERROR(ENXIO));
+
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
+
+ if (zv->zv_zso->zso_dying ||
+ zv->zv_flags & ZVOL_REMOVING) {
+ err = SET_ERROR(ENXIO);
+ goto out_locked;
+ }
+
/* Check to see if zv_suspend_lock is needed. */
if (zv->zv_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
@@ -266,7 +270,6 @@ retry:
}
}
}
- rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -294,7 +297,7 @@ retry:
if (drop_namespace)
mutex_exit(&spa_namespace_lock);
if (err)
- goto out_zv_locked;
+ goto out_locked;
pp->mediasize = zv->zv_volsize;
pp->stripeoffset = 0;
pp->stripesize = zv->zv_volblocksize;
@@ -329,9 +332,8 @@ out_opened:
zvol_last_close(zv);
wakeup(zv);
}
-out_zv_locked:
- mutex_exit(&zv->zv_state_lock);
out_locked:
+ mutex_exit(&zv->zv_state_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
return (err);
@@ -345,12 +347,9 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
boolean_t drop_suspend = B_TRUE;
int new_open_count;
- rw_enter(&zvol_state_lock, ZVOL_RW_READER);
- zv = pp->private;
- if (zv == NULL) {
- rw_exit(&zvol_state_lock);
+ zv = atomic_load_ptr(&pp->private);
+ if (zv == NULL)
return (SET_ERROR(ENXIO));
- }
mutex_enter(&zv->zv_state_lock);
if (zv->zv_flags & ZVOL_EXCL) {
@@ -377,6 +376,15 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
+
+ /*
+ * Unlike in zvol_geom_open(), we don't check if
+ * removal started here, because we might be one of the
+ * openers that needs to be thrown out! If we're the
+ * last, we need to call zvol_last_close() below to
+ * finish cleanup. So, no special treatment for us.
+ */
+
/* Check to see if zv_suspend_lock is needed. */
new_open_count = zv->zv_open_count - count;
if (new_open_count != 0) {
@@ -387,7 +395,6 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
} else {
drop_suspend = B_FALSE;
}
- rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -408,20 +415,6 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
return (0);
}
-static void
-zvol_geom_destroy(zvol_state_t *zv)
-{
- struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
- struct g_provider *pp = zsg->zsg_provider;
-
- ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
-
- g_topology_assert();
-
- zsg->zsg_provider = NULL;
- g_wither_geom(pp->geom, ENXIO);
-}
-
void
zvol_wait_close(zvol_state_t *zv)
{
@@ -454,7 +447,7 @@ zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
pp->name, acr, acw, ace));
- if (pp->private == NULL) {
+ if (atomic_load_ptr(&pp->private) == NULL) {
if (acr <= 0 && acw <= 0 && ace <= 0)
return (0);
return (pp->error);
@@ -921,25 +914,14 @@ zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
boolean_t drop_suspend = B_FALSE;
retry:
- rw_enter(&zvol_state_lock, ZVOL_RW_READER);
- /*
- * Obtain a copy of si_drv2 under zvol_state_lock to make sure either
- * the result of zvol free code setting si_drv2 to NULL is observed,
- * or the zv is protected from being freed because of the positive
- * zv_open_count.
- */
- zv = dev->si_drv2;
- if (zv == NULL) {
- rw_exit(&zvol_state_lock);
- err = SET_ERROR(ENXIO);
- goto out_locked;
- }
+ zv = atomic_load_ptr(&dev->si_drv2);
+ if (zv == NULL)
+ return (SET_ERROR(ENXIO));
mutex_enter(&zv->zv_state_lock);
- if (zv->zv_zso->zso_dying) {
- rw_exit(&zvol_state_lock);
+ if (zv->zv_zso->zso_dying || zv->zv_flags & ZVOL_REMOVING) {
err = SET_ERROR(ENXIO);
- goto out_zv_locked;
+ goto out_locked;
}
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_DEV);
@@ -954,6 +936,13 @@ retry:
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
+
+ if (unlikely(zv->zv_flags & ZVOL_REMOVING)) {
+ /* Removal started while locks were down. */
+ err = SET_ERROR(ENXIO);
+ goto out_locked;
+ }
+
/* Check to see if zv_suspend_lock is needed. */
if (zv->zv_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
@@ -961,7 +950,6 @@ retry:
}
}
}
- rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -989,7 +977,7 @@ retry:
if (drop_namespace)
mutex_exit(&spa_namespace_lock);
if (err)
- goto out_zv_locked;
+ goto out_locked;
}
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -1016,9 +1004,8 @@ out_opened:
zvol_last_close(zv);
wakeup(zv);
}
-out_zv_locked:
- mutex_exit(&zv->zv_state_lock);
out_locked:
+ mutex_exit(&zv->zv_state_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
return (err);
@@ -1030,12 +1017,9 @@ zvol_cdev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
zvol_state_t *zv;
boolean_t drop_suspend = B_TRUE;
- rw_enter(&zvol_state_lock, ZVOL_RW_READER);
- zv = dev->si_drv2;
- if (zv == NULL) {
- rw_exit(&zvol_state_lock);
+ zv = atomic_load_ptr(&dev->si_drv2);
+ if (zv == NULL)
return (SET_ERROR(ENXIO));
- }
mutex_enter(&zv->zv_state_lock);
if (zv->zv_flags & ZVOL_EXCL) {
@@ -1060,6 +1044,15 @@ zvol_cdev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
+
+ /*
+ * Unlike in zvol_cdev_open(), we don't check if
+ * removal started here, because we might be one of the
+ * openers that needs to be thrown out! If we're the
+ * last, we need to call zvol_last_close() below to
+ * finish cleanup. So, no special treatment for us.
+ */
+
/* Check to see if zv_suspend_lock is needed. */
if (zv->zv_open_count != 1) {
rw_exit(&zv->zv_suspend_lock);
@@ -1069,7 +1062,6 @@ zvol_cdev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
} else {
drop_suspend = B_FALSE;
}
- rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -1101,7 +1093,8 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
int error;
boolean_t sync;
- zv = dev->si_drv2;
+ zv = atomic_load_ptr(&dev->si_drv2);
+ ASSERT3P(zv, !=, NULL);
error = 0;
KASSERT(zv->zv_open_count > 0,
@@ -1162,6 +1155,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
*(off_t *)data = 0;
break;
case DIOCGATTR: {
+ rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
spa_t *spa = dmu_objset_spa(zv->zv_objset);
struct diocgattr_arg *arg = (struct diocgattr_arg *)data;
uint64_t refd, avail, usedobjs, availobjs;
@@ -1186,6 +1180,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
arg->value.off = refd / DEV_BSIZE;
} else
error = SET_ERROR(ENOIOCTL);
+ rw_exit(&zv->zv_suspend_lock);
break;
}
case FIOSEEKHOLE:
@@ -1196,10 +1191,12 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
hole = (cmd == FIOSEEKHOLE);
noff = *off;
+ rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
lr = zfs_rangelock_enter(&zv->zv_rangelock, 0, UINT64_MAX,
RL_READER);
error = dmu_offset_next(zv->zv_objset, ZVOL_OBJ, hole, &noff);
zfs_rangelock_exit(lr);
+ rw_exit(&zv->zv_suspend_lock);
*off = noff;
break;
}
@@ -1400,42 +1397,65 @@ zvol_alloc(const char *name, uint64_t volsize, uint64_t volblocksize,
* Remove minor node for the specified volume.
*/
void
-zvol_os_free(zvol_state_t *zv)
+zvol_os_remove_minor(zvol_state_t *zv)
{
- ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
- ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
+ ASSERT(MUTEX_HELD(&zv->zv_state_lock));
ASSERT0(zv->zv_open_count);
+ ASSERT0(atomic_read(&zv->zv_suspend_ref));
+ ASSERT(zv->zv_flags & ZVOL_REMOVING);
- ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);
-
- rw_destroy(&zv->zv_suspend_lock);
- zfs_rangelock_fini(&zv->zv_rangelock);
+ struct zvol_state_os *zso = zv->zv_zso;
+ zv->zv_zso = NULL;
if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
- struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
- struct g_provider *pp __maybe_unused = zsg->zsg_provider;
-
- ASSERT0P(pp->private);
+ struct zvol_state_geom *zsg = &zso->zso_geom;
+ struct g_provider *pp = zsg->zsg_provider;
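+ /* New openers observe the NULL and fail with ENXIO before using zv. */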
+ atomic_store_ptr(&pp->private, NULL);
+ mutex_exit(&zv->zv_state_lock);
g_topology_lock();
- zvol_geom_destroy(zv);
+ g_wither_geom(pp->geom, ENXIO);
g_topology_unlock();
} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
- struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
+ struct zvol_state_dev *zsd = &zso->zso_dev;
struct cdev *dev = zsd->zsd_cdev;
+ if (dev != NULL)
+ atomic_store_ptr(&dev->si_drv2, NULL);
+ mutex_exit(&zv->zv_state_lock);
+
if (dev != NULL) {
- ASSERT0P(dev->si_drv2);
destroy_dev(dev);
knlist_clear(&zsd->zsd_selinfo.si_note, 0);
knlist_destroy(&zsd->zsd_selinfo.si_note);
}
}
+ kmem_free(zso, sizeof (struct zvol_state_os));
+
+ mutex_enter(&zv->zv_state_lock);
+}
+
+void
+zvol_os_free(zvol_state_t *zv)
+{
+ ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
+ ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
+ ASSERT0(zv->zv_open_count);
+ ASSERT0P(zv->zv_zso);
+
+ ASSERT0P(zv->zv_objset);
+ ASSERT0P(zv->zv_zilog);
+ ASSERT0P(zv->zv_dn);
+
+ ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);
+
+ rw_destroy(&zv->zv_suspend_lock);
+ zfs_rangelock_fini(&zv->zv_rangelock);
+
mutex_destroy(&zv->zv_state_lock);
cv_destroy(&zv->zv_removing_cv);
dataset_kstats_destroy(&zv->zv_kstat);
- kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
kmem_free(zv, sizeof (zvol_state_t));
zvol_minors--;
}
@@ -1538,28 +1558,6 @@ out_doi:
return (error);
}
-void
-zvol_os_clear_private(zvol_state_t *zv)
-{
- ASSERT(RW_LOCK_HELD(&zvol_state_lock));
- if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
- struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
- struct g_provider *pp = zsg->zsg_provider;
-
- if (pp->private == NULL) /* already cleared */
- return;
-
- pp->private = NULL;
- ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
- } else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
- struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
- struct cdev *dev = zsd->zsd_cdev;
-
- if (dev != NULL)
- dev->si_drv2 = NULL;
- }
-}
-
int
zvol_os_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_file_os.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_file_os.c
index c729947369c2..3fdcdbac6f68 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_file_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_file_os.c
@@ -115,8 +115,9 @@ zfs_file_write(zfs_file_t *fp, const void *buf, size_t count, ssize_t *resid)
*/
int
zfs_file_pwrite(zfs_file_t *fp, const void *buf, size_t count, loff_t off,
- ssize_t *resid)
+ uint8_t ashift, ssize_t *resid)
{
+ (void) ashift;
ssize_t rc;
rc = kernel_write(fp, buf, count, &off);
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c
index 48dae79a2373..81ac26cb0c93 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c
@@ -202,7 +202,7 @@ zpl_snapdir_revalidate(struct dentry *dentry, unsigned int flags)
return (!!dentry->d_inode);
}
-static dentry_operations_t zpl_dops_snapdirs = {
+static const struct dentry_operations zpl_dops_snapdirs = {
/*
* Auto mounting of snapshots is only supported for 2.6.37 and
* newer kernels. Prior to this kernel the ops->follow_link()
@@ -215,6 +215,51 @@ static dentry_operations_t zpl_dops_snapdirs = {
.d_revalidate = zpl_snapdir_revalidate,
};
+/*
+ * For the .zfs control directory to work properly we must be able to override
+ * the default operations table and register custom .d_automount and
+ * .d_revalidate callbacks.
+ */
+static void
+set_snapdir_dentry_ops(struct dentry *dentry, unsigned int extraflags)
+{
+ static const unsigned int op_flags =
+ DCACHE_OP_HASH | DCACHE_OP_COMPARE |
+ DCACHE_OP_REVALIDATE | DCACHE_OP_DELETE |
+ DCACHE_OP_PRUNE | DCACHE_OP_WEAK_REVALIDATE | DCACHE_OP_REAL;
+
+#ifdef HAVE_D_SET_D_OP
+ /*
+ * d_set_d_op() will set the DCACHE_OP_ flags according to what it
+ * finds in the passed dentry_operations, so we don't have to.
+ *
+ * We clear the flags and the old op table before calling d_set_d_op()
+ * because d_set_d_op() issues a warning when the dentry operations
+ * table is already set.
+ */
+ dentry->d_op = NULL;
+ dentry->d_flags &= ~op_flags;
+ d_set_d_op(dentry, &zpl_dops_snapdirs);
+ dentry->d_flags |= extraflags;
+#else
+ /*
+ * Since 6.17 there's no exported way to modify dentry ops, so we have
+ * to reach in and do it ourselves. This should be safe for our very
+ * narrow use case, which is to create or splice in an entry to give
+ * access to a snapshot.
+ *
+ * We need to set the op flags directly. We hardcode
+ * DCACHE_OP_REVALIDATE because that's the only operation we have; if
+ * we ever extend zpl_dops_snapdirs we will need to update the op flags
+ * to match.
+ */
+ spin_lock(&dentry->d_lock);
+ dentry->d_op = &zpl_dops_snapdirs;
+ dentry->d_flags &= ~op_flags;
+ dentry->d_flags |= DCACHE_OP_REVALIDATE | extraflags;
+ spin_unlock(&dentry->d_lock);
+#endif
+}
+
static struct dentry *
zpl_snapdir_lookup(struct inode *dip, struct dentry *dentry,
unsigned int flags)
@@ -236,10 +281,7 @@ zpl_snapdir_lookup(struct inode *dip, struct dentry *dentry,
return (ERR_PTR(error));
ASSERT(error == 0 || ip == NULL);
- d_clear_d_op(dentry);
- d_set_d_op(dentry, &zpl_dops_snapdirs);
- dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
-
+ set_snapdir_dentry_ops(dentry, DCACHE_NEED_AUTOMOUNT);
return (d_splice_alias(ip, dentry));
}
@@ -373,8 +415,7 @@ zpl_snapdir_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
error = -zfsctl_snapdir_mkdir(dip, dname(dentry), vap, &ip, cr, 0);
if (error == 0) {
- d_clear_d_op(dentry);
- d_set_d_op(dentry, &zpl_dops_snapdirs);
+ set_snapdir_dentry_ops(dentry, 0);
d_instantiate(dentry, ip);
}
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c b/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
index a73acdad34ae..bac166fcd89e 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
@@ -22,7 +22,7 @@
/*
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2024, Rob Norris <robn@despairlabs.com>
- * Copyright (c) 2024, Klara, Inc.
+ * Copyright (c) 2024, 2025, Klara, Inc.
*/
#include <sys/dataset_kstats.h>
@@ -679,28 +679,19 @@ zvol_open(struct block_device *bdev, fmode_t flag)
retry:
#endif
- rw_enter(&zvol_state_lock, RW_READER);
- /*
- * Obtain a copy of private_data under the zvol_state_lock to make
- * sure that either the result of zvol free code path setting
- * disk->private_data to NULL is observed, or zvol_os_free()
- * is not called on this zv because of the positive zv_open_count.
- */
+
#ifdef HAVE_BLK_MODE_T
- zv = disk->private_data;
+ zv = atomic_load_ptr(&disk->private_data);
#else
- zv = bdev->bd_disk->private_data;
+ zv = atomic_load_ptr(&bdev->bd_disk->private_data);
#endif
if (zv == NULL) {
- rw_exit(&zvol_state_lock);
return (-SET_ERROR(ENXIO));
}
mutex_enter(&zv->zv_state_lock);
-
if (unlikely(zv->zv_flags & ZVOL_REMOVING)) {
mutex_exit(&zv->zv_state_lock);
- rw_exit(&zvol_state_lock);
return (-SET_ERROR(ENXIO));
}
@@ -712,8 +703,28 @@ retry:
if (zv->zv_open_count == 0) {
if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
mutex_exit(&zv->zv_state_lock);
+
+ /*
+ * Removal may happen while the locks are down, so
+ * we can't trust zv any longer; we have to start over.
+ */
+#ifdef HAVE_BLK_MODE_T
+ zv = atomic_load_ptr(&disk->private_data);
+#else
+ zv = atomic_load_ptr(&bdev->bd_disk->private_data);
+#endif
+ if (zv == NULL)
+ return (-SET_ERROR(ENXIO));
+
rw_enter(&zv->zv_suspend_lock, RW_READER);
mutex_enter(&zv->zv_state_lock);
+
+ if (unlikely(zv->zv_flags & ZVOL_REMOVING)) {
+ mutex_exit(&zv->zv_state_lock);
+ rw_exit(&zv->zv_suspend_lock);
+ return (-SET_ERROR(ENXIO));
+ }
+
/* check to see if zv_suspend_lock is needed */
if (zv->zv_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
@@ -724,7 +735,6 @@ retry:
drop_suspend = B_TRUE;
}
}
- rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -821,11 +831,11 @@ zvol_release(struct gendisk *disk, fmode_t unused)
#if !defined(HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG)
(void) unused;
#endif
- zvol_state_t *zv;
boolean_t drop_suspend = B_TRUE;
- rw_enter(&zvol_state_lock, RW_READER);
- zv = disk->private_data;
+ zvol_state_t *zv = atomic_load_ptr(&disk->private_data);
+ if (zv == NULL)
+ return;
mutex_enter(&zv->zv_state_lock);
ASSERT3U(zv->zv_open_count, >, 0);
@@ -839,6 +849,15 @@ zvol_release(struct gendisk *disk, fmode_t unused)
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, RW_READER);
mutex_enter(&zv->zv_state_lock);
+
+ /*
+ * Unlike in zvol_open(), we don't check if removal
+ * started here, because we might be one of the openers
+ * that needs to be thrown out! If we're the last, we
+ * need to call zvol_last_close() below to finish
+ * cleanup. So, no special treatment for us.
+ */
+
/* check to see if zv_suspend_lock is needed */
if (zv->zv_open_count != 1) {
rw_exit(&zv->zv_suspend_lock);
@@ -848,7 +867,6 @@ zvol_release(struct gendisk *disk, fmode_t unused)
} else {
drop_suspend = B_FALSE;
}
- rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -868,9 +886,10 @@ static int
zvol_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- zvol_state_t *zv = bdev->bd_disk->private_data;
int error = 0;
+ zvol_state_t *zv = atomic_load_ptr(&bdev->bd_disk->private_data);
+ ASSERT3P(zv, !=, NULL);
ASSERT3U(zv->zv_open_count, >, 0);
switch (cmd) {
@@ -923,9 +942,8 @@ zvol_check_events(struct gendisk *disk, unsigned int clearing)
{
unsigned int mask = 0;
- rw_enter(&zvol_state_lock, RW_READER);
+ zvol_state_t *zv = atomic_load_ptr(&disk->private_data);
- zvol_state_t *zv = disk->private_data;
if (zv != NULL) {
mutex_enter(&zv->zv_state_lock);
mask = zv->zv_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
@@ -933,17 +951,14 @@ zvol_check_events(struct gendisk *disk, unsigned int clearing)
mutex_exit(&zv->zv_state_lock);
}
- rw_exit(&zvol_state_lock);
-
return (mask);
}
static int
zvol_revalidate_disk(struct gendisk *disk)
{
- rw_enter(&zvol_state_lock, RW_READER);
+ zvol_state_t *zv = atomic_load_ptr(&disk->private_data);
- zvol_state_t *zv = disk->private_data;
if (zv != NULL) {
mutex_enter(&zv->zv_state_lock);
set_capacity(zv->zv_zso->zvo_disk,
@@ -951,8 +966,6 @@ zvol_revalidate_disk(struct gendisk *disk)
mutex_exit(&zv->zv_state_lock);
}
- rw_exit(&zvol_state_lock);
-
return (0);
}
@@ -971,16 +984,6 @@ zvol_os_update_volsize(zvol_state_t *zv, uint64_t volsize)
return (0);
}
-void
-zvol_os_clear_private(zvol_state_t *zv)
-{
- /*
- * Cleared while holding zvol_state_lock as a writer
- * which will prevent zvol_open() from opening it.
- */
- zv->zv_zso->zvo_disk->private_data = NULL;
-}
-
/*
* Provide a simple virtual geometry for legacy compatibility. For devices
* smaller than 1 MiB a small head and sector count is used to allow very
@@ -990,9 +993,10 @@ zvol_os_clear_private(zvol_state_t *zv)
static int
zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
- zvol_state_t *zv = bdev->bd_disk->private_data;
sector_t sectors;
+ zvol_state_t *zv = atomic_load_ptr(&bdev->bd_disk->private_data);
+ ASSERT3P(zv, !=, NULL);
ASSERT3U(zv->zv_open_count, >, 0);
sectors = get_capacity(zv->zv_zso->zvo_disk);
@@ -1417,53 +1421,70 @@ out_kmem:
return (ret);
}
-/*
- * Cleanup then free a zvol_state_t which was created by zvol_alloc().
- * At this time, the structure is not opened by anyone, is taken off
- * the zvol_state_list, and has its private data set to NULL.
- * The zvol_state_lock is dropped.
- *
- * This function may take many milliseconds to complete (e.g. we've seen
- * it take over 256ms), due to the calls to "blk_cleanup_queue" and
- * "del_gendisk". Thus, consumers need to be careful to account for this
- * latency when calling this function.
- */
void
-zvol_os_free(zvol_state_t *zv)
+zvol_os_remove_minor(zvol_state_t *zv)
{
-
- ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
- ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
+ ASSERT(MUTEX_HELD(&zv->zv_state_lock));
ASSERT0(zv->zv_open_count);
- ASSERT0P(zv->zv_zso->zvo_disk->private_data);
+ ASSERT0(atomic_read(&zv->zv_suspend_ref));
+ ASSERT(zv->zv_flags & ZVOL_REMOVING);
- rw_destroy(&zv->zv_suspend_lock);
- zfs_rangelock_fini(&zv->zv_rangelock);
+ struct zvol_state_os *zso = zv->zv_zso;
+ zv->zv_zso = NULL;
+
+ /* Clearing private_data will make new callers return immediately. */
+ atomic_store_ptr(&zso->zvo_disk->private_data, NULL);
+
+ /*
+ * Drop the state lock before calling del_gendisk(). There may be
+ * callers waiting to acquire it, but del_gendisk() will block until
+ * they exit, which would deadlock.
+ */
+ mutex_exit(&zv->zv_state_lock);
- del_gendisk(zv->zv_zso->zvo_disk);
+ del_gendisk(zso->zvo_disk);
#if defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS) && \
(defined(HAVE_BLK_ALLOC_DISK) || defined(HAVE_BLK_ALLOC_DISK_2ARG))
#if defined(HAVE_BLK_CLEANUP_DISK)
- blk_cleanup_disk(zv->zv_zso->zvo_disk);
+ blk_cleanup_disk(zso->zvo_disk);
#else
- put_disk(zv->zv_zso->zvo_disk);
+ put_disk(zso->zvo_disk);
#endif
#else
- blk_cleanup_queue(zv->zv_zso->zvo_queue);
- put_disk(zv->zv_zso->zvo_disk);
+ blk_cleanup_queue(zso->zvo_queue);
+ put_disk(zso->zvo_disk);
#endif
- if (zv->zv_zso->use_blk_mq)
- blk_mq_free_tag_set(&zv->zv_zso->tag_set);
+ if (zso->use_blk_mq)
+ blk_mq_free_tag_set(&zso->tag_set);
+
+ ida_simple_remove(&zvol_ida, MINOR(zso->zvo_dev) >> ZVOL_MINOR_BITS);
- ida_simple_remove(&zvol_ida,
- MINOR(zv->zv_zso->zvo_dev) >> ZVOL_MINOR_BITS);
+ kmem_free(zso, sizeof (struct zvol_state_os));
+
+ mutex_enter(&zv->zv_state_lock);
+}
+
+void
+zvol_os_free(zvol_state_t *zv)
+{
+
+ ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
+ ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
+ ASSERT0(zv->zv_open_count);
+ ASSERT0P(zv->zv_zso);
+
+ ASSERT0P(zv->zv_objset);
+ ASSERT0P(zv->zv_zilog);
+ ASSERT0P(zv->zv_dn);
+
+ rw_destroy(&zv->zv_suspend_lock);
+ zfs_rangelock_fini(&zv->zv_rangelock);
cv_destroy(&zv->zv_removing_cv);
mutex_destroy(&zv->zv_state_lock);
dataset_kstats_destroy(&zv->zv_kstat);
- kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
kmem_free(zv, sizeof (zvol_state_t));
}
diff --git a/sys/contrib/openzfs/module/zcommon/simd_stat.c b/sys/contrib/openzfs/module/zcommon/simd_stat.c
index 11e2080ff9f2..007ae9e4fbbc 100644
--- a/sys/contrib/openzfs/module/zcommon/simd_stat.c
+++ b/sys/contrib/openzfs/module/zcommon/simd_stat.c
@@ -118,6 +118,10 @@ simd_stat_kstat_data(char *buf, size_t size, void *data)
"pclmulqdq", zfs_pclmulqdq_available());
off += SIMD_STAT_PRINT(simd_stat_kstat_payload,
"movbe", zfs_movbe_available());
+ off += SIMD_STAT_PRINT(simd_stat_kstat_payload,
+ "vaes", zfs_vaes_available());
+ off += SIMD_STAT_PRINT(simd_stat_kstat_payload,
+ "vpclmulqdq", zfs_vpclmulqdq_available());
off += SIMD_STAT_PRINT(simd_stat_kstat_payload,
"osxsave", boot_cpu_has(X86_FEATURE_OSXSAVE));
diff --git a/sys/contrib/openzfs/module/zcommon/zfs_deleg.c b/sys/contrib/openzfs/module/zcommon/zfs_deleg.c
index 49bb534ca26c..87596558c9a1 100644
--- a/sys/contrib/openzfs/module/zcommon/zfs_deleg.c
+++ b/sys/contrib/openzfs/module/zcommon/zfs_deleg.c
@@ -59,6 +59,7 @@ const zfs_deleg_perm_tab_t zfs_deleg_perm_tab[] = {
{ZFS_DELEG_PERM_SNAPSHOT},
{ZFS_DELEG_PERM_SHARE},
{ZFS_DELEG_PERM_SEND},
+ {ZFS_DELEG_PERM_SEND_RAW},
{ZFS_DELEG_PERM_USERPROP},
{ZFS_DELEG_PERM_USERQUOTA},
{ZFS_DELEG_PERM_GROUPQUOTA},
diff --git a/sys/contrib/openzfs/module/zcommon/zpool_prop.c b/sys/contrib/openzfs/module/zcommon/zpool_prop.c
index 04ae9f986d8f..07819ba2be8b 100644
--- a/sys/contrib/openzfs/module/zcommon/zpool_prop.c
+++ b/sys/contrib/openzfs/module/zcommon/zpool_prop.c
@@ -467,9 +467,15 @@ vdev_prop_init(void)
zprop_register_index(VDEV_PROP_RAIDZ_EXPANDING, "raidz_expanding", 0,
PROP_READONLY, ZFS_TYPE_VDEV, "on | off", "RAIDZ_EXPANDING",
boolean_table, sfeatures);
+ zprop_register_index(VDEV_PROP_SIT_OUT, "sit_out", 0,
+ PROP_DEFAULT, ZFS_TYPE_VDEV, "on | off", "SIT_OUT", boolean_table,
+ sfeatures);
zprop_register_index(VDEV_PROP_TRIM_SUPPORT, "trim_support", 0,
PROP_READONLY, ZFS_TYPE_VDEV, "on | off", "TRIMSUP",
boolean_table, sfeatures);
+ zprop_register_index(VDEV_PROP_AUTOSIT, "autosit", 0,
+ PROP_DEFAULT, ZFS_TYPE_VDEV, "on | off", "AUTOSIT", boolean_table,
+ sfeatures);
/* default index properties */
zprop_register_index(VDEV_PROP_FAILFAST, "failfast", B_TRUE,
diff --git a/sys/contrib/openzfs/module/zfs/arc.c b/sys/contrib/openzfs/module/zfs/arc.c
index df41e3b49204..bd6dc8edd8ca 100644
--- a/sys/contrib/openzfs/module/zfs/arc.c
+++ b/sys/contrib/openzfs/module/zfs/arc.c
@@ -486,13 +486,13 @@ static taskq_t *arc_flush_taskq;
static uint_t zfs_arc_evict_threads = 0;
/* The 7 states: */
-arc_state_t ARC_anon;
-arc_state_t ARC_mru;
-arc_state_t ARC_mru_ghost;
-arc_state_t ARC_mfu;
-arc_state_t ARC_mfu_ghost;
-arc_state_t ARC_l2c_only;
-arc_state_t ARC_uncached;
+static arc_state_t ARC_anon;
+/* */ arc_state_t ARC_mru;
+static arc_state_t ARC_mru_ghost;
+/* */ arc_state_t ARC_mfu;
+static arc_state_t ARC_mfu_ghost;
+static arc_state_t ARC_l2c_only;
+static arc_state_t ARC_uncached;
arc_stats_t arc_stats = {
{ "hits", KSTAT_DATA_UINT64 },
@@ -832,15 +832,15 @@ typedef struct arc_async_flush {
#define L2ARC_FEED_TYPES 4
/* L2ARC Performance Tunables */
-uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* def max write size */
-uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra warmup write */
-uint64_t l2arc_headroom = L2ARC_HEADROOM; /* # of dev writes */
-uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
-uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
-uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval msecs */
-int l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
-int l2arc_feed_again = B_TRUE; /* turbo warmup */
-int l2arc_norw = B_FALSE; /* no reads during writes */
+static uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* def max write size */
+static uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra warmup write */
+static uint64_t l2arc_headroom = L2ARC_HEADROOM; /* # of dev writes */
+static uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
+static uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
+static uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval msecs */
+static int l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
+static int l2arc_feed_again = B_TRUE; /* turbo warmup */
+static int l2arc_norw = B_FALSE; /* no reads during writes */
static uint_t l2arc_meta_percent = 33; /* limit on headers size */
/*
diff --git a/sys/contrib/openzfs/module/zfs/dbuf.c b/sys/contrib/openzfs/module/zfs/dbuf.c
index 3d0f88b36336..fccc4c5b5b94 100644
--- a/sys/contrib/openzfs/module/zfs/dbuf.c
+++ b/sys/contrib/openzfs/module/zfs/dbuf.c
@@ -2270,14 +2270,6 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
if (dn->dn_objset->os_dsl_dataset != NULL)
rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
#endif
- /*
- * We make this assert for private objects as well, but after we
- * check if we're already dirty. They are allowed to re-dirty
- * in syncing context.
- */
- ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
- dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
- (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
mutex_enter(&db->db_mtx);
/*
@@ -2289,12 +2281,6 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
db->db_state == DB_CACHED || db->db_state == DB_FILL ||
db->db_state == DB_NOFILL);
- mutex_enter(&dn->dn_mtx);
- dnode_set_dirtyctx(dn, tx, db);
- if (tx->tx_txg > dn->dn_dirty_txg)
- dn->dn_dirty_txg = tx->tx_txg;
- mutex_exit(&dn->dn_mtx);
-
if (db->db_blkid == DMU_SPILL_BLKID)
dn->dn_have_spill = B_TRUE;
@@ -2313,13 +2299,6 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
return (dr_next);
}
- /*
- * Only valid if not already dirty.
- */
- ASSERT(dn->dn_object == 0 ||
- dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
- (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
-
ASSERT3U(dn->dn_nlevels, >, db->db_level);
/*
@@ -2557,12 +2536,13 @@ dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
/*
* Due to our use of dn_nlevels below, this can only be called
- * in open context, unless we are operating on the MOS.
- * From syncing context, dn_nlevels may be different from the
- * dn_nlevels used when dbuf was dirtied.
+ * in open context, unless we are operating on the MOS or it's
+ * a special object. From syncing context, dn_nlevels may be
+ * different from the dn_nlevels used when dbuf was dirtied.
*/
ASSERT(db->db_objset ==
dmu_objset_pool(db->db_objset)->dp_meta_objset ||
+ DMU_OBJECT_IS_SPECIAL(db->db.db_object) ||
txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT0(db->db_level);
diff --git a/sys/contrib/openzfs/module/zfs/ddt.c b/sys/contrib/openzfs/module/zfs/ddt.c
index d6658375f810..0dc9adc7fd4f 100644
--- a/sys/contrib/openzfs/module/zfs/ddt.c
+++ b/sys/contrib/openzfs/module/zfs/ddt.c
@@ -1701,9 +1701,11 @@ ddt_load(spa_t *spa)
}
}
- error = ddt_log_load(ddt);
- if (error != 0 && error != ENOENT)
- return (error);
+ if (ddt->ddt_flags & DDT_FLAG_LOG) {
+ error = ddt_log_load(ddt);
+ if (error != 0 && error != ENOENT)
+ return (error);
+ }
DDT_KSTAT_SET(ddt, dds_log_active_entries,
avl_numnodes(&ddt->ddt_log_active->ddl_tree));
diff --git a/sys/contrib/openzfs/module/zfs/ddt_log.c b/sys/contrib/openzfs/module/zfs/ddt_log.c
index 3d30e244c1f7..c7a2426f3a77 100644
--- a/sys/contrib/openzfs/module/zfs/ddt_log.c
+++ b/sys/contrib/openzfs/module/zfs/ddt_log.c
@@ -176,11 +176,13 @@ ddt_log_update_stats(ddt_t *ddt)
* that's reasonable to expect anyway.
*/
dmu_object_info_t doi;
- uint64_t nblocks;
- dmu_object_info(ddt->ddt_os, ddt->ddt_log_active->ddl_object, &doi);
- nblocks = doi.doi_physical_blocks_512;
- dmu_object_info(ddt->ddt_os, ddt->ddt_log_flushing->ddl_object, &doi);
- nblocks += doi.doi_physical_blocks_512;
+ uint64_t nblocks = 0;
+ if (dmu_object_info(ddt->ddt_os, ddt->ddt_log_active->ddl_object,
+ &doi) == 0)
+ nblocks += doi.doi_physical_blocks_512;
+ if (dmu_object_info(ddt->ddt_os, ddt->ddt_log_flushing->ddl_object,
+ &doi) == 0)
+ nblocks += doi.doi_physical_blocks_512;
ddt_object_t *ddo = &ddt->ddt_log_stats;
ddo->ddo_count =
@@ -243,6 +245,13 @@ ddt_log_alloc_entry(ddt_t *ddt)
}
static void
+ddt_log_free_entry(ddt_t *ddt, ddt_log_entry_t *ddle)
+{
+ kmem_cache_free(ddt->ddt_flags & DDT_FLAG_FLAT ?
+ ddt_log_entry_flat_cache : ddt_log_entry_trad_cache, ddle);
+}
+
+static void
ddt_log_update_entry(ddt_t *ddt, ddt_log_t *ddl, ddt_lightweight_entry_t *ddlwe)
{
/* Create the log tree entry from a live or stored entry */
@@ -347,8 +356,7 @@ ddt_log_take_first(ddt_t *ddt, ddt_log_t *ddl, ddt_lightweight_entry_t *ddlwe)
ddt_histogram_sub_entry(ddt, &ddt->ddt_log_histogram, ddlwe);
avl_remove(&ddl->ddl_tree, ddle);
- kmem_cache_free(ddt->ddt_flags & DDT_FLAG_FLAT ?
- ddt_log_entry_flat_cache : ddt_log_entry_trad_cache, ddle);
+ ddt_log_free_entry(ddt, ddle);
return (B_TRUE);
}
@@ -365,8 +373,7 @@ ddt_log_remove_key(ddt_t *ddt, ddt_log_t *ddl, const ddt_key_t *ddk)
ddt_histogram_sub_entry(ddt, &ddt->ddt_log_histogram, &ddlwe);
avl_remove(&ddl->ddl_tree, ddle);
- kmem_cache_free(ddt->ddt_flags & DDT_FLAG_FLAT ?
- ddt_log_entry_flat_cache : ddt_log_entry_trad_cache, ddle);
+ ddt_log_free_entry(ddt, ddle);
return (B_TRUE);
}
@@ -527,8 +534,7 @@ ddt_log_empty(ddt_t *ddt, ddt_log_t *ddl)
IMPLY(ddt->ddt_version == UINT64_MAX, avl_is_empty(&ddl->ddl_tree));
while ((ddle =
avl_destroy_nodes(&ddl->ddl_tree, &cookie)) != NULL) {
- kmem_cache_free(ddt->ddt_flags & DDT_FLAG_FLAT ?
- ddt_log_entry_flat_cache : ddt_log_entry_trad_cache, ddle);
+ ddt_log_free_entry(ddt, ddle);
}
ASSERT(avl_is_empty(&ddl->ddl_tree));
}
@@ -727,7 +733,7 @@ ddt_log_load(ddt_t *ddt)
ddle = fe;
fe = AVL_NEXT(fl, fe);
avl_remove(fl, ddle);
-
+ ddt_log_free_entry(ddt, ddle);
ddle = ae;
ae = AVL_NEXT(al, ae);
}
diff --git a/sys/contrib/openzfs/module/zfs/dmu.c b/sys/contrib/openzfs/module/zfs/dmu.c
index f7f808d5b8f7..a7a5c89bdafb 100644
--- a/sys/contrib/openzfs/module/zfs/dmu.c
+++ b/sys/contrib/openzfs/module/zfs/dmu.c
@@ -759,6 +759,8 @@ dmu_prefetch_by_dnode(dnode_t *dn, int64_t level, uint64_t offset,
*/
uint8_t ibps = ibs - SPA_BLKPTRSHIFT;
limit = P2ROUNDUP(dmu_prefetch_max, 1 << ibs) >> ibs;
+ if (limit == 0)
+ end2 = start2;
do {
level2++;
start2 = P2ROUNDUP(start2, 1 << ibps) >> ibps;
@@ -1689,8 +1691,8 @@ dmu_object_cached_size(objset_t *os, uint64_t object,
dmu_object_info_from_dnode(dn, &doi);
- for (uint64_t off = 0; off < doi.doi_max_offset;
- off += dmu_prefetch_max) {
+ for (uint64_t off = 0; off < doi.doi_max_offset &&
+ dmu_prefetch_max > 0; off += dmu_prefetch_max) {
/* dbuf_read doesn't prefetch L1 blocks. */
dmu_prefetch_by_dnode(dn, 1, off,
dmu_prefetch_max, ZIO_PRIORITY_SYNC_READ);
diff --git a/sys/contrib/openzfs/module/zfs/dmu_objset.c b/sys/contrib/openzfs/module/zfs/dmu_objset.c
index a77f338bdfd3..8e6b569c2100 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_objset.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_objset.c
@@ -2037,6 +2037,8 @@ userquota_updates_task(void *arg)
dn->dn_id_flags |= DN_ID_CHKED_BONUS;
}
dn->dn_id_flags &= ~(DN_ID_NEW_EXIST);
+ ASSERT3U(dn->dn_dirtycnt, >, 0);
+ dn->dn_dirtycnt--;
mutex_exit(&dn->dn_mtx);
multilist_sublist_remove(list, dn);
@@ -2070,6 +2072,10 @@ dnode_rele_task(void *arg)
dnode_t *dn;
while ((dn = multilist_sublist_head(list)) != NULL) {
+ mutex_enter(&dn->dn_mtx);
+ ASSERT3U(dn->dn_dirtycnt, >, 0);
+ dn->dn_dirtycnt--;
+ mutex_exit(&dn->dn_mtx);
multilist_sublist_remove(list, dn);
dnode_rele(dn, &os->os_synced_dnodes);
}
diff --git a/sys/contrib/openzfs/module/zfs/dmu_zfetch.c b/sys/contrib/openzfs/module/zfs/dmu_zfetch.c
index 51165d0bf723..3d3a9c713568 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_zfetch.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_zfetch.c
@@ -57,19 +57,19 @@ static unsigned int zfetch_max_sec_reap = 2;
/* min bytes to prefetch per stream (default 2MB) */
static unsigned int zfetch_min_distance = 2 * 1024 * 1024;
/* max bytes to prefetch per stream (default 8MB) */
-unsigned int zfetch_max_distance = 8 * 1024 * 1024;
+static unsigned int zfetch_max_distance = 8 * 1024 * 1024;
#else
/* min bytes to prefetch per stream (default 4MB) */
static unsigned int zfetch_min_distance = 4 * 1024 * 1024;
/* max bytes to prefetch per stream (default 64MB) */
-unsigned int zfetch_max_distance = 64 * 1024 * 1024;
+static unsigned int zfetch_max_distance = 64 * 1024 * 1024;
#endif
/* max bytes to prefetch indirects for per stream (default 128MB) */
-unsigned int zfetch_max_idistance = 128 * 1024 * 1024;
+static unsigned int zfetch_max_idistance = 128 * 1024 * 1024;
/* max request reorder distance within a stream (default 16MB) */
-unsigned int zfetch_max_reorder = 16 * 1024 * 1024;
+static unsigned int zfetch_max_reorder = 16 * 1024 * 1024;
/* Max log2 fraction of holes in a stream */
-unsigned int zfetch_hole_shift = 2;
+static unsigned int zfetch_hole_shift = 2;
typedef struct zfetch_stats {
kstat_named_t zfetchstat_hits;
diff --git a/sys/contrib/openzfs/module/zfs/dnode.c b/sys/contrib/openzfs/module/zfs/dnode.c
index 963ff41232a3..6c150d31c669 100644
--- a/sys/contrib/openzfs/module/zfs/dnode.c
+++ b/sys/contrib/openzfs/module/zfs/dnode.c
@@ -173,9 +173,7 @@ dnode_cons(void *arg, void *unused, int kmflag)
dn->dn_allocated_txg = 0;
dn->dn_free_txg = 0;
dn->dn_assigned_txg = 0;
- dn->dn_dirty_txg = 0;
- dn->dn_dirtyctx = 0;
- dn->dn_dirtyctx_firstset = NULL;
+ dn->dn_dirtycnt = 0;
dn->dn_bonus = NULL;
dn->dn_have_spill = B_FALSE;
dn->dn_zio = NULL;
@@ -229,9 +227,7 @@ dnode_dest(void *arg, void *unused)
ASSERT0(dn->dn_allocated_txg);
ASSERT0(dn->dn_free_txg);
ASSERT0(dn->dn_assigned_txg);
- ASSERT0(dn->dn_dirty_txg);
- ASSERT0(dn->dn_dirtyctx);
- ASSERT0P(dn->dn_dirtyctx_firstset);
+ ASSERT0(dn->dn_dirtycnt);
ASSERT0P(dn->dn_bonus);
ASSERT(!dn->dn_have_spill);
ASSERT0P(dn->dn_zio);
@@ -692,10 +688,8 @@ dnode_destroy(dnode_t *dn)
dn->dn_allocated_txg = 0;
dn->dn_free_txg = 0;
dn->dn_assigned_txg = 0;
- dn->dn_dirty_txg = 0;
+ dn->dn_dirtycnt = 0;
- dn->dn_dirtyctx = 0;
- dn->dn_dirtyctx_firstset = NULL;
if (dn->dn_bonus != NULL) {
mutex_enter(&dn->dn_bonus->db_mtx);
dbuf_destroy(dn->dn_bonus);
@@ -800,11 +794,9 @@ dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
dn->dn_bonuslen = bonuslen;
dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
dn->dn_compress = ZIO_COMPRESS_INHERIT;
- dn->dn_dirtyctx = 0;
dn->dn_free_txg = 0;
- dn->dn_dirtyctx_firstset = NULL;
- dn->dn_dirty_txg = 0;
+ dn->dn_dirtycnt = 0;
dn->dn_allocated_txg = tx->tx_txg;
dn->dn_id_flags = 0;
@@ -955,9 +947,7 @@ dnode_move_impl(dnode_t *odn, dnode_t *ndn)
ndn->dn_allocated_txg = odn->dn_allocated_txg;
ndn->dn_free_txg = odn->dn_free_txg;
ndn->dn_assigned_txg = odn->dn_assigned_txg;
- ndn->dn_dirty_txg = odn->dn_dirty_txg;
- ndn->dn_dirtyctx = odn->dn_dirtyctx;
- ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset;
+ ndn->dn_dirtycnt = odn->dn_dirtycnt;
ASSERT0(zfs_refcount_count(&odn->dn_tx_holds));
zfs_refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
ASSERT(avl_is_empty(&ndn->dn_dbufs));
@@ -1020,9 +1010,7 @@ dnode_move_impl(dnode_t *odn, dnode_t *ndn)
odn->dn_allocated_txg = 0;
odn->dn_free_txg = 0;
odn->dn_assigned_txg = 0;
- odn->dn_dirty_txg = 0;
- odn->dn_dirtyctx = 0;
- odn->dn_dirtyctx_firstset = NULL;
+ odn->dn_dirtycnt = 0;
odn->dn_have_spill = B_FALSE;
odn->dn_zio = NULL;
odn->dn_oldused = 0;
@@ -1273,8 +1261,8 @@ dnode_check_slots_free(dnode_children_t *children, int idx, int slots)
} else if (DN_SLOT_IS_PTR(dn)) {
mutex_enter(&dn->dn_mtx);
boolean_t can_free = (dn->dn_type == DMU_OT_NONE &&
- zfs_refcount_is_zero(&dn->dn_holds) &&
- !DNODE_IS_DIRTY(dn));
+ dn->dn_dirtycnt == 0 &&
+ zfs_refcount_is_zero(&dn->dn_holds));
mutex_exit(&dn->dn_mtx);
if (!can_free)
@@ -1757,17 +1745,23 @@ dnode_hold(objset_t *os, uint64_t object, const void *tag, dnode_t **dnp)
* reference on the dnode. Returns FALSE if unable to add a
* new reference.
*/
+static boolean_t
+dnode_add_ref_locked(dnode_t *dn, const void *tag)
+{
+ ASSERT(MUTEX_HELD(&dn->dn_mtx));
+ if (zfs_refcount_is_zero(&dn->dn_holds))
+ return (FALSE);
+ VERIFY(1 < zfs_refcount_add(&dn->dn_holds, tag));
+ return (TRUE);
+}
+
boolean_t
dnode_add_ref(dnode_t *dn, const void *tag)
{
mutex_enter(&dn->dn_mtx);
- if (zfs_refcount_is_zero(&dn->dn_holds)) {
- mutex_exit(&dn->dn_mtx);
- return (FALSE);
- }
- VERIFY(1 < zfs_refcount_add(&dn->dn_holds, tag));
+ boolean_t r = dnode_add_ref_locked(dn, tag);
mutex_exit(&dn->dn_mtx);
- return (TRUE);
+ return (r);
}
void
@@ -1830,31 +1824,20 @@ dnode_try_claim(objset_t *os, uint64_t object, int slots)
}
/*
- * Checks if the dnode itself is dirty, or is carrying any uncommitted records.
- * It is important to check both conditions, as some operations (eg appending
- * to a file) can dirty both as a single logical unit, but they are not synced
- * out atomically, so checking one and not the other can result in an object
- * appearing to be clean mid-way through a commit.
+ * Test if the dnode is dirty, or carrying uncommitted records.
*
- * Do not change this lightly! If you get it wrong, dmu_offset_next() can
- * detect a hole where there is really data, leading to silent corruption.
+ * dn_dirtycnt is the number of txgs this dnode is dirty on. It's incremented
+ * in dnode_setdirty() the first time the dnode is dirtied on a txg, and
+ * decremented in either dnode_rele_task() or userquota_updates_task() when the
+ * txg is synced out.
*/
boolean_t
dnode_is_dirty(dnode_t *dn)
{
mutex_enter(&dn->dn_mtx);
-
- for (int i = 0; i < TXG_SIZE; i++) {
- if (multilist_link_active(&dn->dn_dirty_link[i]) ||
- !list_is_empty(&dn->dn_dirty_records[i])) {
- mutex_exit(&dn->dn_mtx);
- return (B_TRUE);
- }
- }
-
+ boolean_t dirty = (dn->dn_dirtycnt != 0);
mutex_exit(&dn->dn_mtx);
-
- return (B_FALSE);
+ return (dirty);
}
void
@@ -1916,7 +1899,11 @@ dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
* dnode will hang around after we finish processing its
* children.
*/
- VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg));
+ mutex_enter(&dn->dn_mtx);
+ VERIFY(dnode_add_ref_locked(dn, (void *)(uintptr_t)tx->tx_txg));
+ dn->dn_dirtycnt++;
+ ASSERT3U(dn->dn_dirtycnt, <=, 3);
+ mutex_exit(&dn->dn_mtx);
(void) dbuf_dirty(dn->dn_dbuf, tx);
@@ -2221,32 +2208,6 @@ dnode_dirty_l1range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
mutex_exit(&dn->dn_dbufs_mtx);
}
-void
-dnode_set_dirtyctx(dnode_t *dn, dmu_tx_t *tx, const void *tag)
-{
- /*
- * Don't set dirtyctx to SYNC if we're just modifying this as we
- * initialize the objset.
- */
- if (dn->dn_dirtyctx == DN_UNDIRTIED) {
- dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
-
- if (ds != NULL) {
- rrw_enter(&ds->ds_bp_rwlock, RW_READER, tag);
- }
- if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
- if (dmu_tx_is_syncing(tx))
- dn->dn_dirtyctx = DN_DIRTY_SYNC;
- else
- dn->dn_dirtyctx = DN_DIRTY_OPEN;
- dn->dn_dirtyctx_firstset = tag;
- }
- if (ds != NULL) {
- rrw_exit(&ds->ds_bp_rwlock, tag);
- }
- }
-}
-
static void
dnode_partial_zero(dnode_t *dn, uint64_t off, uint64_t blkoff, uint64_t len,
dmu_tx_t *tx)
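
These dnode changes replace the dn_dirtyctx/dn_dirty_txg tracking with a single dn_dirtycnt: it is bumped under dn_mtx the first time the dnode is dirtied in a txg (dnode_setdirty) and dropped when that txg syncs (dnode_rele_task or userquota_updates_task), so dnode_is_dirty() becomes a plain non-zero test. A standalone sketch of that counting discipline, with illustrative names and pthreads in place of kernel mutexes:

/* Standalone sketch of the dn_dirtycnt discipline; not ZFS code. */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

#define	TXG_CONCURRENT	3	/* at most 3 txgs are in flight at once */

struct node {
	pthread_mutex_t mtx;
	unsigned dirtycnt;	/* number of txgs this node is dirty in */
};

static void
node_setdirty(struct node *n)	/* first dirty in a given txg */
{
	pthread_mutex_lock(&n->mtx);
	n->dirtycnt++;
	assert(n->dirtycnt <= TXG_CONCURRENT);
	pthread_mutex_unlock(&n->mtx);
}

static void
node_txg_synced(struct node *n)	/* that txg has been written out */
{
	pthread_mutex_lock(&n->mtx);
	assert(n->dirtycnt > 0);
	n->dirtycnt--;
	pthread_mutex_unlock(&n->mtx);
}

static bool
node_is_dirty(struct node *n)
{
	pthread_mutex_lock(&n->mtx);
	bool dirty = (n->dirtycnt != 0);
	pthread_mutex_unlock(&n->mtx);
	return (dirty);
}

int
main(void)
{
	struct node n = { .mtx = PTHREAD_MUTEX_INITIALIZER, .dirtycnt = 0 };

	node_setdirty(&n);
	assert(node_is_dirty(&n));
	node_txg_synced(&n);
	assert(!node_is_dirty(&n));
	return (0);
}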
diff --git a/sys/contrib/openzfs/module/zfs/dsl_deadlist.c b/sys/contrib/openzfs/module/zfs/dsl_deadlist.c
index 475db3c89508..41ac72bf1c16 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_deadlist.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_deadlist.c
@@ -1049,7 +1049,8 @@ dsl_livelist_iterate(void *arg, const blkptr_t *bp, boolean_t bp_freed,
ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(&found->le_bp));
ASSERT3U(BP_GET_CHECKSUM(bp), ==,
BP_GET_CHECKSUM(&found->le_bp));
- ASSERT3U(BP_GET_BIRTH(bp), ==, BP_GET_BIRTH(&found->le_bp));
+ ASSERT3U(BP_GET_PHYSICAL_BIRTH(bp), ==,
+ BP_GET_PHYSICAL_BIRTH(&found->le_bp));
}
if (bp_freed) {
if (found == NULL) {
diff --git a/sys/contrib/openzfs/module/zfs/multilist.c b/sys/contrib/openzfs/module/zfs/multilist.c
index 7b85d19e19ee..46fb79269310 100644
--- a/sys/contrib/openzfs/module/zfs/multilist.c
+++ b/sys/contrib/openzfs/module/zfs/multilist.c
@@ -81,7 +81,7 @@ multilist_create_impl(multilist_t *ml, size_t size, size_t offset,
ml->ml_num_sublists = num;
ml->ml_index_func = index_func;
- ml->ml_sublists = kmem_zalloc(sizeof (multilist_sublist_t) *
+ ml->ml_sublists = vmem_zalloc(sizeof (multilist_sublist_t) *
ml->ml_num_sublists, KM_SLEEP);
ASSERT3P(ml->ml_sublists, !=, NULL);
@@ -134,7 +134,7 @@ multilist_destroy(multilist_t *ml)
}
ASSERT3P(ml->ml_sublists, !=, NULL);
- kmem_free(ml->ml_sublists,
+ vmem_free(ml->ml_sublists,
sizeof (multilist_sublist_t) * ml->ml_num_sublists);
ml->ml_num_sublists = 0;
diff --git a/sys/contrib/openzfs/module/zfs/spa_config.c b/sys/contrib/openzfs/module/zfs/spa_config.c
index 7d4d06659146..cf28955b0c50 100644
--- a/sys/contrib/openzfs/module/zfs/spa_config.c
+++ b/sys/contrib/openzfs/module/zfs/spa_config.c
@@ -48,18 +48,17 @@
/*
* Pool configuration repository.
*
- * Pool configuration is stored as a packed nvlist on the filesystem. By
- * default, all pools are stored in /etc/zfs/zpool.cache and loaded on boot
- * (when the ZFS module is loaded). Pools can also have the 'cachefile'
- * property set that allows them to be stored in an alternate location until
- * the control of external software.
+ * Pool configuration is stored as a packed nvlist on the filesystem. When
+ * pools are imported they are added to the /etc/zfs/zpool.cache file and
+ * removed from it when exported. For each cache file, we have a single nvlist
+ * which holds all the configuration information. Pools can also have the
+ * 'cachefile' property set which allows this config to be stored in an
+ * alternate location under the control of external software.
*
- * For each cache file, we have a single nvlist which holds all the
- * configuration information. When the module loads, we read this information
- * from /etc/zfs/zpool.cache and populate the SPA namespace. This namespace is
- * maintained independently in spa.c. Whenever the namespace is modified, or
- * the configuration of a pool is changed, we call spa_write_cachefile(), which
- * walks through all the active pools and writes the configuration to disk.
+ * The kernel independently maintains an AVL tree of imported pools. See the
+ * "SPA locking" comment in spa.c. Whenever a pool configuration is modified
+ * we call spa_write_cachefile() which walks through all the active pools and
+ * writes the updated configuration to the /etc/zfs/zpool.cache file.
*/
static uint64_t spa_config_generation = 1;
@@ -69,94 +68,6 @@ static uint64_t spa_config_generation = 1;
* userland pools when doing testing.
*/
char *spa_config_path = (char *)ZPOOL_CACHE;
-#ifdef _KERNEL
-static int zfs_autoimport_disable = B_TRUE;
-#endif
-
-/*
- * Called when the module is first loaded, this routine loads the configuration
- * file into the SPA namespace. It does not actually open or load the pools; it
- * only populates the namespace.
- */
-void
-spa_config_load(void)
-{
- void *buf = NULL;
- nvlist_t *nvlist, *child;
- nvpair_t *nvpair;
- char *pathname;
- zfs_file_t *fp;
- zfs_file_attr_t zfa;
- uint64_t fsize;
- int err;
-
-#ifdef _KERNEL
- if (zfs_autoimport_disable)
- return;
-#endif
-
- /*
- * Open the configuration file.
- */
- pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
-
- (void) snprintf(pathname, MAXPATHLEN, "%s", spa_config_path);
-
- err = zfs_file_open(pathname, O_RDONLY, 0, &fp);
-
-#ifdef __FreeBSD__
- if (err)
- err = zfs_file_open(ZPOOL_CACHE_BOOT, O_RDONLY, 0, &fp);
-#endif
- kmem_free(pathname, MAXPATHLEN);
-
- if (err)
- return;
-
- if (zfs_file_getattr(fp, &zfa))
- goto out;
-
- fsize = zfa.zfa_size;
- buf = kmem_alloc(fsize, KM_SLEEP);
-
- /*
- * Read the nvlist from the file.
- */
- if (zfs_file_read(fp, buf, fsize, NULL) < 0)
- goto out;
-
- /*
- * Unpack the nvlist.
- */
- if (nvlist_unpack(buf, fsize, &nvlist, KM_SLEEP) != 0)
- goto out;
-
- /*
- * Iterate over all elements in the nvlist, creating a new spa_t for
- * each one with the specified configuration.
- */
- mutex_enter(&spa_namespace_lock);
- nvpair = NULL;
- while ((nvpair = nvlist_next_nvpair(nvlist, nvpair)) != NULL) {
- if (nvpair_type(nvpair) != DATA_TYPE_NVLIST)
- continue;
-
- child = fnvpair_value_nvlist(nvpair);
-
- if (spa_lookup(nvpair_name(nvpair)) != NULL)
- continue;
- (void) spa_add(nvpair_name(nvpair), child, NULL);
- }
- mutex_exit(&spa_namespace_lock);
-
- nvlist_free(nvlist);
-
-out:
- if (buf != NULL)
- kmem_free(buf, fsize);
-
- zfs_file_close(fp);
-}
static int
spa_config_remove(spa_config_dirent_t *dp)
@@ -623,7 +534,6 @@ spa_config_update(spa_t *spa, int what)
spa_config_update(spa, SPA_CONFIG_UPDATE_VDEVS);
}
-EXPORT_SYMBOL(spa_config_load);
EXPORT_SYMBOL(spa_all_configs);
EXPORT_SYMBOL(spa_config_set);
EXPORT_SYMBOL(spa_config_generate);
@@ -634,8 +544,3 @@ EXPORT_SYMBOL(spa_config_update);
ZFS_MODULE_PARAM(zfs_spa, spa_, config_path, STRING, ZMOD_RD,
"SPA config file (/etc/zfs/zpool.cache)");
#endif
-
-#ifdef _KERNEL
-ZFS_MODULE_PARAM(zfs, zfs_, autoimport_disable, INT, ZMOD_RW,
- "Disable pool import at module load");
-#endif
diff --git a/sys/contrib/openzfs/module/zfs/spa_misc.c b/sys/contrib/openzfs/module/zfs/spa_misc.c
index cce772eae598..6f7c060f97f8 100644
--- a/sys/contrib/openzfs/module/zfs/spa_misc.c
+++ b/sys/contrib/openzfs/module/zfs/spa_misc.c
@@ -251,11 +251,11 @@ spa_mode_t spa_mode_global = SPA_MODE_UNINIT;
#ifdef ZFS_DEBUG
/*
- * Everything except dprintf, set_error, spa, and indirect_remap is on
- * by default in debug builds.
+ * Everything except dprintf, set_error, indirect_remap, and raidz_reconstruct
+ * is on by default in debug builds.
*/
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
- ZFS_DEBUG_INDIRECT_REMAP);
+ ZFS_DEBUG_INDIRECT_REMAP | ZFS_DEBUG_RAIDZ_RECONSTRUCT);
#else
int zfs_flags = 0;
#endif
@@ -2548,13 +2548,6 @@ spa_name_compare(const void *a1, const void *a2)
}
void
-spa_boot_init(void *unused)
-{
- (void) unused;
- spa_config_load();
-}
-
-void
spa_init(spa_mode_t mode)
{
mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
@@ -2607,7 +2600,6 @@ spa_init(spa_mode_t mode)
chksum_init();
zpool_prop_init();
zpool_feature_init();
- spa_config_load();
vdev_prop_init();
l2arc_start();
scan_init();
diff --git a/sys/contrib/openzfs/module/zfs/vdev.c b/sys/contrib/openzfs/module/zfs/vdev.c
index 9cf35e379000..fc6d445f9785 100644
--- a/sys/contrib/openzfs/module/zfs/vdev.c
+++ b/sys/contrib/openzfs/module/zfs/vdev.c
@@ -29,7 +29,7 @@
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, Datto Inc. All rights reserved.
- * Copyright (c) 2021, Klara Inc.
+ * Copyright (c) 2021, 2025, Klara, Inc.
* Copyright (c) 2021, 2023 Hewlett Packard Enterprise Development LP.
*/
@@ -100,7 +100,7 @@ static uint_t zfs_vdev_default_ms_shift = 29;
/* upper limit for metaslab size (16G) */
static uint_t zfs_vdev_max_ms_shift = 34;
-int vdev_validate_skip = B_FALSE;
+static int vdev_validate_skip = B_FALSE;
/*
* Since the DTL space map of a vdev is not expected to have a lot of
@@ -1086,6 +1086,10 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
}
}
+ if (top_level && (ops == &vdev_raidz_ops || ops == &vdev_draid_ops))
+ vd->vdev_autosit =
+ vdev_prop_default_numeric(VDEV_PROP_AUTOSIT);
+
/*
* Add ourselves to the parent's list of children.
*/
@@ -1187,6 +1191,9 @@ vdev_free(vdev_t *vd)
spa_spare_remove(vd);
if (vd->vdev_isl2cache)
spa_l2cache_remove(vd);
+ if (vd->vdev_prev_histo)
+ kmem_free(vd->vdev_prev_histo,
+ sizeof (uint64_t) * VDEV_L_HISTO_BUCKETS);
txg_list_destroy(&vd->vdev_ms_list);
txg_list_destroy(&vd->vdev_dtl_list);
@@ -3857,6 +3864,26 @@ vdev_load(vdev_t *vd)
}
}
+ if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
+ spa_t *spa = vd->vdev_spa;
+ uint64_t autosit;
+
+ error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
+ vdev_prop_to_name(VDEV_PROP_AUTOSIT), sizeof (autosit),
+ 1, &autosit);
+ if (error == 0) {
+ vd->vdev_autosit = autosit == 1;
+ } else if (error == ENOENT) {
+ vd->vdev_autosit = vdev_prop_default_numeric(
+ VDEV_PROP_AUTOSIT);
+ } else {
+ vdev_dbgmsg(vd,
+ "vdev_load: zap_lookup(top_zap=%llu) "
+ "failed [error=%d]",
+ (u_longlong_t)vd->vdev_top_zap, error);
+ }
+ }
+
/*
* Load any rebuild state from the top-level vdev zap.
*/
@@ -4616,6 +4643,8 @@ vdev_clear(spa_t *spa, vdev_t *vd)
vd->vdev_stat.vs_checksum_errors = 0;
vd->vdev_stat.vs_dio_verify_errors = 0;
vd->vdev_stat.vs_slow_ios = 0;
+ atomic_store_64(&vd->vdev_outlier_count, 0);
+ vd->vdev_read_sit_out_expire = 0;
for (int c = 0; c < vd->vdev_children; c++)
vdev_clear(spa, vd->vdev_child[c]);
@@ -6107,6 +6136,56 @@ vdev_prop_set(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
}
vd->vdev_failfast = intval & 1;
break;
+ case VDEV_PROP_SIT_OUT:
+ /* Only expose this for a draid or raidz leaf */
+ if (!vd->vdev_ops->vdev_op_leaf ||
+ vd->vdev_top == NULL ||
+ (vd->vdev_top->vdev_ops != &vdev_raidz_ops &&
+ vd->vdev_top->vdev_ops != &vdev_draid_ops)) {
+ error = ENOTSUP;
+ break;
+ }
+ if (nvpair_value_uint64(elem, &intval) != 0) {
+ error = EINVAL;
+ break;
+ }
+ if (intval == 1) {
+ vdev_t *ancestor = vd;
+ while (ancestor->vdev_parent != vd->vdev_top)
+ ancestor = ancestor->vdev_parent;
+ vdev_t *pvd = vd->vdev_top;
+ uint_t sitouts = 0;
+ for (int i = 0; i < pvd->vdev_children; i++) {
+ if (pvd->vdev_child[i] == ancestor)
+ continue;
+ if (vdev_sit_out_reads(
+ pvd->vdev_child[i], 0)) {
+ sitouts++;
+ }
+ }
+ if (sitouts >= vdev_get_nparity(pvd)) {
+ error = ZFS_ERR_TOO_MANY_SITOUTS;
+ break;
+ }
+ if (error == 0)
+ vdev_raidz_sit_child(vd,
+ INT64_MAX - gethrestime_sec());
+ } else {
+ vdev_raidz_unsit_child(vd);
+ }
+ break;
+ case VDEV_PROP_AUTOSIT:
+ if (vd->vdev_ops != &vdev_raidz_ops &&
+ vd->vdev_ops != &vdev_draid_ops) {
+ error = ENOTSUP;
+ break;
+ }
+ if (nvpair_value_uint64(elem, &intval) != 0) {
+ error = EINVAL;
+ break;
+ }
+ vd->vdev_autosit = intval == 1;
+ break;
case VDEV_PROP_CHECKSUM_N:
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
@@ -6456,6 +6535,19 @@ vdev_prop_get(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
ZPROP_SRC_NONE);
}
continue;
+ case VDEV_PROP_SIT_OUT:
+ /* Only expose this for a draid or raidz leaf */
+ if (vd->vdev_ops->vdev_op_leaf &&
+ vd->vdev_top != NULL &&
+ (vd->vdev_top->vdev_ops ==
+ &vdev_raidz_ops ||
+ vd->vdev_top->vdev_ops ==
+ &vdev_draid_ops)) {
+ vdev_prop_add_list(outnvl, propname,
+ NULL, vdev_sit_out_reads(vd, 0),
+ ZPROP_SRC_NONE);
+ }
+ continue;
case VDEV_PROP_TRIM_SUPPORT:
/* only valid for leaf vdevs */
if (vd->vdev_ops->vdev_op_leaf) {
@@ -6506,6 +6598,29 @@ vdev_prop_get(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
vdev_prop_add_list(outnvl, propname, strval,
intval, src);
break;
+ case VDEV_PROP_AUTOSIT:
+ /* Only raidz and draid vdevs have this property */
+ if (vd->vdev_ops != &vdev_raidz_ops &&
+ vd->vdev_ops != &vdev_draid_ops) {
+ src = ZPROP_SRC_NONE;
+ intval = ZPROP_BOOLEAN_NA;
+ } else {
+ err = vdev_prop_get_int(vd, prop,
+ &intval);
+ if (err && err != ENOENT)
+ break;
+
+ if (intval ==
+ vdev_prop_default_numeric(prop))
+ src = ZPROP_SRC_DEFAULT;
+ else
+ src = ZPROP_SRC_LOCAL;
+ }
+
+ vdev_prop_add_list(outnvl, propname, NULL,
+ intval, src);
+ break;
+
case VDEV_PROP_CHECKSUM_N:
case VDEV_PROP_CHECKSUM_T:
case VDEV_PROP_IO_N:
diff --git a/sys/contrib/openzfs/module/zfs/vdev_draid.c b/sys/contrib/openzfs/module/zfs/vdev_draid.c
index a05289102af2..8588cfee3f7d 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_draid.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_draid.c
@@ -22,6 +22,7 @@
/*
* Copyright (c) 2018 Intel Corporation.
* Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
+ * Copyright (c) 2025, Klara, Inc.
*/
#include <sys/zfs_context.h>
@@ -1996,6 +1997,33 @@ vdev_draid_io_start_read(zio_t *zio, raidz_row_t *rr)
rc->rc_allow_repair = 1;
}
}
+
+ if (vdev_sit_out_reads(cvd, zio->io_flags)) {
+ rr->rr_outlier_cnt++;
+ ASSERT0(rc->rc_latency_outlier);
+ rc->rc_latency_outlier = 1;
+ }
+ }
+
+ /*
+ * When the row contains a latency outlier and sufficient parity
+ * exists to reconstruct the column data, then skip reading the
+ * known slow child vdev as a performance optimization.
+ */
+ if (rr->rr_outlier_cnt > 0 &&
+ (rr->rr_firstdatacol - rr->rr_missingparity) >=
+ (rr->rr_missingdata + 1)) {
+
+ for (int c = rr->rr_cols - 1; c >= rr->rr_firstdatacol; c--) {
+ raidz_col_t *rc = &rr->rr_col[c];
+
+ if (rc->rc_error == 0 && rc->rc_latency_outlier) {
+ rr->rr_missingdata++;
+ rc->rc_error = SET_ERROR(EAGAIN);
+ rc->rc_skipped = 1;
+ break;
+ }
+ }
}
/*
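
The guard above only sits a column out when enough parity survives to rebuild it, i.e. when (rr_firstdatacol - rr_missingparity) >= (rr_missingdata + 1). A minimal standalone sketch of that check, with illustrative names not taken from the patch:

/* Standalone sketch of the parity-sufficiency guard; not ZFS code. */
#include <assert.h>
#include <stdbool.h>

/*
 * firstdatacol is the number of parity columns in the row; a column
 * flagged as a latency outlier may only be skipped if the remaining
 * parity still covers the data already missing plus one more column.
 */
static bool
can_skip_one_more(int firstdatacol, int missingparity, int missingdata)
{
	return ((firstdatacol - missingparity) >= (missingdata + 1));
}

int
main(void)
{
	/* raidz2 row, nothing missing: one slow column may be skipped */
	assert(can_skip_one_more(2, 0, 0));
	/* raidz2 row already short a parity and a data column: it may not */
	assert(!can_skip_one_more(2, 1, 1));
	return (0);
}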
diff --git a/sys/contrib/openzfs/module/zfs/vdev_file.c b/sys/contrib/openzfs/module/zfs/vdev_file.c
index f457669bc809..20b4db65ec06 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_file.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_file.c
@@ -228,7 +228,8 @@ vdev_file_io_strategy(void *arg)
abd_return_buf_copy(zio->io_abd, buf, size);
} else {
buf = abd_borrow_buf_copy(zio->io_abd, zio->io_size);
- err = zfs_file_pwrite(vf->vf_file, buf, size, off, &resid);
+ err = zfs_file_pwrite(vf->vf_file, buf, size, off,
+ vd->vdev_ashift, &resid);
abd_return_buf(zio->io_abd, buf, size);
}
zio->io_error = err;
diff --git a/sys/contrib/openzfs/module/zfs/vdev_queue.c b/sys/contrib/openzfs/module/zfs/vdev_queue.c
index c12713b107bf..e69e5598939e 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_queue.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_queue.c
@@ -122,7 +122,7 @@
* The maximum number of i/os active to each device. Ideally, this will be >=
* the sum of each queue's max_active.
*/
-uint_t zfs_vdev_max_active = 1000;
+static uint_t zfs_vdev_max_active = 1000;
/*
* Per-queue limits on the number of i/os active to each device. If the
diff --git a/sys/contrib/openzfs/module/zfs/vdev_raidz.c b/sys/contrib/openzfs/module/zfs/vdev_raidz.c
index b597d6daefde..80727b0d8f91 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_raidz.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_raidz.c
@@ -24,6 +24,7 @@
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2016 Gvozden Nešković. All rights reserved.
+ * Copyright (c) 2025, Klara, Inc.
*/
#include <sys/zfs_context.h>
@@ -356,6 +357,32 @@ unsigned long raidz_expand_max_reflow_bytes = 0;
uint_t raidz_expand_pause_point = 0;
/*
+ * This represents the duration for a slow drive read sit out.
+ */
+static unsigned long vdev_read_sit_out_secs = 600;
+
+/*
+ * How often each RAID-Z and dRAID vdev will check for slow disk outliers.
+ * Increasing this interval will reduce the sensitivity of detection (since all
+ * I/Os since the last check are included in the statistics), but will slow the
+ * response to a disk developing a problem.
+ *
+ * Defaults to once per second; setting extremely small values may cause
+ * negative performance effects.
+ */
+static hrtime_t vdev_raidz_outlier_check_interval_ms = 1000;
+
+/*
+ * When performing slow outlier checks for RAID-Z and dRAID vdevs, this value is
+ * used to determine how far out an outlier must be before it counts as an event
+ * worth considering.
+ *
+ * Smaller values will result in more aggressive sitting out of disks that may
+ * have problems, but may significantly increase the rate of spurious sit-outs.
+ */
+static uint32_t vdev_raidz_outlier_insensitivity = 50;
+
+/*
* Maximum amount of copy io's outstanding at once.
*/
#ifdef _ILP32
@@ -2311,6 +2338,41 @@ vdev_raidz_min_asize(vdev_t *vd)
vd->vdev_children);
}
+/*
+ * Return B_TRUE if a read should be skipped due to being too slow.
+ *
+ * In vdev_child_slow_outlier() it looks for outliers based on disk
+ * latency from the most recent child reads. Here we're checking if,
+ * over time, a disk has been an outlier too many times and is
+ * now in a sit out period.
+ */
+boolean_t
+vdev_sit_out_reads(vdev_t *vd, zio_flag_t io_flags)
+{
+ if (vdev_read_sit_out_secs == 0)
+ return (B_FALSE);
+
+ /* Avoid skipping a data column read when scrubbing */
+ if (io_flags & ZIO_FLAG_SCRUB)
+ return (B_FALSE);
+
+ if (!vd->vdev_ops->vdev_op_leaf) {
+ boolean_t sitting = B_FALSE;
+ for (int c = 0; c < vd->vdev_children; c++) {
+ sitting |= vdev_sit_out_reads(vd->vdev_child[c],
+ io_flags);
+ }
+ return (sitting);
+ }
+
+ if (vd->vdev_read_sit_out_expire >= gethrestime_sec())
+ return (B_TRUE);
+
+ vd->vdev_read_sit_out_expire = 0;
+
+ return (B_FALSE);
+}
+
void
vdev_raidz_child_done(zio_t *zio)
{
@@ -2475,6 +2537,45 @@ vdev_raidz_io_start_read_row(zio_t *zio, raidz_row_t *rr, boolean_t forceparity)
rc->rc_skipped = 1;
continue;
}
+
+ if (vdev_sit_out_reads(cvd, zio->io_flags)) {
+ rr->rr_outlier_cnt++;
+ ASSERT0(rc->rc_latency_outlier);
+ rc->rc_latency_outlier = 1;
+ }
+ }
+
+ /*
+ * When the row contains a latency outlier and sufficient parity
+ * exists to reconstruct the column data, then skip reading the
+ * known slow child vdev as a performance optimization.
+ */
+ if (rr->rr_outlier_cnt > 0 &&
+ (rr->rr_firstdatacol - rr->rr_missingparity) >=
+ (rr->rr_missingdata + 1)) {
+
+ for (int c = rr->rr_cols - 1; c >= 0; c--) {
+ raidz_col_t *rc = &rr->rr_col[c];
+
+ if (rc->rc_error == 0 && rc->rc_latency_outlier) {
+ if (c >= rr->rr_firstdatacol)
+ rr->rr_missingdata++;
+ else
+ rr->rr_missingparity++;
+ rc->rc_error = SET_ERROR(EAGAIN);
+ rc->rc_skipped = 1;
+ break;
+ }
+ }
+ }
+
+ for (int c = rr->rr_cols - 1; c >= 0; c--) {
+ raidz_col_t *rc = &rr->rr_col[c];
+ vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
+
+ if (rc->rc_error || rc->rc_size == 0)
+ continue;
+
if (forceparity ||
c >= rr->rr_firstdatacol || rr->rr_missingdata > 0 ||
(zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
@@ -2498,6 +2599,7 @@ vdev_raidz_io_start_read_phys_cols(zio_t *zio, raidz_map_t *rm)
ASSERT3U(prc->rc_devidx, ==, i);
vdev_t *cvd = vd->vdev_child[i];
+
if (!vdev_readable(cvd)) {
prc->rc_error = SET_ERROR(ENXIO);
prc->rc_tried = 1; /* don't even try */
@@ -2774,6 +2876,239 @@ vdev_raidz_worst_error(raidz_row_t *rr)
return (error);
}
+/*
+ * Find the median value from a set of n values
+ */
+static uint64_t
+latency_median_value(const uint64_t *data, size_t n)
+{
+ uint64_t m;
+
+ if (n % 2 == 0)
+ m = (data[(n >> 1) - 1] + data[n >> 1]) >> 1;
+ else
+ m = data[((n + 1) >> 1) - 1];
+
+ return (m);
+}
+
+/*
+ * Calculate the outlier fence from a set of n latency values
+ *
+ * fence = Q3 + vdev_raidz_outlier_insensitivity x (Q3 - Q1)
+ */
+static uint64_t
+latency_quartiles_fence(const uint64_t *data, size_t n, uint64_t *iqr)
+{
+ uint64_t q1 = latency_median_value(&data[0], n >> 1);
+ uint64_t q3 = latency_median_value(&data[(n + 1) >> 1], n >> 1);
+
+ /*
+ * To avoid detecting false positive outliers when N is small and
+ * the latency values are very close, make sure the IQR is at
+ * least 25% of Q1.
+ */
+ *iqr = MAX(q3 - q1, q1 / 4);
+
+ return (q3 + (*iqr * vdev_raidz_outlier_insensitivity));
+}
+#define LAT_CHILDREN_MIN 5
+#define LAT_OUTLIER_LIMIT 20
+
+static int
+latency_compare(const void *arg1, const void *arg2)
+{
+ const uint64_t *l1 = (uint64_t *)arg1;
+ const uint64_t *l2 = (uint64_t *)arg2;
+
+ return (TREE_CMP(*l1, *l2));
+}
+
+void
+vdev_raidz_sit_child(vdev_t *svd, uint64_t secs)
+{
+ for (int c = 0; c < svd->vdev_children; c++)
+ vdev_raidz_sit_child(svd->vdev_child[c], secs);
+
+ if (!svd->vdev_ops->vdev_op_leaf)
+ return;
+
+ /* Begin a sit out period for this slow drive */
+ svd->vdev_read_sit_out_expire = gethrestime_sec() +
+ secs;
+
+ /* Count each slow io period */
+ mutex_enter(&svd->vdev_stat_lock);
+ svd->vdev_stat.vs_slow_ios++;
+ mutex_exit(&svd->vdev_stat_lock);
+}
+
+void
+vdev_raidz_unsit_child(vdev_t *vd)
+{
+ for (int c = 0; c < vd->vdev_children; c++)
+ vdev_raidz_unsit_child(vd->vdev_child[c]);
+
+ if (!vd->vdev_ops->vdev_op_leaf)
+ return;
+
+ vd->vdev_read_sit_out_expire = 0;
+}
+
+/*
+ * Check for any latency outlier from latest set of child reads.
+ *
+ * Uses a Tukey's fence, with K = 50, for detecting extreme outliers. This
+ * rule defines extreme outliers as data points outside the fence of the
+ * third quartile plus fifty times the Interquartile Range (IQR). This range
+ * is the distance between the first and third quartile.
+ *
+ * Fifty is an extremely large value for Tukey's fence, but the outliers we're
+ * attempting to detect here are orders of magnitude times larger than the
+ * median. This large value should capture any truly faulty disk quickly,
+ * without causing spurious sit-outs.
+ *
+ * To further avoid spurious sit-outs, vdevs must be detected multiple times
+ * as an outlier before they are sat out, and outlier counts will gradually decay.
+ * Every nchildren times we have detected an outlier, we subtract 2 from the
+ * outlier count of all children. If detected outliers are close to uniformly
+ * distributed, this will result in the outlier count remaining close to 0
+ * (in expectation; over long enough time-scales, spurious sit-outs are still
+ * possible).
+ */
+static void
+vdev_child_slow_outlier(zio_t *zio)
+{
+ vdev_t *vd = zio->io_vd;
+ if (!vd->vdev_autosit || vdev_read_sit_out_secs == 0 ||
+ vd->vdev_children < LAT_CHILDREN_MIN)
+ return;
+
+ hrtime_t now = getlrtime();
+ uint64_t last = atomic_load_64(&vd->vdev_last_latency_check);
+
+ if ((now - last) < MSEC2NSEC(vdev_raidz_outlier_check_interval_ms))
+ return;
+
+ /* Allow a single winner when there are racing callers. */
+ if (atomic_cas_64(&vd->vdev_last_latency_check, last, now) != last)
+ return;
+
+ int children = vd->vdev_children;
+ uint64_t *lat_data = kmem_alloc(sizeof (uint64_t) * children, KM_SLEEP);
+
+ for (int c = 0; c < children; c++) {
+ vdev_t *cvd = vd->vdev_child[c];
+ if (cvd->vdev_prev_histo == NULL) {
+ mutex_enter(&cvd->vdev_stat_lock);
+ size_t size =
+ sizeof (cvd->vdev_stat_ex.vsx_disk_histo[0]);
+ cvd->vdev_prev_histo = kmem_zalloc(size, KM_SLEEP);
+ memcpy(cvd->vdev_prev_histo,
+ cvd->vdev_stat_ex.vsx_disk_histo[ZIO_TYPE_READ],
+ size);
+ mutex_exit(&cvd->vdev_stat_lock);
+ }
+ }
+ uint64_t max = 0;
+ vdev_t *svd = NULL;
+ uint_t sitouts = 0;
+ boolean_t skip = B_FALSE, svd_sitting = B_FALSE;
+ for (int c = 0; c < children; c++) {
+ vdev_t *cvd = vd->vdev_child[c];
+ boolean_t sitting = vdev_sit_out_reads(cvd, 0) ||
+ cvd->vdev_state != VDEV_STATE_HEALTHY;
+
+ /* We can't sit out more disks than we have parity */
+ if (sitting && ++sitouts >= vdev_get_nparity(vd))
+ skip = B_TRUE;
+
+ mutex_enter(&cvd->vdev_stat_lock);
+
+ uint64_t *prev_histo = cvd->vdev_prev_histo;
+ uint64_t *histo =
+ cvd->vdev_stat_ex.vsx_disk_histo[ZIO_TYPE_READ];
+ if (skip) {
+ size_t size =
+ sizeof (cvd->vdev_stat_ex.vsx_disk_histo[0]);
+ memcpy(prev_histo, histo, size);
+ mutex_exit(&cvd->vdev_stat_lock);
+ continue;
+ }
+ uint64_t count = 0;
+ lat_data[c] = 0;
+ for (int i = 0; i < VDEV_L_HISTO_BUCKETS; i++) {
+ uint64_t this_count = histo[i] - prev_histo[i];
+ lat_data[c] += (1ULL << i) * this_count;
+ count += this_count;
+ }
+ size_t size = sizeof (cvd->vdev_stat_ex.vsx_disk_histo[0]);
+ memcpy(prev_histo, histo, size);
+ mutex_exit(&cvd->vdev_stat_lock);
+ lat_data[c] /= MAX(1, count);
+
+ /* Wait until all disks have been read from */
+ if (lat_data[c] == 0 && !sitting) {
+ skip = B_TRUE;
+ continue;
+ }
+
+ /* Keep track of the vdev with largest value */
+ if (lat_data[c] > max) {
+ max = lat_data[c];
+ svd = cvd;
+ svd_sitting = sitting;
+ }
+ }
+
+ if (skip) {
+ kmem_free(lat_data, sizeof (uint64_t) * children);
+ return;
+ }
+
+ qsort((void *)lat_data, children, sizeof (uint64_t), latency_compare);
+
+ uint64_t iqr;
+ uint64_t fence = latency_quartiles_fence(lat_data, children, &iqr);
+
+ ASSERT3U(lat_data[children - 1], ==, max);
+ if (max > fence && !svd_sitting) {
+ ASSERT3U(iqr, >, 0);
+ uint64_t incr = MAX(1, MIN((max - fence) / iqr,
+ LAT_OUTLIER_LIMIT / 4));
+ vd->vdev_outlier_count += incr;
+ if (vd->vdev_outlier_count >= children) {
+ for (int c = 0; c < children; c++) {
+ vdev_t *cvd = vd->vdev_child[c];
+ cvd->vdev_outlier_count -= 2;
+ cvd->vdev_outlier_count = MAX(0,
+ cvd->vdev_outlier_count);
+ }
+ vd->vdev_outlier_count = 0;
+ }
+ /*
+ * Keep track of how many times this child has had
+ * an outlier read. A disk that persistently has a
+ * higher outlier count than its peers will be considered
+ * a slow disk.
+ */
+ svd->vdev_outlier_count += incr;
+ if (svd->vdev_outlier_count > LAT_OUTLIER_LIMIT) {
+ ASSERT0(svd->vdev_read_sit_out_expire);
+ vdev_raidz_sit_child(svd, vdev_read_sit_out_secs);
+ (void) zfs_ereport_post(FM_EREPORT_ZFS_SITOUT,
+ zio->io_spa, svd, NULL, NULL, 0);
+ vdev_dbgmsg(svd, "begin read sit out for %d secs",
+ (int)vdev_read_sit_out_secs);
+
+ for (int c = 0; c < vd->vdev_children; c++)
+ vd->vdev_child[c]->vdev_outlier_count = 0;
+ }
+ }
+
+ kmem_free(lat_data, sizeof (uint64_t) * children);
+}
+
static void
vdev_raidz_io_done_verified(zio_t *zio, raidz_row_t *rr)
{
@@ -3515,6 +3850,9 @@ vdev_raidz_io_done(zio_t *zio)
raidz_row_t *rr = rm->rm_row[i];
vdev_raidz_io_done_verified(zio, rr);
}
+ /* Periodically check for a read outlier */
+ if (zio->io_type == ZIO_TYPE_READ)
+ vdev_child_slow_outlier(zio);
zio_checksum_verified(zio);
} else {
/*
@@ -5155,3 +5493,10 @@ ZFS_MODULE_PARAM(zfs_vdev, raidz_, io_aggregate_rows, ULONG, ZMOD_RW,
ZFS_MODULE_PARAM(zfs, zfs_, scrub_after_expand, INT, ZMOD_RW,
"For expanded RAIDZ, automatically start a pool scrub when expansion "
"completes");
+ZFS_MODULE_PARAM(zfs_vdev, vdev_, read_sit_out_secs, ULONG, ZMOD_RW,
+ "Raidz/draid slow disk sit out time period in seconds");
+ZFS_MODULE_PARAM(zfs_vdev, vdev_, raidz_outlier_check_interval_ms, ULONG,
+ ZMOD_RW, "Interval to check for slow raidz/draid children");
+ZFS_MODULE_PARAM(zfs_vdev, vdev_, raidz_outlier_insensitivity, UINT,
+ ZMOD_RW, "How insensitive the slow raidz/draid child check should be");
+/* END CSTYLED */
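
For reference, the outlier detector added to vdev_raidz.c sorts the per-child mean read latencies, takes Q1 and Q3 as the medians of the lower and upper halves, clamps the IQR to at least Q1/4, and flags the slowest child when it exceeds Q3 + insensitivity * IQR. A standalone userspace sketch of that arithmetic, using the default insensitivity of 50 and made-up latencies:

/* Standalone sketch of the Tukey-fence check; not ZFS code. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define	OUTLIER_INSENSITIVITY	50	/* matches the module default */

static int
u64_cmp(const void *a, const void *b)
{
	uint64_t x = *(const uint64_t *)a, y = *(const uint64_t *)b;
	return ((x > y) - (x < y));
}

static uint64_t
median(const uint64_t *d, size_t n)
{
	return ((n % 2 == 0) ? (d[n / 2 - 1] + d[n / 2]) / 2 : d[n / 2]);
}

int
main(void)
{
	/* mean read latency per child, in nanoseconds (made-up data) */
	uint64_t lat[] = { 200000, 210000, 220000, 230000,
	    240000, 250000, 260000, 90000000 };
	size_t n = sizeof (lat) / sizeof (lat[0]);

	qsort(lat, n, sizeof (lat[0]), u64_cmp);

	uint64_t q1 = median(lat, n / 2);		/* lower half */
	uint64_t q3 = median(lat + (n + 1) / 2, n / 2);	/* upper half */
	uint64_t iqr = q3 - q1;
	if (iqr < q1 / 4)	/* avoid a degenerate fence for close values */
		iqr = q1 / 4;
	uint64_t fence = q3 + iqr * OUTLIER_INSENSITIVITY;

	printf("fence=%llu max=%llu outlier=%s\n",
	    (unsigned long long)fence, (unsigned long long)lat[n - 1],
	    lat[n - 1] > fence ? "yes" : "no");
	return (0);
}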
diff --git a/sys/contrib/openzfs/module/zfs/vdev_removal.c b/sys/contrib/openzfs/module/zfs/vdev_removal.c
index 2f7a739da241..2ce0121324ad 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_removal.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_removal.c
@@ -105,7 +105,7 @@ static const uint_t zfs_remove_max_copy_bytes = 64 * 1024 * 1024;
*
* See also the accessor function spa_remove_max_segment().
*/
-uint_t zfs_remove_max_segment = SPA_MAXBLOCKSIZE;
+static uint_t zfs_remove_max_segment = SPA_MAXBLOCKSIZE;
/*
* Ignore hard IO errors during device removal. When set if a device
@@ -137,7 +137,7 @@ uint_t vdev_removal_max_span = 32 * 1024;
* This is used by the test suite so that it can ensure that certain
* actions happen while in the middle of a removal.
*/
-int zfs_removal_suspend_progress = 0;
+static int zfs_removal_suspend_progress = 0;
#define VDEV_REMOVAL_ZAP_OBJS "lzap"
diff --git a/sys/contrib/openzfs/module/zfs/zfeature.c b/sys/contrib/openzfs/module/zfs/zfeature.c
index 0816ea134bf3..4cf9e0dbb405 100644
--- a/sys/contrib/openzfs/module/zfs/zfeature.c
+++ b/sys/contrib/openzfs/module/zfs/zfeature.c
@@ -308,6 +308,7 @@ feature_sync(spa_t *spa, zfeature_info_t *feature, uint64_t refcount,
ASSERT(VALID_FEATURE_OR_NONE(feature->fi_feature));
uint64_t zapobj = (feature->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
spa->spa_feat_for_write_obj : spa->spa_feat_for_read_obj;
+ ASSERT(MUTEX_HELD(&spa->spa_feat_stats_lock));
VERIFY0(zap_update(spa->spa_meta_objset, zapobj, feature->fi_guid,
sizeof (uint64_t), 1, &refcount, tx));
@@ -360,7 +361,9 @@ feature_enable_sync(spa_t *spa, zfeature_info_t *feature, dmu_tx_t *tx)
feature->fi_guid, 1, strlen(feature->fi_desc) + 1,
feature->fi_desc, tx));
+ mutex_enter(&spa->spa_feat_stats_lock);
feature_sync(spa, feature, initial_refcount, tx);
+ mutex_exit(&spa->spa_feat_stats_lock);
if (spa_feature_is_enabled(spa, SPA_FEATURE_ENABLED_TXG)) {
uint64_t enabling_txg = dmu_tx_get_txg(tx);
@@ -416,6 +419,7 @@ feature_do_action(spa_t *spa, spa_feature_t fid, feature_action_t action,
ASSERT(dmu_tx_is_syncing(tx));
ASSERT3U(spa_version(spa), >=, SPA_VERSION_FEATURES);
+ mutex_enter(&spa->spa_feat_stats_lock);
VERIFY3U(feature_get_refcount(spa, feature, &refcount), !=, ENOTSUP);
switch (action) {
@@ -433,6 +437,7 @@ feature_do_action(spa_t *spa, spa_feature_t fid, feature_action_t action,
}
feature_sync(spa, feature, refcount, tx);
+ mutex_exit(&spa->spa_feat_stats_lock);
}
void
diff --git a/sys/contrib/openzfs/module/zfs/zfs_crrd.c b/sys/contrib/openzfs/module/zfs/zfs_crrd.c
index f9267ed41d71..30d4c7c36897 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_crrd.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_crrd.c
@@ -162,9 +162,9 @@ dbrrd_add(dbrrd_t *db, hrtime_t time, uint64_t txg)
daydiff = time - rrd_tail(&db->dbr_days);
monthdiff = time - rrd_tail(&db->dbr_months);
- if (monthdiff >= 0 && monthdiff >= SEC2NSEC(30 * 24 * 60 * 60))
+ if (monthdiff >= 0 && monthdiff >= 30 * 24 * 60 * 60)
rrd_add(&db->dbr_months, time, txg);
- else if (daydiff >= 0 && daydiff >= SEC2NSEC(24 * 60 * 60))
+ else if (daydiff >= 0 && daydiff >= 24 * 60 * 60)
rrd_add(&db->dbr_days, time, txg);
else if (minutedif >= 0)
rrd_add(&db->dbr_minutes, time, txg);
@@ -208,7 +208,8 @@ dbrrd_closest(hrtime_t tv, const rrd_data_t *r1, const rrd_data_t *r2)
if (r2 == NULL)
return (r1);
- return (ABS(tv - r1->rrdd_time) < ABS(tv - r2->rrdd_time) ? r1 : r2);
+ return (ABS(tv - (hrtime_t)r1->rrdd_time) <
+ ABS(tv - (hrtime_t)r2->rrdd_time) ? r1 : r2);
}
uint64_t
diff --git a/sys/contrib/openzfs/module/zfs/zfs_ioctl.c b/sys/contrib/openzfs/module/zfs/zfs_ioctl.c
index 121b966b9864..5ca7c2320c4e 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_ioctl.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_ioctl.c
@@ -683,6 +683,7 @@ zfs_secpolicy_send(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
dsl_dataset_t *ds;
const char *cp;
int error;
+ boolean_t rawok = (zc->zc_flags & 0x8);
/*
* Generate the current snapshot name from the given objsetid, then
@@ -705,6 +706,10 @@ zfs_secpolicy_send(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
error = zfs_secpolicy_write_perms_ds(zc->zc_name, ds,
ZFS_DELEG_PERM_SEND, cr);
+ if (error != 0 && rawok == B_TRUE) {
+ error = zfs_secpolicy_write_perms_ds(zc->zc_name, ds,
+ ZFS_DELEG_PERM_SEND_RAW, cr);
+ }
dsl_dataset_rele(ds, FTAG);
dsl_pool_rele(dp, FTAG);
@@ -714,9 +719,17 @@ zfs_secpolicy_send(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
static int
zfs_secpolicy_send_new(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
+ boolean_t rawok = nvlist_exists(innvl, "rawok");
+ int error;
+
(void) innvl;
- return (zfs_secpolicy_write_perms(zc->zc_name,
- ZFS_DELEG_PERM_SEND, cr));
+ error = zfs_secpolicy_write_perms(zc->zc_name,
+ ZFS_DELEG_PERM_SEND, cr);
+ if (error != 0 && rawok == B_TRUE) {
+ error = zfs_secpolicy_write_perms(zc->zc_name,
+ ZFS_DELEG_PERM_SEND_RAW, cr);
+ }
+ return (error);
}
static int
@@ -4726,7 +4739,7 @@ zfs_ioc_rollback(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
error = error ? error : resume_err;
}
zfs_vfs_rele(zfsvfs);
- } else if ((zv = zvol_suspend(fsname)) != NULL) {
+ } else if (zvol_suspend(fsname, &zv) == 0) {
error = dsl_dataset_rollback(fsname, target, zvol_tag(zv),
outnvl);
zvol_resume(zv);
@@ -5448,7 +5461,7 @@ zfs_ioc_recv_impl(char *tofs, char *tosnap, const char *origin,
}
error = error ? error : end_err;
zfs_vfs_rele(zfsvfs);
- } else if ((zv = zvol_suspend(tofs)) != NULL) {
+ } else if (zvol_suspend(tofs, &zv) == 0) {
error = dmu_recv_end(&drc, zvol_tag(zv));
zvol_resume(zv);
} else {
@@ -7619,7 +7632,7 @@ zfs_ioctl_init(void)
zfs_ioctl_register("scrub", ZFS_IOC_POOL_SCRUB,
zfs_ioc_pool_scrub, zfs_secpolicy_config, POOL_NAME,
- POOL_CHECK_NONE, B_TRUE, B_TRUE,
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
zfs_keys_pool_scrub, ARRAY_SIZE(zfs_keys_pool_scrub));
zfs_ioctl_register("get_props", ZFS_IOC_POOL_GET_PROPS,
diff --git a/sys/contrib/openzfs/module/zfs/zil.c b/sys/contrib/openzfs/module/zfs/zil.c
index 31b59c55f17b..0307df55aa21 100644
--- a/sys/contrib/openzfs/module/zfs/zil.c
+++ b/sys/contrib/openzfs/module/zfs/zil.c
@@ -819,34 +819,37 @@ zil_lwb_vdev_compare(const void *x1, const void *x2)
* we choose them here and later make the block allocation match.
*/
static lwb_t *
-zil_alloc_lwb(zilog_t *zilog, int sz, blkptr_t *bp, boolean_t slog,
- uint64_t txg, lwb_state_t state)
+zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, int min_sz, int sz,
+ boolean_t slog, uint64_t txg)
{
lwb_t *lwb;
lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
+ lwb->lwb_flags = 0;
lwb->lwb_zilog = zilog;
if (bp) {
lwb->lwb_blk = *bp;
- lwb->lwb_slim = (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2);
+ if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2)
+ lwb->lwb_flags |= LWB_FLAG_SLIM;
sz = BP_GET_LSIZE(bp);
+ lwb->lwb_min_sz = sz;
} else {
BP_ZERO(&lwb->lwb_blk);
- lwb->lwb_slim = (spa_version(zilog->zl_spa) >=
- SPA_VERSION_SLIM_ZIL);
+ if (spa_version(zilog->zl_spa) >= SPA_VERSION_SLIM_ZIL)
+ lwb->lwb_flags |= LWB_FLAG_SLIM;
+ lwb->lwb_min_sz = min_sz;
}
- lwb->lwb_slog = slog;
+ if (slog)
+ lwb->lwb_flags |= LWB_FLAG_SLOG;
lwb->lwb_error = 0;
- if (lwb->lwb_slim) {
- lwb->lwb_nmax = sz;
- lwb->lwb_nused = lwb->lwb_nfilled = sizeof (zil_chain_t);
- } else {
- lwb->lwb_nmax = sz - sizeof (zil_chain_t);
- lwb->lwb_nused = lwb->lwb_nfilled = 0;
- }
+ /*
+ * Buffer allocation and capacity setup will be done in
+ * zil_lwb_write_open() when the LWB is opened for ITX assignment.
+ */
+ lwb->lwb_nmax = lwb->lwb_nused = lwb->lwb_nfilled = 0;
lwb->lwb_sz = sz;
- lwb->lwb_state = state;
- lwb->lwb_buf = zio_buf_alloc(sz);
+ lwb->lwb_buf = NULL;
+ lwb->lwb_state = LWB_STATE_NEW;
lwb->lwb_child_zio = NULL;
lwb->lwb_write_zio = NULL;
lwb->lwb_root_zio = NULL;
@@ -857,8 +860,6 @@ zil_alloc_lwb(zilog_t *zilog, int sz, blkptr_t *bp, boolean_t slog,
mutex_enter(&zilog->zl_lock);
list_insert_tail(&zilog->zl_lwb_list, lwb);
- if (state != LWB_STATE_NEW)
- zilog->zl_last_lwb_opened = lwb;
mutex_exit(&zilog->zl_lock);
return (lwb);
@@ -878,7 +879,7 @@ zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
VERIFY(list_is_empty(&lwb->lwb_itxs));
VERIFY(list_is_empty(&lwb->lwb_waiters));
ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
- ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
+ ASSERT(!MUTEX_HELD(&lwb->lwb_lock));
/*
* Clear the zilog's field to indicate this lwb is no longer
@@ -1019,7 +1020,7 @@ zil_create(zilog_t *zilog)
}
error = zio_alloc_zil(zilog->zl_spa, zilog->zl_os, txg, &blk,
- ZIL_MIN_BLKSZ, &slog);
+ ZIL_MIN_BLKSZ, ZIL_MIN_BLKSZ, &slog, B_TRUE);
if (error == 0)
zil_init_log_chain(zilog, &blk);
}
@@ -1028,7 +1029,7 @@ zil_create(zilog_t *zilog)
* Allocate a log write block (lwb) for the first log block.
*/
if (error == 0)
- lwb = zil_alloc_lwb(zilog, 0, &blk, slog, txg, LWB_STATE_NEW);
+ lwb = zil_alloc_lwb(zilog, &blk, 0, 0, slog, txg);
/*
* If we just allocated the first log block, commit our transaction
@@ -1324,10 +1325,12 @@ zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
* zil_commit() is racing with spa_sync().
*/
static void
-zil_commit_waiter_skip(zil_commit_waiter_t *zcw)
+zil_commit_waiter_done(zil_commit_waiter_t *zcw, int err)
{
mutex_enter(&zcw->zcw_lock);
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
+ zcw->zcw_lwb = NULL;
+ zcw->zcw_error = err;
zcw->zcw_done = B_TRUE;
cv_broadcast(&zcw->zcw_cv);
mutex_exit(&zcw->zcw_lock);
@@ -1389,7 +1392,7 @@ zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
if (zil_nocacheflush)
return;
- mutex_enter(&lwb->lwb_vdev_lock);
+ mutex_enter(&lwb->lwb_lock);
for (i = 0; i < ndvas; i++) {
zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
if (avl_find(t, &zvsearch, &where) == NULL) {
@@ -1398,7 +1401,7 @@ zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
avl_insert(t, zv, where);
}
}
- mutex_exit(&lwb->lwb_vdev_lock);
+ mutex_exit(&lwb->lwb_lock);
}
static void
@@ -1415,12 +1418,12 @@ zil_lwb_flush_defer(lwb_t *lwb, lwb_t *nlwb)
/*
* While 'lwb' is at a point in its lifetime where lwb_vdev_tree does
- * not need the protection of lwb_vdev_lock (it will only be modified
+ * not need the protection of lwb_lock (it will only be modified
* while holding zilog->zl_lock) as its writes and those of its
* children have all completed. The younger 'nlwb' may be waiting on
* future writes to additional vdevs.
*/
- mutex_enter(&nlwb->lwb_vdev_lock);
+ mutex_enter(&nlwb->lwb_lock);
/*
* Tear down the 'lwb' vdev tree, ensuring that entries which do not
* exist in 'nlwb' are moved to it, freeing any would-be duplicates.
@@ -1434,7 +1437,7 @@ zil_lwb_flush_defer(lwb_t *lwb, lwb_t *nlwb)
kmem_free(zv, sizeof (*zv));
}
}
- mutex_exit(&nlwb->lwb_vdev_lock);
+ mutex_exit(&nlwb->lwb_lock);
}
void
@@ -1491,10 +1494,6 @@ zil_lwb_flush_vdevs_done(zio_t *zio)
zil_itx_destroy(itx, 0);
while ((zcw = list_remove_head(&lwb->lwb_waiters)) != NULL) {
- mutex_enter(&zcw->zcw_lock);
-
- ASSERT3P(zcw->zcw_lwb, ==, lwb);
- zcw->zcw_lwb = NULL;
/*
* We expect any ZIO errors from child ZIOs to have been
* propagated "up" to this specific LWB's root ZIO, in
@@ -1509,14 +1508,7 @@ zil_lwb_flush_vdevs_done(zio_t *zio)
* errors not being handled correctly here. See the
* comment above the call to "zio_flush" for details.
*/
-
- zcw->zcw_zio_error = zio->io_error;
-
- ASSERT3B(zcw->zcw_done, ==, B_FALSE);
- zcw->zcw_done = B_TRUE;
- cv_broadcast(&zcw->zcw_cv);
-
- mutex_exit(&zcw->zcw_lock);
+ zil_commit_waiter_done(zcw, zio->io_error);
}
uint64_t txg = lwb->lwb_issued_txg;
@@ -1588,7 +1580,7 @@ zil_lwb_write_done(zio_t *zio)
avl_tree_t *t = &lwb->lwb_vdev_tree;
void *cookie = NULL;
zil_vdev_node_t *zv;
- lwb_t *nlwb;
+ lwb_t *nlwb = NULL;
ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0);
@@ -1608,9 +1600,11 @@ zil_lwb_write_done(zio_t *zio)
* its write ZIO a parent this ZIO. In such case we can not defer
* our flushes or below may be a race between the done callbacks.
*/
- nlwb = list_next(&zilog->zl_lwb_list, lwb);
- if (nlwb && nlwb->lwb_state != LWB_STATE_ISSUED)
- nlwb = NULL;
+ if (!(lwb->lwb_flags & LWB_FLAG_CRASHED)) {
+ nlwb = list_next(&zilog->zl_lwb_list, lwb);
+ if (nlwb && nlwb->lwb_state != LWB_STATE_ISSUED)
+ nlwb = NULL;
+ }
mutex_exit(&zilog->zl_lock);
if (avl_numnodes(t) == 0)
@@ -1624,12 +1618,17 @@ zil_lwb_write_done(zio_t *zio)
* written out.
*
* Additionally, we don't perform any further error handling at
- * this point (e.g. setting "zcw_zio_error" appropriately), as
- * we expect that to occur in "zil_lwb_flush_vdevs_done" (thus,
- * we expect any error seen here, to have been propagated to
- * that function).
+ * this point (e.g. setting "zcw_error" appropriately), as we
+ * expect that to occur in "zil_lwb_flush_vdevs_done" (thus, we
+ * expect any error seen here, to have been propagated to that
+ * function).
+ *
+ * Note that we treat a "crashed" LWB as though it was in error,
+ * even if it did appear to succeed, because we've already
+ * signaled error and cleaned up waiters and committers in
+ * zil_crash(); we just want to clean up and get out of here.
*/
- if (zio->io_error != 0) {
+ if (zio->io_error != 0 || (lwb->lwb_flags & LWB_FLAG_CRASHED)) {
while ((zv = avl_destroy_nodes(t, &cookie)) != NULL)
kmem_free(zv, sizeof (*zv));
return;
@@ -1742,10 +1741,26 @@ zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb)
return;
}
+ mutex_enter(&lwb->lwb_lock);
mutex_enter(&zilog->zl_lock);
lwb->lwb_state = LWB_STATE_OPENED;
zilog->zl_last_lwb_opened = lwb;
mutex_exit(&zilog->zl_lock);
+ mutex_exit(&lwb->lwb_lock);
+
+ /*
+ * Allocate buffer and set up LWB capacities.
+ */
+ ASSERT0P(lwb->lwb_buf);
+ ASSERT3U(lwb->lwb_sz, >, 0);
+ lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
+ if (lwb->lwb_flags & LWB_FLAG_SLIM) {
+ lwb->lwb_nmax = lwb->lwb_sz;
+ lwb->lwb_nused = lwb->lwb_nfilled = sizeof (zil_chain_t);
+ } else {
+ lwb->lwb_nmax = lwb->lwb_sz - sizeof (zil_chain_t);
+ lwb->lwb_nused = lwb->lwb_nfilled = 0;
+ }
}
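
For illustration, here is a minimal userspace sketch (not ZFS code) of the two buffer layouts set up above; lwb_layout() is a hypothetical helper and the chain-header size is a stand-in for sizeof (zil_chain_t). With Slim ZIL the chain header sits at the front of the buffer, so the whole block is usable and the fill pointer starts past the header; with the classic layout the header is reserved at the tail, so the usable capacity excludes it.

#include <stdint.h>
#include <stdio.h>

#define CHAIN_HDR	96	/* stand-in for sizeof (zil_chain_t) */

static void
lwb_layout(uint64_t sz, int slim, uint64_t *nmax, uint64_t *nused)
{
	if (slim) {
		/* Slim: chain header at the front, whole block usable. */
		*nmax = sz;
		*nused = CHAIN_HDR;
	} else {
		/* Classic: chain header reserved at the tail. */
		*nmax = sz - CHAIN_HDR;
		*nused = 0;
	}
}

int
main(void)
{
	uint64_t nmax, nused;

	lwb_layout(131072, 1, &nmax, &nused);
	printf("slim:    nmax=%llu nused=%llu\n",
	    (unsigned long long)nmax, (unsigned long long)nused);
	lwb_layout(131072, 0, &nmax, &nused);
	printf("classic: nmax=%llu nused=%llu\n",
	    (unsigned long long)nmax, (unsigned long long)nused);
	return (0);
}
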
/*
@@ -1762,6 +1777,8 @@ static uint_t
zil_lwb_plan(zilog_t *zilog, uint64_t size, uint_t *minsize)
{
uint_t md = zilog->zl_max_block_size - sizeof (zil_chain_t);
+ uint_t waste = zil_max_waste_space(zilog);
+ waste = MAX(waste, zilog->zl_cur_max);
if (size <= md) {
/*
@@ -1772,9 +1789,10 @@ zil_lwb_plan(zilog_t *zilog, uint64_t size, uint_t *minsize)
} else if (size > 8 * md) {
/*
* Big bursts use maximum blocks. The first block size
- * is hard to predict, but it does not really matter.
+ * is hard to predict, but we need at least enough space
+ * to make reasonable progress.
*/
- *minsize = 0;
+ *minsize = waste;
return (md);
}
@@ -1787,57 +1805,52 @@ zil_lwb_plan(zilog_t *zilog, uint64_t size, uint_t *minsize)
uint_t s = size;
uint_t n = DIV_ROUND_UP(s, md - sizeof (lr_write_t));
uint_t chunk = DIV_ROUND_UP(s, n);
- uint_t waste = zil_max_waste_space(zilog);
- waste = MAX(waste, zilog->zl_cur_max);
if (chunk <= md - waste) {
*minsize = MAX(s - (md - waste) * (n - 1), waste);
return (chunk);
} else {
- *minsize = 0;
+ *minsize = waste;
return (md);
}
}
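
As a rough illustration of the chunking arithmetic above, the standalone sketch below (not ZFS code; md, waste and the record overhead are stand-in constants) shows how a mid-sized burst is split into n roughly equal chunks and how the reported minimum first-block size now falls back to the waste threshold instead of 0.

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(a, b)	(((a) + (b) - 1) / (b))
#define MAX(a, b)		((a) > (b) ? (a) : (b))

int
main(void)
{
	uint64_t md = 131072 - 96;	/* zl_max_block_size - sizeof (zil_chain_t), stand-in */
	uint64_t rec_hdr = 192;		/* stand-in for sizeof (lr_write_t) */
	uint64_t waste = 8192;		/* stand-in for zil_max_waste_space() */
	uint64_t size = 400000;		/* burst between md and 8 * md */

	uint64_t n = DIV_ROUND_UP(size, md - rec_hdr);
	uint64_t chunk = DIV_ROUND_UP(size, n);
	uint64_t minsize;

	if (chunk <= md - waste) {
		/* Even chunks fit; the last block must hold the remainder. */
		minsize = MAX(size - (md - waste) * (n - 1), waste);
	} else {
		/* Chunks would be too tight; use full blocks, minimum is the waste floor. */
		chunk = md;
		minsize = waste;
	}
	printf("n=%llu chunk=%llu minsize=%llu\n", (unsigned long long)n,
	    (unsigned long long)chunk, (unsigned long long)minsize);
	return (0);
}
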
/*
* Try to predict next block size based on previous history. Make prediction
- * sufficient for 7 of 8 previous bursts. Don't try to save if the saving is
- * less then 50%, extra writes may cost more, but we don't want single spike
- * to badly affect our predictions.
+ * sufficient for 7 of 8 previous bursts, but don't try to save if the saving
+ * is less than 50%. Extra writes may cost more, but we don't want a single
+ * spike to badly affect our predictions.
*/
-static uint_t
-zil_lwb_predict(zilog_t *zilog)
+static void
+zil_lwb_predict(zilog_t *zilog, uint64_t *min_predict, uint64_t *max_predict)
{
- uint_t m, o;
+ uint_t m1 = 0, m2 = 0, o;
- /* If we are in the middle of a burst, take it into account also. */
- if (zilog->zl_cur_size > 0) {
- o = zil_lwb_plan(zilog, zilog->zl_cur_size, &m);
- } else {
+ /* If we are in the middle of a burst, take it as another data point. */
+ if (zilog->zl_cur_size > 0)
+ o = zil_lwb_plan(zilog, zilog->zl_cur_size, &m1);
+ else
o = UINT_MAX;
- m = 0;
- }
- /* Find minimum optimal size. We don't need to go below that. */
- for (int i = 0; i < ZIL_BURSTS; i++)
- o = MIN(o, zilog->zl_prev_opt[i]);
-
- /* Find two biggest minimal first block sizes above the optimal. */
- uint_t m1 = MAX(m, o), m2 = o;
+ /* Find two largest minimal first block sizes. */
for (int i = 0; i < ZIL_BURSTS; i++) {
- m = zilog->zl_prev_min[i];
- if (m >= m1) {
+ uint_t cur = zilog->zl_prev_min[i];
+ if (cur >= m1) {
m2 = m1;
- m1 = m;
- } else if (m > m2) {
- m2 = m;
+ m1 = cur;
+ } else if (cur > m2) {
+ m2 = cur;
}
}
- /*
- * If second minimum size gives 50% saving -- use it. It may cost us
- * one additional write later, but the space saving is just too big.
- */
- return ((m1 < m2 * 2) ? m1 : m2);
+ /* Minimum should guarantee progress in most cases. */
+ *min_predict = (m1 < m2 * 2) ? m1 : m2;
+
+ /* Maximum doesn't need to go below the minimum optimal size. */
+ for (int i = 0; i < ZIL_BURSTS; i++)
+ o = MIN(o, zilog->zl_prev_opt[i]);
+ m1 = MAX(m1, o);
+ m2 = MAX(m2, o);
+ *max_predict = (m1 < m2 * 2) ? m1 : m2;
}
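
The selection rule itself is compact enough to show in isolation. The sketch below (hypothetical pick() helper, ignoring the in-progress burst and the optimal-size clamp) keeps the two largest minimum sizes from the history and only drops to the second-largest when that saves at least half the space.

#include <stdint.h>
#include <stdio.h>

#define ZIL_BURSTS	8

static uint64_t
pick(const uint64_t hist[ZIL_BURSTS])
{
	uint64_t m1 = 0, m2 = 0;

	/* Find the two largest values in the history. */
	for (int i = 0; i < ZIL_BURSTS; i++) {
		if (hist[i] >= m1) {
			m2 = m1;
			m1 = hist[i];
		} else if (hist[i] > m2) {
			m2 = hist[i];
		}
	}
	/*
	 * Covering 7 of 8 bursts means using m2, but only if that saves at
	 * least 50% versus m1; otherwise stick with m1.
	 */
	return ((m1 < m2 * 2) ? m1 : m2);
}

int
main(void)
{
	uint64_t spiky[ZIL_BURSTS] =
	    { 8192, 8192, 8192, 8192, 8192, 8192, 8192, 131072 };
	uint64_t steady[ZIL_BURSTS] =
	    { 98304, 98304, 98304, 98304, 98304, 98304, 98304, 131072 };

	/* One outlier is ignored: the saving is well over 50%. */
	printf("spiky  -> %llu\n", (unsigned long long)pick(spiky));
	/* Saving would only be ~25%, so keep the larger size. */
	printf("steady -> %llu\n", (unsigned long long)pick(steady));
	return (0);
}
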
/*
@@ -1845,12 +1858,13 @@ zil_lwb_predict(zilog_t *zilog)
* Has to be called under zl_issuer_lock to chain more lwbs.
*/
static lwb_t *
-zil_lwb_write_close(zilog_t *zilog, lwb_t *lwb, lwb_state_t state)
+zil_lwb_write_close(zilog_t *zilog, lwb_t *lwb)
{
- uint64_t blksz, plan, plan2;
+ uint64_t minbs, maxbs;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
+ membar_producer();
lwb->lwb_state = LWB_STATE_CLOSED;
/*
@@ -1875,27 +1889,34 @@ zil_lwb_write_close(zilog_t *zilog, lwb_t *lwb, lwb_state_t state)
* Try to predict what can it be and plan for the worst case.
*/
uint_t m;
- plan = zil_lwb_plan(zilog, zilog->zl_cur_left, &m);
+ maxbs = zil_lwb_plan(zilog, zilog->zl_cur_left, &m);
+ minbs = m;
if (zilog->zl_parallel) {
- plan2 = zil_lwb_plan(zilog, zilog->zl_cur_left +
- zil_lwb_predict(zilog), &m);
- if (plan < plan2)
- plan = plan2;
+ uint64_t minp, maxp;
+ zil_lwb_predict(zilog, &minp, &maxp);
+ maxp = zil_lwb_plan(zilog, zilog->zl_cur_left + maxp,
+ &m);
+ if (maxbs < maxp)
+ maxbs = maxp;
}
} else {
/*
* The previous burst is done and we can only predict what
* will come next.
*/
- plan = zil_lwb_predict(zilog);
+ zil_lwb_predict(zilog, &minbs, &maxbs);
}
- blksz = plan + sizeof (zil_chain_t);
- blksz = P2ROUNDUP_TYPED(blksz, ZIL_MIN_BLKSZ, uint64_t);
- blksz = MIN(blksz, zilog->zl_max_block_size);
- DTRACE_PROBE3(zil__block__size, zilog_t *, zilog, uint64_t, blksz,
- uint64_t, plan);
- return (zil_alloc_lwb(zilog, blksz, NULL, 0, 0, state));
+ minbs += sizeof (zil_chain_t);
+ maxbs += sizeof (zil_chain_t);
+ minbs = P2ROUNDUP_TYPED(minbs, ZIL_MIN_BLKSZ, uint64_t);
+ maxbs = P2ROUNDUP_TYPED(maxbs, ZIL_MIN_BLKSZ, uint64_t);
+ maxbs = MIN(maxbs, zilog->zl_max_block_size);
+ minbs = MIN(minbs, maxbs);
+ DTRACE_PROBE3(zil__block__size, zilog_t *, zilog, uint64_t, minbs,
+ uint64_t, maxbs);
+
+ return (zil_alloc_lwb(zilog, NULL, minbs, maxbs, 0, 0));
}
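
A small worked example of the rounding and clamping above, with stand-in values for ZIL_MIN_BLKSZ, zl_max_block_size and sizeof (zil_chain_t):

#include <stdint.h>
#include <stdio.h>

#define P2ROUNDUP(x, a)	(-(-(uint64_t)(x) & -(uint64_t)(a)))
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	uint64_t zil_min_blksz = 4096;		/* ZIL_MIN_BLKSZ stand-in */
	uint64_t zl_max_block_size = 131072;	/* pool's max log block size */
	uint64_t chain = 96;			/* sizeof (zil_chain_t) stand-in */
	uint64_t minbs = 10000 + chain;		/* planned minimum payload + header */
	uint64_t maxbs = 200000 + chain;	/* planned maximum payload + header */

	minbs = P2ROUNDUP(minbs, zil_min_blksz);
	maxbs = P2ROUNDUP(maxbs, zil_min_blksz);
	maxbs = MIN(maxbs, zl_max_block_size);	/* never above the block size limit */
	minbs = MIN(minbs, maxbs);		/* keep the range well-formed */
	printf("minbs=%llu maxbs=%llu\n",
	    (unsigned long long)minbs, (unsigned long long)maxbs);
	return (0);
}
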
/*
@@ -1944,14 +1965,16 @@ zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
mutex_exit(&zilog->zl_lock);
next_lwb:
- if (lwb->lwb_slim)
+ if (lwb->lwb_flags & LWB_FLAG_SLIM)
zilc = (zil_chain_t *)lwb->lwb_buf;
else
zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_nmax);
- int wsz = lwb->lwb_sz;
+ uint64_t alloc_size = BP_GET_LSIZE(&lwb->lwb_blk);
+ int wsz = alloc_size;
if (lwb->lwb_error == 0) {
abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf, lwb->lwb_sz);
- if (!lwb->lwb_slog || zilog->zl_cur_size <= zil_slog_bulk)
+ if (!(lwb->lwb_flags & LWB_FLAG_SLOG) ||
+ zilog->zl_cur_size <= zil_slog_bulk)
prio = ZIO_PRIORITY_SYNC_WRITE;
else
prio = ZIO_PRIORITY_ASYNC_WRITE;
@@ -1959,16 +1982,17 @@ next_lwb:
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);
lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio, spa, 0,
- &lwb->lwb_blk, lwb_abd, lwb->lwb_sz, zil_lwb_write_done,
+ &lwb->lwb_blk, lwb_abd, alloc_size, zil_lwb_write_done,
lwb, prio, ZIO_FLAG_CANFAIL, &zb);
zil_lwb_add_block(lwb, &lwb->lwb_blk);
- if (lwb->lwb_slim) {
+ if (lwb->lwb_flags & LWB_FLAG_SLIM) {
/* For Slim ZIL only write what is used. */
wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ,
int);
- ASSERT3S(wsz, <=, lwb->lwb_sz);
- zio_shrink(lwb->lwb_write_zio, wsz);
+ ASSERT3S(wsz, <=, alloc_size);
+ if (wsz < alloc_size)
+ zio_shrink(lwb->lwb_write_zio, wsz);
wsz = lwb->lwb_write_zio->io_size;
}
memset(lwb->lwb_buf + lwb->lwb_nused, 0, wsz - lwb->lwb_nused);
@@ -2004,13 +2028,53 @@ next_lwb:
BP_ZERO(bp);
error = lwb->lwb_error;
if (error == 0) {
- error = zio_alloc_zil(spa, zilog->zl_os, txg, bp, nlwb->lwb_sz,
- &slog);
+ /*
+ * Allocation flexibility depends on LWB state:
+ * if NEW: allow range allocation and larger sizes;
+ * if OPENED: use fixed predetermined allocation size;
+ * if CLOSED + Slim: allocate precisely for actual usage.
+ */
+ boolean_t flexible = (nlwb->lwb_state == LWB_STATE_NEW);
+ if (flexible) {
+ /* We need to prevent opening till we update lwb_sz. */
+ mutex_enter(&nlwb->lwb_lock);
+ flexible = (nlwb->lwb_state == LWB_STATE_NEW);
+ if (!flexible)
+ mutex_exit(&nlwb->lwb_lock); /* We lost. */
+ }
+ boolean_t closed_slim = (nlwb->lwb_state == LWB_STATE_CLOSED &&
+ (lwb->lwb_flags & LWB_FLAG_SLIM));
+
+ uint64_t min_size, max_size;
+ if (closed_slim) {
+ /* This transition is racy, but only one way. */
+ membar_consumer();
+ min_size = max_size = P2ROUNDUP_TYPED(nlwb->lwb_nused,
+ ZIL_MIN_BLKSZ, uint64_t);
+ } else if (flexible) {
+ min_size = nlwb->lwb_min_sz;
+ max_size = nlwb->lwb_sz;
+ } else {
+ min_size = max_size = nlwb->lwb_sz;
+ }
+
+ error = zio_alloc_zil(spa, zilog->zl_os, txg, bp,
+ min_size, max_size, &slog, flexible);
+ if (error == 0) {
+ if (closed_slim)
+ ASSERT3U(BP_GET_LSIZE(bp), ==, max_size);
+ else if (flexible)
+ nlwb->lwb_sz = BP_GET_LSIZE(bp);
+ else
+ ASSERT3U(BP_GET_LSIZE(bp), ==, nlwb->lwb_sz);
+ }
+ if (flexible)
+ mutex_exit(&nlwb->lwb_lock);
}
if (error == 0) {
ASSERT3U(BP_GET_BIRTH(bp), ==, txg);
- BP_SET_CHECKSUM(bp, nlwb->lwb_slim ? ZIO_CHECKSUM_ZILOG2 :
- ZIO_CHECKSUM_ZILOG);
+ BP_SET_CHECKSUM(bp, (nlwb->lwb_flags & LWB_FLAG_SLIM) ?
+ ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
bp->blk_cksum = lwb->lwb_blk.blk_cksum;
bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
}
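
For clarity, here is a standalone sketch of the min/max size decision above (hypothetical pick_alloc_range() helper; the state values and the 4096-byte ZIL_MIN_BLKSZ are stand-ins, and the Slim flag is collapsed into a single parameter).

#include <stdint.h>
#include <stdio.h>

#define P2ROUNDUP(x, a)	(-(-(uint64_t)(x) & -(uint64_t)(a)))

enum lwb_state { LWB_NEW, LWB_OPENED, LWB_CLOSED };

static void
pick_alloc_range(enum lwb_state state, int slim, uint64_t lwb_min_sz,
    uint64_t lwb_sz, uint64_t lwb_nused, uint64_t *min_size, uint64_t *max_size)
{
	if (state == LWB_CLOSED && slim) {
		/* Closed Slim LWB: final usage is known, allocate exactly that. */
		*min_size = *max_size = P2ROUNDUP(lwb_nused, 4096);
	} else if (state == LWB_NEW) {
		/* Not opened yet: the allocator may return anything in [min, max]. */
		*min_size = lwb_min_sz;
		*max_size = lwb_sz;
	} else {
		/* Already opened: the buffer size is fixed, the block must match. */
		*min_size = *max_size = lwb_sz;
	}
}

int
main(void)
{
	uint64_t mn, mx;

	pick_alloc_range(LWB_NEW, 0, 8192, 131072, 0, &mn, &mx);
	printf("NEW:           [%llu, %llu]\n",
	    (unsigned long long)mn, (unsigned long long)mx);
	pick_alloc_range(LWB_CLOSED, 1, 8192, 131072, 30000, &mn, &mx);
	printf("CLOSED + Slim: [%llu, %llu]\n",
	    (unsigned long long)mn, (unsigned long long)mx);
	return (0);
}
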
@@ -2039,14 +2103,15 @@ next_lwb:
if (nlwb) {
nlwb->lwb_blk = *bp;
nlwb->lwb_error = error;
- nlwb->lwb_slog = slog;
+ if (slog)
+ nlwb->lwb_flags |= LWB_FLAG_SLOG;
nlwb->lwb_alloc_txg = txg;
if (nlwb->lwb_state != LWB_STATE_READY)
nlwb = NULL;
}
mutex_exit(&zilog->zl_lock);
- if (lwb->lwb_slog) {
+ if (lwb->lwb_flags & LWB_FLAG_SLOG) {
ZIL_STAT_BUMP(zilog, zil_itx_metaslab_slog_count);
ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_bytes,
lwb->lwb_nused);
@@ -2220,7 +2285,6 @@ zil_lwb_assign(zilog_t *zilog, lwb_t *lwb, itx_t *itx, list_t *ilwbs)
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb, !=, NULL);
- ASSERT3P(lwb->lwb_buf, !=, NULL);
zil_lwb_write_open(zilog, lwb);
@@ -2262,9 +2326,10 @@ cont:
(dlen % max_log_data == 0 ||
lwb_sp < reclen + dlen % max_log_data))) {
list_insert_tail(ilwbs, lwb);
- lwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_OPENED);
+ lwb = zil_lwb_write_close(zilog, lwb);
if (lwb == NULL)
return (NULL);
+ zil_lwb_write_open(zilog, lwb);
lwb_sp = lwb->lwb_nmax - lwb->lwb_nused;
}
@@ -2554,7 +2619,7 @@ zil_itxg_clean(void *arg)
* called) we will hit this case.
*/
if (itx->itx_lr.lrc_txtype == TX_COMMIT)
- zil_commit_waiter_skip(itx->itx_private);
+ zil_commit_waiter_done(itx->itx_private, 0);
zil_itx_destroy(itx, 0);
}
@@ -2742,6 +2807,7 @@ zil_crash_clean(zilog_t *zilog, uint64_t synced_txg)
}
/* This LWB is from the past, so we can clean it up now. */
+ ASSERT(lwb->lwb_flags & LWB_FLAG_CRASHED);
list_remove(&zilog->zl_lwb_crash_list, lwb);
if (lwb->lwb_buf != NULL)
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
@@ -2981,7 +3047,7 @@ zil_prune_commit_list(zilog_t *zilog)
* never any itx's for it to wait on), so it's
* safe to skip this waiter and mark it done.
*/
- zil_commit_waiter_skip(itx->itx_private);
+ zil_commit_waiter_done(itx->itx_private, 0);
} else {
zil_commit_waiter_link_lwb(itx->itx_private, last_lwb);
}
@@ -3212,15 +3278,21 @@ zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
* "next" lwb on-disk. When this happens, we must stall
* the ZIL write pipeline; see the comment within
* zil_commit_writer_stall() for more details.
+ *
+ * ESHUTDOWN has to be handled carefully here. If we get it,
+ * then the pool suspended and zil_crash() was called, so we
+ * need to stop trying and just get an error back to the
+ * callers.
*/
int err = 0;
while ((lwb = list_remove_head(ilwbs)) != NULL) {
- err = zil_lwb_write_issue(zilog, lwb);
- if (err != 0)
- break;
+ if (err == 0)
+ err = zil_lwb_write_issue(zilog, lwb);
}
- if (err == 0)
+ if (err != ESHUTDOWN)
err = zil_commit_writer_stall(zilog);
+ if (err == ESHUTDOWN)
+ err = SET_ERROR(EIO);
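
A minimal sketch of the drain-and-remap pattern, assuming a hypothetical issue_one() that fails with ESHUTDOWN partway through: once an issue fails, the remaining entries are still consumed without being issued, the stall is skipped for ESHUTDOWN, and the error is remapped to EIO for the callers.

#include <errno.h>
#include <stdio.h>

static int
issue_one(int i)
{
	return (i == 2 ? ESHUTDOWN : 0);	/* pretend the third issue fails */
}

int
main(void)
{
	int err = 0;

	for (int i = 0; i < 5; i++) {		/* stand-in for draining ilwbs */
		if (err == 0)
			err = issue_one(i);
		/* even after an error, every remaining entry is consumed */
	}
	if (err != ESHUTDOWN)
		err = 0;	/* the real code calls zil_commit_writer_stall() here */
	if (err == ESHUTDOWN)
		err = EIO;	/* report a generic I/O error to the callers */
	printf("err=%d\n", err);
	return (0);
}
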
/*
* Additionally, we have to signal and mark the "nolwb"
@@ -3230,7 +3302,7 @@ zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
*/
zil_commit_waiter_t *zcw;
while ((zcw = list_remove_head(&nolwb_waiters)) != NULL)
- zil_commit_waiter_skip(zcw);
+ zil_commit_waiter_done(zcw, err);
/*
* And finally, we have to destroy the itx's that
@@ -3238,7 +3310,7 @@ zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
* the itx's callback if one exists for the itx.
*/
while ((itx = list_remove_head(&nolwb_itxs)) != NULL)
- zil_itx_destroy(itx, 0);
+ zil_itx_destroy(itx, err);
} else {
ASSERT(list_is_empty(&nolwb_waiters));
ASSERT3P(lwb, !=, NULL);
@@ -3292,17 +3364,17 @@ zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
(!zilog->zl_parallel || zilog->zl_suspend > 0)) {
zil_burst_done(zilog);
list_insert_tail(ilwbs, lwb);
- lwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_NEW);
+ lwb = zil_lwb_write_close(zilog, lwb);
if (lwb == NULL) {
int err = 0;
while ((lwb =
list_remove_head(ilwbs)) != NULL) {
- err = zil_lwb_write_issue(zilog, lwb);
- if (err != 0)
- break;
+ if (err == 0)
+ err = zil_lwb_write_issue(
+ zilog, lwb);
}
- if (err == 0)
- zil_commit_writer_stall(zilog);
+ if (err != ESHUTDOWN)
+ (void) zil_commit_writer_stall(zilog);
}
}
}
@@ -3470,7 +3542,7 @@ zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
* hasn't been issued.
*/
zil_burst_done(zilog);
- lwb_t *nlwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_NEW);
+ lwb_t *nlwb = zil_lwb_write_close(zilog, lwb);
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_CLOSED);
@@ -3546,7 +3618,7 @@ zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw)
* commit itxs. When this occurs, the commit waiters linked
* off of these commit itxs will not be committed to an
* lwb. Additionally, these commit waiters will not be
- * marked done until zil_commit_waiter_skip() is called via
+ * marked done until zil_commit_waiter_done() is called via
* zil_itxg_clean().
*
* Thus, it's possible for this commit waiter (i.e. the
@@ -3624,7 +3696,7 @@ zil_alloc_commit_waiter(void)
list_link_init(&zcw->zcw_node);
zcw->zcw_lwb = NULL;
zcw->zcw_done = B_FALSE;
- zcw->zcw_zio_error = 0;
+ zcw->zcw_error = 0;
return (zcw);
}
@@ -3728,6 +3800,9 @@ zil_crash(zilog_t *zilog)
*/
for (lwb_t *lwb = list_head(&zilog->zl_lwb_crash_list); lwb != NULL;
lwb = list_next(&zilog->zl_lwb_crash_list, lwb)) {
+ ASSERT(!(lwb->lwb_flags & LWB_FLAG_CRASHED));
+ lwb->lwb_flags |= LWB_FLAG_CRASHED;
+
itx_t *itx;
while ((itx = list_remove_head(&lwb->lwb_itxs)) != NULL)
zil_itx_destroy(itx, EIO);
@@ -3736,7 +3811,7 @@ zil_crash(zilog_t *zilog)
while ((zcw = list_remove_head(&lwb->lwb_waiters)) != NULL) {
mutex_enter(&zcw->zcw_lock);
zcw->zcw_lwb = NULL;
- zcw->zcw_zio_error = EIO;
+ zcw->zcw_error = EIO;
zcw->zcw_done = B_TRUE;
cv_broadcast(&zcw->zcw_cv);
mutex_exit(&zcw->zcw_lock);
@@ -4014,7 +4089,7 @@ zil_commit_impl(zilog_t *zilog, uint64_t foid)
zil_commit_waiter(zilog, zcw);
int err = 0;
- if (zcw->zcw_zio_error != 0) {
+ if (zcw->zcw_error != 0) {
/*
* If there was an error writing out the ZIL blocks that
* this thread is waiting on, then we fallback to
@@ -4149,7 +4224,7 @@ zil_lwb_cons(void *vbuf, void *unused, int kmflag)
offsetof(zil_commit_waiter_t, zcw_node));
avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare,
sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
- mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
+ mutex_init(&lwb->lwb_lock, NULL, MUTEX_DEFAULT, NULL);
return (0);
}
@@ -4158,7 +4233,7 @@ zil_lwb_dest(void *vbuf, void *unused)
{
(void) unused;
lwb_t *lwb = vbuf;
- mutex_destroy(&lwb->lwb_vdev_lock);
+ mutex_destroy(&lwb->lwb_lock);
avl_destroy(&lwb->lwb_vdev_tree);
list_destroy(&lwb->lwb_waiters);
list_destroy(&lwb->lwb_itxs);
@@ -4381,7 +4456,7 @@ zil_close(zilog_t *zilog)
if (lwb != NULL) {
ASSERT(list_is_empty(&zilog->zl_lwb_list));
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_NEW);
- zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
+ ASSERT0P(lwb->lwb_buf);
zil_free_lwb(zilog, lwb);
}
mutex_exit(&zilog->zl_lock);
@@ -4472,16 +4547,16 @@ zil_suspend(const char *osname, void **cookiep)
cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
mutex_exit(&zilog->zl_lock);
- if (cookiep == NULL)
+ if (zilog->zl_restart_txg > 0) {
+ /* ZIL crashed while we were waiting. */
+ zil_resume(os);
+ error = SET_ERROR(EBUSY);
+ } else if (cookiep == NULL)
zil_resume(os);
else
*cookiep = os;
- if (zilog->zl_restart_txg > 0)
- /* ZIL crashed while we were waiting. */
- return (SET_ERROR(EBUSY));
-
- return (0);
+ return (error);
}
/*
diff --git a/sys/contrib/openzfs/module/zfs/zio.c b/sys/contrib/openzfs/module/zfs/zio.c
index 3f0ddb63249d..4cf8912d4269 100644
--- a/sys/contrib/openzfs/module/zfs/zio.c
+++ b/sys/contrib/openzfs/module/zfs/zio.c
@@ -4434,12 +4434,15 @@ zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
*/
int
zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
- uint64_t size, boolean_t *slog)
+ uint64_t min_size, uint64_t max_size, boolean_t *slog,
+ boolean_t allow_larger)
{
int error;
zio_alloc_list_t io_alloc_list;
+ uint64_t alloc_size = 0;
ASSERT(txg > spa_syncing_txg(spa));
+ ASSERT3U(min_size, <=, max_size);
metaslab_trace_init(&io_alloc_list);
@@ -4448,7 +4451,7 @@ zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
* Fill in the obvious ones before calling into metaslab_alloc().
*/
BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
- BP_SET_PSIZE(new_bp, size);
+ BP_SET_PSIZE(new_bp, max_size);
BP_SET_LEVEL(new_bp, 0);
/*
@@ -4463,43 +4466,51 @@ zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
ZIOSTAT_BUMP(ziostat_total_allocations);
/* Try log class (dedicated slog devices) first */
- error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
- txg, NULL, flags, &io_alloc_list, allocator, NULL);
+ error = metaslab_alloc_range(spa, spa_log_class(spa), min_size,
+ max_size, new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator,
+ NULL, &alloc_size);
*slog = (error == 0);
/* Try special_embedded_log class (reserved on special vdevs) */
if (error != 0) {
- error = metaslab_alloc(spa, spa_special_embedded_log_class(spa),
- size, new_bp, 1, txg, NULL, flags, &io_alloc_list,
- allocator, NULL);
+ error = metaslab_alloc_range(spa,
+ spa_special_embedded_log_class(spa), min_size, max_size,
+ new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator,
+ NULL, &alloc_size);
}
/* Try special class (general special vdev allocation) */
if (error != 0) {
- error = metaslab_alloc(spa, spa_special_class(spa), size,
- new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator,
- NULL);
+ error = metaslab_alloc_range(spa, spa_special_class(spa),
+ min_size, max_size, new_bp, 1, txg, NULL, flags,
+ &io_alloc_list, allocator, NULL, &alloc_size);
}
/* Try embedded_log class (reserved on normal vdevs) */
if (error != 0) {
- error = metaslab_alloc(spa, spa_embedded_log_class(spa), size,
- new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator,
- NULL);
+ error = metaslab_alloc_range(spa, spa_embedded_log_class(spa),
+ min_size, max_size, new_bp, 1, txg, NULL, flags,
+ &io_alloc_list, allocator, NULL, &alloc_size);
}
/* Finally fall back to normal class */
if (error != 0) {
ZIOSTAT_BUMP(ziostat_alloc_class_fallbacks);
- error = metaslab_alloc(spa, spa_normal_class(spa), size,
- new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator,
- NULL);
+ error = metaslab_alloc_range(spa, spa_normal_class(spa),
+ min_size, max_size, new_bp, 1, txg, NULL, flags,
+ &io_alloc_list, allocator, NULL, &alloc_size);
}
metaslab_trace_fini(&io_alloc_list);
if (error == 0) {
- BP_SET_LSIZE(new_bp, size);
- BP_SET_PSIZE(new_bp, size);
+ if (!allow_larger)
+ alloc_size = MIN(alloc_size, max_size);
+ else if (max_size <= SPA_OLD_MAXBLOCKSIZE)
+ alloc_size = MIN(alloc_size, SPA_OLD_MAXBLOCKSIZE);
+ alloc_size = P2ALIGN_TYPED(alloc_size, ZIL_MIN_BLKSZ, uint64_t);
+
+ BP_SET_LSIZE(new_bp, alloc_size);
+ BP_SET_PSIZE(new_bp, alloc_size);
BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
BP_SET_CHECKSUM(new_bp,
spa_version(spa) >= SPA_VERSION_SLIM_ZIL
@@ -4527,8 +4538,8 @@ zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
}
} else {
zfs_dbgmsg("%s: zil block allocation failure: "
- "size %llu, error %d", spa_name(spa), (u_longlong_t)size,
- error);
+ "min_size %llu, max_size %llu, error %d", spa_name(spa),
+ (u_longlong_t)min_size, (u_longlong_t)max_size, error);
}
return (error);
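
The post-allocation trimming is easiest to see with concrete numbers. The sketch below (hypothetical trim_zil_alloc() helper; the block-size constants are stand-ins) shows the clamp when larger blocks are not allowed versus the SPA_OLD_MAXBLOCKSIZE cap when they are, followed by alignment down to a ZIL_MIN_BLKSZ multiple.

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))
#define P2ALIGN(x, a)	((uint64_t)(x) & -(uint64_t)(a))

#define ZIL_MIN_BLKSZ_SKETCH		4096	/* stand-in */
#define SPA_OLD_MAXBLOCKSIZE_SKETCH	131072	/* stand-in */

static uint64_t
trim_zil_alloc(uint64_t alloc_size, uint64_t max_size, int allow_larger)
{
	if (!allow_larger)
		alloc_size = MIN(alloc_size, max_size);
	else if (max_size <= SPA_OLD_MAXBLOCKSIZE_SKETCH)
		alloc_size = MIN(alloc_size, SPA_OLD_MAXBLOCKSIZE_SKETCH);
	return (P2ALIGN(alloc_size, ZIL_MIN_BLKSZ_SKETCH));
}

int
main(void)
{
	/* Allocator handed back 98765 bytes for a 65536-byte request. */
	printf("fixed size:   %llu\n",
	    (unsigned long long)trim_zil_alloc(98765, 65536, 0));	/* 65536 */
	printf("allow larger: %llu\n",
	    (unsigned long long)trim_zil_alloc(98765, 65536, 1));	/* 98304 */
	return (0);
}
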
diff --git a/sys/contrib/openzfs/module/zfs/zvol.c b/sys/contrib/openzfs/module/zfs/zvol.c
index 29f51e230a37..faced0db7e9e 100644
--- a/sys/contrib/openzfs/module/zfs/zvol.c
+++ b/sys/contrib/openzfs/module/zfs/zvol.c
@@ -38,25 +38,36 @@
* Copyright 2014 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright (c) 2012, 2019 by Delphix. All rights reserved.
- * Copyright (c) 2024, Klara, Inc.
+ * Copyright (c) 2024, 2025, Klara, Inc.
*/
/*
* Note on locking of zvol state structures.
*
- * These structures are used to maintain internal state used to emulate block
- * devices on top of zvols. In particular, management of device minor number
- * operations - create, remove, rename, and set_snapdev - involves access to
- * these structures. The zvol_state_lock is primarily used to protect the
- * zvol_state_list. The zv->zv_state_lock is used to protect the contents
- * of the zvol_state_t structures, as well as to make sure that when the
- * time comes to remove the structure from the list, it is not in use, and
- * therefore, it can be taken off zvol_state_list and freed.
+ * zvol_state_t represents the connection between a single dataset
+ * (DMU_OST_ZVOL) and the device "minor" (some OS-specific representation of a
+ * "disk" or "device" or "volume", eg, a /dev/zdXX node, a GEOM object, etc).
*
- * The zv_suspend_lock was introduced to allow for suspending I/O to a zvol,
- * e.g. for the duration of receive and rollback operations. This lock can be
- * held for significant periods of time. Given that it is undesirable to hold
- * mutexes for long periods of time, the following lock ordering applies:
+ * The global zvol_state_lock is used to protect access to zvol_state_list and
+ * zvol_htable, which are the primary way to obtain a zvol_state_t from a name.
+ * It should not be used for anything not name-relateds, and you should avoid
+ * sleeping or waiting while its held. See zvol_find_by_name(), zvol_insert(),
+ * zvol_remove().
+ *
+ * The zv_state_lock is used to protect the contents of the associated
+ * zvol_state_t. Most of the zvol_state_t is dedicated to control and
+ * configuration; almost none of it is needed for data operations (that is,
+ * read, write, flush) so this lock is rarely taken during general IO. It
+ * should be released quickly; you should avoid sleeping or waiting while
+ * it's held.
+ *
+ * zv_suspend_lock is used to suspend IO/data operations to a zvol. The read
+ * half should be held for the duration of an IO operation. The write half
+ * should be taken when something needs to wait for IO to complete and then
+ * block further IO, e.g. for the duration of receive and rollback operations.
+ * This lock can be held for long periods of time.
+ *
+ * Thus, the following lock ordering applies.
* - take zvol_state_lock if necessary, to protect zvol_state_list
* - take zv_suspend_lock if necessary, by the code path in question
* - take zv_state_lock to protect zvol_state_t
@@ -67,9 +78,8 @@
* these operations are serialized per pool. Consequently, we can be certain
* that for a given zvol, there is only one operation at a time in progress.
* That is why one can be sure that first, zvol_state_t for a given zvol is
- * allocated and placed on zvol_state_list, and then other minor operations
- * for this zvol are going to proceed in the order of issue.
- *
+ * allocated and placed on zvol_state_list, and then other minor operations for
+ * this zvol are going to proceed in the order of issue.
*/
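
A userspace sketch of the ordering described above, with pthreads standing in for the kernel rwlock/mutex primitives (struct and function names are hypothetical); the only point it demonstrates is the acquisition order: zvol_state_lock, then zv_suspend_lock, then zv_state_lock.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t zvol_state_lock = PTHREAD_RWLOCK_INITIALIZER;

struct zv_sketch {
	pthread_rwlock_t zv_suspend_lock;
	pthread_mutex_t zv_state_lock;
	int zv_open_count;
};

static void
ordered_access(struct zv_sketch *zv)
{
	pthread_rwlock_rdlock(&zvol_state_lock);	/* 1: name lists */
	pthread_rwlock_rdlock(&zv->zv_suspend_lock);	/* 2: data ops vs. suspend */
	pthread_mutex_lock(&zv->zv_state_lock);		/* 3: per-zvol state */

	zv->zv_open_count++;	/* ... inspect or update the zvol ... */

	pthread_mutex_unlock(&zv->zv_state_lock);
	pthread_rwlock_unlock(&zv->zv_suspend_lock);
	pthread_rwlock_unlock(&zvol_state_lock);
}

int
main(void)
{
	struct zv_sketch zv;

	pthread_rwlock_init(&zv.zv_suspend_lock, NULL);
	pthread_mutex_init(&zv.zv_state_lock, NULL);
	zv.zv_open_count = 0;
	ordered_access(&zv);
	printf("open count: %d\n", zv.zv_open_count);
	return (0);
}
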
#include <sys/dataset_kstats.h>
@@ -1135,20 +1145,34 @@ zvol_tag(zvol_state_t *zv)
/*
* Suspend the zvol for recv and rollback.
*/
-zvol_state_t *
-zvol_suspend(const char *name)
+int
+zvol_suspend(const char *name, zvol_state_t **zvp)
{
zvol_state_t *zv;
zv = zvol_find_by_name(name, RW_WRITER);
if (zv == NULL)
- return (NULL);
+ return (SET_ERROR(ENOENT));
/* block all I/O, release in zvol_resume. */
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
ASSERT(RW_WRITE_HELD(&zv->zv_suspend_lock));
+ /*
+ * If it's being removed, unlock and return error. It doesn't make any
+ * sense to try to suspend a zvol being removed, but being here also
+ * means that zvol_remove_minors_impl() is about to call zvol_remove()
+ * and then destroy the zvol_state_t, so returning a pointer to it for
+ * the caller to mess with would be a disaster anyway.
+ */
+ if (zv->zv_flags & ZVOL_REMOVING) {
+ mutex_exit(&zv->zv_state_lock);
+ rw_exit(&zv->zv_suspend_lock);
+ /* NB: Returning EIO here to match zfsvfs_teardown() */
+ return (SET_ERROR(EIO));
+ }
+
atomic_inc(&zv->zv_suspend_ref);
if (zv->zv_open_count > 0)
@@ -1161,7 +1185,8 @@ zvol_suspend(const char *name)
mutex_exit(&zv->zv_state_lock);
/* zv_suspend_lock is released in zvol_resume() */
- return (zv);
+ *zvp = zv;
+ return (0);
}
int
@@ -1570,184 +1595,156 @@ zvol_create_minors_impl(zvol_task_t *task)
}
/*
- * Remove minors for specified dataset including children and snapshots.
- */
-
-/*
- * Remove the minor for a given zvol. This will do it all:
- * - flag the zvol for removal, so new requests are rejected
- * - wait until outstanding requests are completed
- * - remove it from lists
- * - free it
- * It's also usable as a taskq task, and smells nice too.
+ * Remove minors for specified dataset and, optionally, its children and
+ * snapshots.
*/
static void
-zvol_remove_minor_task(void *arg)
-{
- zvol_state_t *zv = (zvol_state_t *)arg;
-
- ASSERT(!RW_LOCK_HELD(&zvol_state_lock));
- ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
-
- mutex_enter(&zv->zv_state_lock);
- while (zv->zv_open_count > 0 || atomic_read(&zv->zv_suspend_ref)) {
- zv->zv_flags |= ZVOL_REMOVING;
- cv_wait(&zv->zv_removing_cv, &zv->zv_state_lock);
- }
- mutex_exit(&zv->zv_state_lock);
-
- rw_enter(&zvol_state_lock, RW_WRITER);
- mutex_enter(&zv->zv_state_lock);
-
- zvol_remove(zv);
- zvol_os_clear_private(zv);
-
- mutex_exit(&zv->zv_state_lock);
- rw_exit(&zvol_state_lock);
-
- zvol_os_free(zv);
-}
-
-static void
-zvol_free_task(void *arg)
-{
- zvol_os_free(arg);
-}
-
-static void
zvol_remove_minors_impl(zvol_task_t *task)
{
zvol_state_t *zv, *zv_next;
const char *name = task ? task->zt_name1 : NULL;
int namelen = ((name) ? strlen(name) : 0);
- taskqid_t t;
- list_t delay_list, free_list;
+ boolean_t children = task ? !!task->zt_value : B_TRUE;
if (zvol_inhibit_dev)
return;
- list_create(&delay_list, sizeof (zvol_state_t),
- offsetof(zvol_state_t, zv_next));
- list_create(&free_list, sizeof (zvol_state_t),
- offsetof(zvol_state_t, zv_next));
+ /*
+ * We collect up zvols that we want to remove on a separate list, so
+ * that we don't have to hold zvol_state_lock for the whole time.
+ *
+ * We can't remove them from the global lists until we're completely
+ * done with them, because that would make it appear to ZFS-side ops that
+ * they don't exist, and the name might be reused, which can't be good.
+ */
+ list_t remove_list;
+ list_create(&remove_list, sizeof (zvol_state_t),
+ offsetof(zvol_state_t, zv_remove_node));
- rw_enter(&zvol_state_lock, RW_WRITER);
+ rw_enter(&zvol_state_lock, RW_READER);
for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
zv_next = list_next(&zvol_state_list, zv);
mutex_enter(&zv->zv_state_lock);
+ if (zv->zv_flags & ZVOL_REMOVING) {
+ /* Another thread is handling shutdown, skip it. */
+ mutex_exit(&zv->zv_state_lock);
+ continue;
+ }
+
+ /*
+ * This zvol should be removed if:
+ * - no name was offered (ie removing all at shutdown); or
+ * - name matches exactly; or
+ * - we were asked to remove children, and
+ * - the start of the name matches, and
+ * - there is a '/' immediately after the matched name; or
+ * - there is a '@' immediately after the matched name
+ */
if (name == NULL || strcmp(zv->zv_name, name) == 0 ||
- (strncmp(zv->zv_name, name, namelen) == 0 &&
+ (children && strncmp(zv->zv_name, name, namelen) == 0 &&
(zv->zv_name[namelen] == '/' ||
zv->zv_name[namelen] == '@'))) {
- /*
- * By holding zv_state_lock here, we guarantee that no
- * one is currently using this zv
- */
/*
- * If in use, try to throw everyone off and try again
- * later.
+ * Matched, so mark it for removal. We want to take the
+ * write half of the suspend lock to make sure that
+ * the zvol is not suspended, and give any data ops
+ * a chance to finish.
*/
- if (zv->zv_open_count > 0 ||
- atomic_read(&zv->zv_suspend_ref)) {
- zv->zv_flags |= ZVOL_REMOVING;
- t = taskq_dispatch(
- zv->zv_objset->os_spa->spa_zvol_taskq,
- zvol_remove_minor_task, zv, TQ_SLEEP);
- if (t == TASKQID_INVALID) {
- /*
- * Couldn't create the task, so we'll
- * do it in place once the loop is
- * finished.
- */
- list_insert_head(&delay_list, zv);
- }
+ mutex_exit(&zv->zv_state_lock);
+ rw_enter(&zv->zv_suspend_lock, RW_WRITER);
+ mutex_enter(&zv->zv_state_lock);
+
+ if (zv->zv_flags & ZVOL_REMOVING) {
+ /* Another thread has taken it, let them. */
mutex_exit(&zv->zv_state_lock);
+ rw_exit(&zv->zv_suspend_lock);
continue;
}
- zvol_remove(zv);
-
/*
- * Cleared while holding zvol_state_lock as a writer
- * which will prevent zvol_open() from opening it.
+ * Mark it and unlock. New entries will see the flag
+ * and return ENXIO.
*/
- zvol_os_clear_private(zv);
-
- /* Drop zv_state_lock before zvol_free() */
+ zv->zv_flags |= ZVOL_REMOVING;
mutex_exit(&zv->zv_state_lock);
+ rw_exit(&zv->zv_suspend_lock);
- /* Try parallel zv_free, if failed do it in place */
- t = taskq_dispatch(system_taskq, zvol_free_task, zv,
- TQ_SLEEP);
- if (t == TASKQID_INVALID)
- list_insert_head(&free_list, zv);
- } else {
+ /* Put it on the list for the next stage. */
+ list_insert_head(&remove_list, zv);
+ } else
mutex_exit(&zv->zv_state_lock);
- }
}
- rw_exit(&zvol_state_lock);
-
- /* Wait for zvols that we couldn't create a remove task for */
- while ((zv = list_remove_head(&delay_list)) != NULL)
- zvol_remove_minor_task(zv);
-
- /* Free any that we couldn't free in parallel earlier */
- while ((zv = list_remove_head(&free_list)) != NULL)
- zvol_os_free(zv);
-}
-
-/* Remove minor for this specific volume only */
-static int
-zvol_remove_minor_impl(const char *name)
-{
- zvol_state_t *zv = NULL, *zv_next;
- if (zvol_inhibit_dev)
- return (0);
+ rw_exit(&zvol_state_lock);
- rw_enter(&zvol_state_lock, RW_WRITER);
+ /* Didn't match any, nothing to do! */
+ if (list_is_empty(&remove_list)) {
+ if (task)
+ task->zt_error = SET_ERROR(ENOENT);
+ return;
+ }
- for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
- zv_next = list_next(&zvol_state_list, zv);
+ /* Actually shut them all down. */
+ for (zv = list_head(&remove_list); zv != NULL; zv = zv_next) {
+ zv_next = list_next(&remove_list, zv);
mutex_enter(&zv->zv_state_lock);
- if (strcmp(zv->zv_name, name) == 0)
- /* Found, leave the the loop with zv_lock held */
- break;
- mutex_exit(&zv->zv_state_lock);
- }
-
- if (zv == NULL) {
- rw_exit(&zvol_state_lock);
- return (SET_ERROR(ENOENT));
- }
- ASSERT(MUTEX_HELD(&zv->zv_state_lock));
+ /*
+ * Still open or suspended, just wait. This can happen if, for
+ * example, we managed to acquire zv_state_lock in the moments
+ * where zvol_open() or zvol_release() are trading locks to
+ * call zvol_first_open() or zvol_last_close().
+ */
+ while (zv->zv_open_count > 0 ||
+ atomic_read(&zv->zv_suspend_ref))
+ cv_wait(&zv->zv_removing_cv, &zv->zv_state_lock);
- if (zv->zv_open_count > 0 || atomic_read(&zv->zv_suspend_ref)) {
/*
- * In use, so try to throw everyone off, then wait
- * until finished.
+ * No users, shut down the OS side. This may not remove the
+ * minor from view immediately, depending on the kernel
+ * specifics, but it will ensure that it is unusable and that
+ * this zvol_state_t can never again be reached from an OS-side
+ * operation.
*/
- zv->zv_flags |= ZVOL_REMOVING;
+ zvol_os_remove_minor(zv);
mutex_exit(&zv->zv_state_lock);
+
+ /* Remove it from the name lookup lists */
+ rw_enter(&zvol_state_lock, RW_WRITER);
+ zvol_remove(zv);
rw_exit(&zvol_state_lock);
- zvol_remove_minor_task(zv);
- return (0);
}
- zvol_remove(zv);
- zvol_os_clear_private(zv);
+ /*
+ * Our own references on remove_list are the last ones; free them and
+ * we're done.
+ */
+ while ((zv = list_remove_head(&remove_list)) != NULL)
+ zvol_os_free(zv);
- mutex_exit(&zv->zv_state_lock);
- rw_exit(&zvol_state_lock);
+ list_destroy(&remove_list);
+}
- zvol_os_free(zv);
+/* Remove minor for this specific volume only */
+static int
+zvol_remove_minor_impl(const char *name)
+{
+ if (zvol_inhibit_dev)
+ return (0);
- return (0);
+ zvol_task_t task;
+ memset(&task, 0, sizeof (zvol_task_t));
+ strlcpy(task.zt_name1, name, sizeof (task.zt_name1));
+ task.zt_value = B_FALSE;
+
+ zvol_remove_minors_impl(&task);
+
+ return (task.zt_error);
}
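
The matching rules listed in zvol_remove_minors_impl() can be captured in a small predicate. In the standalone sketch below (hypothetical zvol_name_matches() helper), NULL matches everything, an exact name always matches, and a prefix match only counts when children are requested and the character after the prefix is '/' or '@'.

#include <stdio.h>
#include <string.h>

static int
zvol_name_matches(const char *zv_name, const char *name, int children)
{
	size_t namelen;

	if (name == NULL)
		return (1);
	if (strcmp(zv_name, name) == 0)
		return (1);
	namelen = strlen(name);
	return (children && strncmp(zv_name, name, namelen) == 0 &&
	    (zv_name[namelen] == '/' || zv_name[namelen] == '@'));
}

int
main(void)
{
	printf("%d\n", zvol_name_matches("tank/vol", "tank/vol", 0));	   /* 1: exact */
	printf("%d\n", zvol_name_matches("tank/vol@snap", "tank/vol", 1)); /* 1: snapshot child */
	printf("%d\n", zvol_name_matches("tank/volume2", "tank/vol", 1));  /* 0: not a child */
	printf("%d\n", zvol_name_matches("tank/vol/a", "tank/vol", 0));	   /* 0: children not requested */
	return (0);
}
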
/*
@@ -2067,6 +2064,7 @@ zvol_remove_minors(spa_t *spa, const char *name, boolean_t async)
task = kmem_zalloc(sizeof (zvol_task_t), KM_SLEEP);
task->zt_op = ZVOL_ASYNC_REMOVE_MINORS;
strlcpy(task->zt_name1, name, sizeof (task->zt_name1));
+ task->zt_value = B_TRUE;
id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
if ((async == B_FALSE) && (id != TASKQID_INVALID))
taskq_wait_id(spa->spa_zvol_taskq, id);
@@ -2188,14 +2186,6 @@ zvol_fini_impl(void)
zvol_remove_minors_impl(NULL);
- /*
- * The call to "zvol_remove_minors_impl" may dispatch entries to
- * the system_taskq, but it doesn't wait for those entries to
- * complete before it returns. Thus, we must wait for all of the
- * removals to finish, before we can continue.
- */
- taskq_wait_outstanding(system_taskq, 0);
-
kmem_free(zvol_htable, ZVOL_HT_SIZE * sizeof (struct hlist_head));
list_destroy(&zvol_state_list);
rw_destroy(&zvol_state_lock);
diff --git a/sys/contrib/openzfs/module/zstd/zfs_zstd.c b/sys/contrib/openzfs/module/zstd/zfs_zstd.c
index 391216d6e263..3db196953f74 100644
--- a/sys/contrib/openzfs/module/zstd/zfs_zstd.c
+++ b/sys/contrib/openzfs/module/zstd/zfs_zstd.c
@@ -876,9 +876,9 @@ static void __init
zstd_mempool_init(void)
{
zstd_mempool_cctx =
- kmem_zalloc(ZSTD_POOL_MAX * sizeof (struct zstd_pool), KM_SLEEP);
+ vmem_zalloc(ZSTD_POOL_MAX * sizeof (struct zstd_pool), KM_SLEEP);
zstd_mempool_dctx =
- kmem_zalloc(ZSTD_POOL_MAX * sizeof (struct zstd_pool), KM_SLEEP);
+ vmem_zalloc(ZSTD_POOL_MAX * sizeof (struct zstd_pool), KM_SLEEP);
for (int i = 0; i < ZSTD_POOL_MAX; i++) {
mutex_init(&zstd_mempool_cctx[i].barrier, NULL,
@@ -924,8 +924,8 @@ zstd_mempool_deinit(void)
release_pool(&zstd_mempool_dctx[i]);
}
- kmem_free(zstd_mempool_dctx, ZSTD_POOL_MAX * sizeof (struct zstd_pool));
- kmem_free(zstd_mempool_cctx, ZSTD_POOL_MAX * sizeof (struct zstd_pool));
+ vmem_free(zstd_mempool_dctx, ZSTD_POOL_MAX * sizeof (struct zstd_pool));
+ vmem_free(zstd_mempool_cctx, ZSTD_POOL_MAX * sizeof (struct zstd_pool));
zstd_mempool_dctx = NULL;
zstd_mempool_cctx = NULL;
}