Diffstat (limited to 'sys')
-rw-r--r--sys/contrib/openzfs/.github/CONTRIBUTING.md68
-rw-r--r--sys/contrib/openzfs/.github/ISSUE_TEMPLATE/bug_report.md53
-rw-r--r--sys/contrib/openzfs/.github/ISSUE_TEMPLATE/config.yml14
-rw-r--r--sys/contrib/openzfs/.github/ISSUE_TEMPLATE/feature_request.md33
-rw-r--r--sys/contrib/openzfs/.github/codecov.yml25
-rw-r--r--sys/contrib/openzfs/.github/no-response.yml13
-rw-r--r--sys/contrib/openzfs/.github/stale.yml26
-rw-r--r--sys/contrib/openzfs/.github/workflows/checkstyle.yaml36
-rw-r--r--sys/contrib/openzfs/.github/workflows/zfs-tests.yml58
-rw-r--r--sys/contrib/openzfs/.github/workflows/zloop.yml67
-rw-r--r--sys/contrib/openzfs/META4
-rw-r--r--sys/contrib/openzfs/Makefile.am10
-rw-r--r--sys/contrib/openzfs/cmd/Makefile.am1
-rwxr-xr-xsys/contrib/openzfs/cmd/arc_summary/arc_summary2108
-rwxr-xr-xsys/contrib/openzfs/cmd/arc_summary/arc_summary377
-rwxr-xr-xsys/contrib/openzfs/cmd/arcstat/arcstat.in76
-rwxr-xr-xsys/contrib/openzfs/cmd/dbufstat/dbufstat.in4
-rw-r--r--sys/contrib/openzfs/cmd/mount_zfs/mount_zfs.c91
-rw-r--r--sys/contrib/openzfs/cmd/raidz_test/raidz_bench.c25
-rw-r--r--sys/contrib/openzfs/cmd/raidz_test/raidz_test.c330
-rw-r--r--sys/contrib/openzfs/cmd/raidz_test/raidz_test.h9
-rw-r--r--sys/contrib/openzfs/cmd/zdb/zdb.c33
-rw-r--r--sys/contrib/openzfs/cmd/zed/agents/zfs_agents.c2
-rw-r--r--sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c10
-rw-r--r--sys/contrib/openzfs/cmd/zed/agents/zfs_retire.c11
-rwxr-xr-xsys/contrib/openzfs/cmd/zed/zed.d/all-syslog.sh44
-rwxr-xr-xsys/contrib/openzfs/cmd/zed/zed.d/history_event-zfs-list-cacher.sh.in2
-rw-r--r--sys/contrib/openzfs/cmd/zed/zed.d/zed.rc7
-rw-r--r--sys/contrib/openzfs/cmd/zfs/zfs_main.c166
-rw-r--r--sys/contrib/openzfs/cmd/zfs_ids_to_path/zfs_ids_to_path.c2
-rw-r--r--sys/contrib/openzfs/cmd/zgenhostid/zgenhostid.c10
-rw-r--r--sys/contrib/openzfs/cmd/zhack/zhack.c1
-rw-r--r--sys/contrib/openzfs/cmd/zpool/zpool_iter.c16
-rw-r--r--sys/contrib/openzfs/cmd/zpool/zpool_main.c148
-rw-r--r--sys/contrib/openzfs/cmd/zpool/zpool_util.h4
-rw-r--r--sys/contrib/openzfs/cmd/zpool/zpool_vdev.c393
-rw-r--r--sys/contrib/openzfs/cmd/zpool_influxdb/.gitignore1
-rw-r--r--sys/contrib/openzfs/cmd/zpool_influxdb/Makefile.am11
-rw-r--r--sys/contrib/openzfs/cmd/zpool_influxdb/README.md294
-rw-r--r--sys/contrib/openzfs/cmd/zpool_influxdb/dashboards/README.md3
-rw-r--r--sys/contrib/openzfs/cmd/zpool_influxdb/dashboards/grafana/ZFS-pool-latency-heatmaps-influxdb.json1667
-rw-r--r--sys/contrib/openzfs/cmd/zpool_influxdb/telegraf.d/README.md7
-rw-r--r--sys/contrib/openzfs/cmd/zpool_influxdb/telegraf.d/exec_zpool_influxdb.conf15
-rw-r--r--sys/contrib/openzfs/cmd/zpool_influxdb/telegraf.d/execd_zpool_influxdb.conf23
-rw-r--r--sys/contrib/openzfs/cmd/zpool_influxdb/zpool_influxdb.c843
-rw-r--r--sys/contrib/openzfs/cmd/zstream/zstream_redup.c2
-rw-r--r--sys/contrib/openzfs/cmd/ztest/ztest.c293
-rw-r--r--sys/contrib/openzfs/config/Abigail.am29
-rw-r--r--sys/contrib/openzfs/config/always-python.m42
-rw-r--r--sys/contrib/openzfs/config/always-pyzfs.m42
-rw-r--r--sys/contrib/openzfs/config/always-sed.m42
-rw-r--r--sys/contrib/openzfs/config/deb.am14
-rw-r--r--sys/contrib/openzfs/config/kernel-acl.m44
-rw-r--r--sys/contrib/openzfs/config/kernel-bio.m44
-rw-r--r--sys/contrib/openzfs/config/kernel-blk-queue.m44
-rw-r--r--sys/contrib/openzfs/config/kernel-blkdev.m4137
-rw-r--r--sys/contrib/openzfs/config/kernel-config-defined.m42
-rw-r--r--sys/contrib/openzfs/config/kernel-fpu.m44
-rw-r--r--sys/contrib/openzfs/config/kernel-generic_io_acct.m447
-rw-r--r--sys/contrib/openzfs/config/kernel-get-disk-and-module.m424
-rw-r--r--sys/contrib/openzfs/config/kernel-hotplug.m426
-rw-r--r--sys/contrib/openzfs/config/kernel-make-request-fn.m473
-rw-r--r--sys/contrib/openzfs/config/kernel-objtool.m425
-rw-r--r--sys/contrib/openzfs/config/kernel-percpu.m426
-rw-r--r--sys/contrib/openzfs/config/kernel-revalidate-disk-size.m446
-rw-r--r--sys/contrib/openzfs/config/kernel-rwsem.m428
-rw-r--r--sys/contrib/openzfs/config/kernel-vfs-iov_iter.m4206
-rw-r--r--sys/contrib/openzfs/config/kernel.m450
-rw-r--r--sys/contrib/openzfs/config/mount-helper.m42
-rw-r--r--sys/contrib/openzfs/config/user-dracut.m42
-rw-r--r--sys/contrib/openzfs/config/user-libexec.m42
-rw-r--r--sys/contrib/openzfs/config/user-makedev.m414
-rw-r--r--sys/contrib/openzfs/config/user-systemd.m410
-rw-r--r--sys/contrib/openzfs/config/user-sysvinit.m42
-rw-r--r--sys/contrib/openzfs/config/user-udev.m44
-rw-r--r--sys/contrib/openzfs/config/zfs-build.m42
-rw-r--r--sys/contrib/openzfs/configure.ac9
-rwxr-xr-xsys/contrib/openzfs/contrib/dracut/90zfs/export-zfs.sh.in2
-rwxr-xr-xsys/contrib/openzfs/contrib/dracut/90zfs/module-setup.sh.in8
-rwxr-xr-xsys/contrib/openzfs/contrib/dracut/90zfs/mount-zfs.sh.in4
-rwxr-xr-xsys/contrib/openzfs/contrib/dracut/90zfs/parse-zfs.sh.in2
-rwxr-xr-xsys/contrib/openzfs/contrib/dracut/90zfs/zfs-generator.sh.in23
-rwxr-xr-xsys/contrib/openzfs/contrib/dracut/90zfs/zfs-lib.sh.in4
-rwxr-xr-xsys/contrib/openzfs/contrib/dracut/90zfs/zfs-load-key.sh.in14
-rwxr-xr-xsys/contrib/openzfs/contrib/dracut/90zfs/zfs-needshutdown.sh.in4
-rw-r--r--sys/contrib/openzfs/contrib/initramfs/hooks/zfsunlock.in2
-rw-r--r--sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c54
-rw-r--r--sys/contrib/openzfs/etc/systemd/system/zfs-share.service.in1
-rw-r--r--sys/contrib/openzfs/etc/systemd/system/zfs-volume-wait.service.in1
-rw-r--r--sys/contrib/openzfs/etc/systemd/system/zfs-zed.service.in1
-rw-r--r--sys/contrib/openzfs/include/libzfs.h7
-rw-r--r--sys/contrib/openzfs/include/os/freebsd/spl/sys/misc.h8
-rw-r--r--sys/contrib/openzfs/include/os/freebsd/spl/sys/mod_os.h12
-rw-r--r--sys/contrib/openzfs/include/os/freebsd/spl/sys/policy.h3
-rw-r--r--sys/contrib/openzfs/include/os/freebsd/spl/sys/sysmacros.h1
-rw-r--r--sys/contrib/openzfs/include/os/freebsd/spl/sys/types.h2
-rw-r--r--sys/contrib/openzfs/include/os/freebsd/spl/sys/uio.h23
-rw-r--r--sys/contrib/openzfs/include/os/freebsd/zfs/sys/Makefile.am2
-rw-r--r--sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_context_os.h1
-rw-r--r--sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_vnops.h5
-rw-r--r--sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_znode_impl.h7
-rw-r--r--sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h117
-rw-r--r--sys/contrib/openzfs/include/os/linux/kernel/linux/mod_compat.h14
-rw-r--r--sys/contrib/openzfs/include/os/linux/spl/sys/sysmacros.h1
-rw-r--r--sys/contrib/openzfs/include/os/linux/spl/sys/taskq.h5
-rw-r--r--sys/contrib/openzfs/include/os/linux/spl/sys/uio.h115
-rw-r--r--sys/contrib/openzfs/include/os/linux/spl/sys/vnode.h6
-rw-r--r--sys/contrib/openzfs/include/os/linux/zfs/sys/Makefile.am2
-rw-r--r--sys/contrib/openzfs/include/os/linux/zfs/sys/policy.h4
-rw-r--r--sys/contrib/openzfs/include/os/linux/zfs/sys/trace_acl.h6
-rw-r--r--sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_vnops.h13
-rw-r--r--sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_znode_impl.h13
-rw-r--r--sys/contrib/openzfs/include/os/linux/zfs/sys/zpl.h9
-rw-r--r--sys/contrib/openzfs/include/sys/Makefile.am2
-rw-r--r--sys/contrib/openzfs/include/sys/arc.h6
-rw-r--r--sys/contrib/openzfs/include/sys/arc_impl.h20
-rw-r--r--sys/contrib/openzfs/include/sys/dbuf.h35
-rw-r--r--sys/contrib/openzfs/include/sys/dmu.h12
-rw-r--r--sys/contrib/openzfs/include/sys/dmu_impl.h7
-rw-r--r--sys/contrib/openzfs/include/sys/dmu_objset.h4
-rw-r--r--sys/contrib/openzfs/include/sys/dmu_zfetch.h16
-rw-r--r--sys/contrib/openzfs/include/sys/dsl_dataset.h1
-rw-r--r--sys/contrib/openzfs/include/sys/dsl_scan.h1
-rw-r--r--sys/contrib/openzfs/include/sys/frame.h4
-rw-r--r--sys/contrib/openzfs/include/sys/fs/zfs.h17
-rw-r--r--sys/contrib/openzfs/include/sys/metaslab.h1
-rw-r--r--sys/contrib/openzfs/include/sys/metaslab_impl.h52
-rw-r--r--sys/contrib/openzfs/include/sys/spa_impl.h5
-rw-r--r--sys/contrib/openzfs/include/sys/txg.h1
-rw-r--r--sys/contrib/openzfs/include/sys/vdev.h16
-rw-r--r--sys/contrib/openzfs/include/sys/vdev_draid.h110
-rw-r--r--sys/contrib/openzfs/include/sys/vdev_impl.h55
-rw-r--r--sys/contrib/openzfs/include/sys/vdev_raidz.h17
-rw-r--r--sys/contrib/openzfs/include/sys/vdev_raidz_impl.h61
-rw-r--r--sys/contrib/openzfs/include/sys/vdev_rebuild.h4
-rw-r--r--sys/contrib/openzfs/include/sys/zfs_context.h1
-rw-r--r--sys/contrib/openzfs/include/sys/zfs_vnops.h55
-rw-r--r--sys/contrib/openzfs/include/sys/zfs_znode.h1
-rw-r--r--sys/contrib/openzfs/include/sys/zio.h1
-rw-r--r--sys/contrib/openzfs/include/sys/zvol_impl.h2
-rw-r--r--sys/contrib/openzfs/include/zfeature_common.h1
-rw-r--r--sys/contrib/openzfs/lib/Makefile.am19
-rw-r--r--sys/contrib/openzfs/lib/libnvpair/Makefile.am10
-rw-r--r--sys/contrib/openzfs/lib/libnvpair/libnvpair.abi2805
-rw-r--r--sys/contrib/openzfs/lib/libnvpair/libnvpair.suppr2
-rw-r--r--sys/contrib/openzfs/lib/libspl/include/sys/uio.h41
-rw-r--r--sys/contrib/openzfs/lib/libuutil/Makefile.am10
-rw-r--r--sys/contrib/openzfs/lib/libuutil/libuutil.abi1608
-rw-r--r--sys/contrib/openzfs/lib/libuutil/libuutil.suppr2
-rw-r--r--sys/contrib/openzfs/lib/libzfs/Makefile.am14
-rw-r--r--sys/contrib/openzfs/lib/libzfs/libzfs.abi4879
-rw-r--r--sys/contrib/openzfs/lib/libzfs/libzfs.suppr13
-rw-r--r--sys/contrib/openzfs/lib/libzfs/libzfs_dataset.c93
-rw-r--r--sys/contrib/openzfs/lib/libzfs/libzfs_import.c1
-rw-r--r--sys/contrib/openzfs/lib/libzfs/libzfs_pool.c117
-rw-r--r--sys/contrib/openzfs/lib/libzfs/libzfs_util.c10
-rw-r--r--sys/contrib/openzfs/lib/libzfs_core/Makefile.am11
-rw-r--r--sys/contrib/openzfs/lib/libzfs_core/libzfs_core.abi2820
-rw-r--r--sys/contrib/openzfs/lib/libzfs_core/libzfs_core.suppr5
-rw-r--r--sys/contrib/openzfs/lib/libzfsbootenv/Makefile.am6
-rw-r--r--sys/contrib/openzfs/lib/libzfsbootenv/libzfsbootenv.abi212
-rw-r--r--sys/contrib/openzfs/lib/libzfsbootenv/libzfsbootenv.pc.in2
-rw-r--r--sys/contrib/openzfs/lib/libzfsbootenv/libzfsbootenv.suppr2
-rw-r--r--sys/contrib/openzfs/lib/libzpool/Makefile.am10
-rw-r--r--sys/contrib/openzfs/lib/libzutil/zutil_import.c7
-rw-r--r--sys/contrib/openzfs/man/man1/arcstat.1112
-rw-r--r--sys/contrib/openzfs/man/man1/raidz_test.19
-rw-r--r--sys/contrib/openzfs/man/man1/ztest.123
-rw-r--r--sys/contrib/openzfs/man/man5/zfs-module-parameters.5136
-rw-r--r--sys/contrib/openzfs/man/man5/zpool-features.534
-rw-r--r--sys/contrib/openzfs/man/man8/Makefile.am3
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-allow.836
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-bookmark.84
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-clone.86
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-create.820
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-destroy.814
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-diff.88
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-hold.814
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-jail.810
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-list.86
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-load-key.818
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-mount.814
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-program.85
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-project.818
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-promote.86
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-receive.818
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-rename.818
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-rollback.86
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-send.826
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-set.814
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-share.810
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-snapshot.86
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-upgrade.814
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-userspace.814
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-wait.86
-rw-r--r--sys/contrib/openzfs/man/man8/zgenhostid.83
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-add.86
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-attach.86
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-checkpoint.86
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-clear.86
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-create.88
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-destroy.86
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-detach.86
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-events.86
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-export.88
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-get.810
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-history.86
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-import.814
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-initialize.86
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-iostat.86
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-labelclear.86
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-list.88
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-offline.810
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-reguid.86
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-remove.810
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-reopen.86
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-replace.86
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-resilver.86
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-scrub.88
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-split.86
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-status.86
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-sync.86
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-trim.86
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-upgrade.814
-rw-r--r--sys/contrib/openzfs/man/man8/zpool-wait.86
-rw-r--r--sys/contrib/openzfs/man/man8/zpool_influxdb.893
-rw-r--r--sys/contrib/openzfs/man/man8/zpoolconcepts.878
-rw-r--r--sys/contrib/openzfs/man/man8/zpoolprops.815
-rw-r--r--sys/contrib/openzfs/module/Makefile.bsd39
-rw-r--r--sys/contrib/openzfs/module/icp/algs/modes/gcm.c54
-rw-r--r--sys/contrib/openzfs/module/icp/algs/modes/modes.c8
-rw-r--r--sys/contrib/openzfs/module/icp/asm-x86_64/modes/aesni-gcm-x86_64.S24
-rw-r--r--sys/contrib/openzfs/module/icp/core/kcf_sched.c4
-rw-r--r--sys/contrib/openzfs/module/icp/include/modes/modes.h8
-rw-r--r--sys/contrib/openzfs/module/icp/io/aes.c18
-rw-r--r--sys/contrib/openzfs/module/lua/lapi.c2
-rw-r--r--sys/contrib/openzfs/module/os/freebsd/spl/spl_policy.c5
-rw-r--r--sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c43
-rw-r--r--sys/contrib/openzfs/module/os/freebsd/zfs/arc_os.c10
-rw-r--r--sys/contrib/openzfs/module/os/freebsd/zfs/sysctl_os.c12
-rw-r--r--sys/contrib/openzfs/module/os/freebsd/zfs/vdev_file.c70
-rw-r--r--sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c36
-rw-r--r--sys/contrib/openzfs/module/os/freebsd/zfs/zfs_file_os.c6
-rw-r--r--sys/contrib/openzfs/module/os/freebsd/zfs/zfs_onexit_os.c70
-rw-r--r--sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c3
-rw-r--r--sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops.c877
-rw-r--r--sys/contrib/openzfs/module/os/freebsd/zfs/zfs_znode.c18
-rw-r--r--sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c15
-rw-r--r--sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c248
-rw-r--r--sys/contrib/openzfs/module/os/linux/spl/spl-taskq.c132
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/Makefile.in3
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/abd_os.c2
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/arc_os.c88
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/policy.c5
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c31
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/vdev_file.c18
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/zfs_ctldir.c1
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c4
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops.c1091
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c5
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/zio_crypt.c15
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/zpl_ctldir.c25
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c354
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/zpl_inode.c10
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/zpl_super.c23
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c24
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c177
-rw-r--r--sys/contrib/openzfs/module/zcommon/Makefile.in1
-rw-r--r--sys/contrib/openzfs/module/zcommon/zfeature_common.c6
-rw-r--r--sys/contrib/openzfs/module/zcommon/zfs_fletcher.c69
-rw-r--r--sys/contrib/openzfs/module/zcommon/zfs_namecheck.c4
-rw-r--r--sys/contrib/openzfs/module/zcommon/zfs_prop.c6
-rw-r--r--sys/contrib/openzfs/module/zcommon/zfs_uio.c173
-rw-r--r--sys/contrib/openzfs/module/zfs/Makefile.in3
-rw-r--r--sys/contrib/openzfs/module/zfs/abd.c14
-rw-r--r--sys/contrib/openzfs/module/zfs/aggsum.c9
-rw-r--r--sys/contrib/openzfs/module/zfs/arc.c306
-rw-r--r--sys/contrib/openzfs/module/zfs/dbuf.c371
-rw-r--r--sys/contrib/openzfs/module/zfs/dmu.c226
-rw-r--r--sys/contrib/openzfs/module/zfs/dmu_object.c4
-rw-r--r--sys/contrib/openzfs/module/zfs/dmu_objset.c206
-rw-r--r--sys/contrib/openzfs/module/zfs/dmu_recv.c294
-rw-r--r--sys/contrib/openzfs/module/zfs/dmu_redact.c2
-rw-r--r--sys/contrib/openzfs/module/zfs/dmu_send.c6
-rw-r--r--sys/contrib/openzfs/module/zfs/dmu_tx.c3
-rw-r--r--sys/contrib/openzfs/module/zfs/dmu_zfetch.c122
-rw-r--r--sys/contrib/openzfs/module/zfs/dnode.c13
-rw-r--r--sys/contrib/openzfs/module/zfs/dnode_sync.c6
-rw-r--r--sys/contrib/openzfs/module/zfs/dsl_bookmark.c114
-rw-r--r--sys/contrib/openzfs/module/zfs/dsl_crypt.c11
-rw-r--r--sys/contrib/openzfs/module/zfs/dsl_dataset.c6
-rw-r--r--sys/contrib/openzfs/module/zfs/dsl_pool.c49
-rw-r--r--sys/contrib/openzfs/module/zfs/dsl_scan.c13
-rw-r--r--sys/contrib/openzfs/module/zfs/metaslab.c238
-rw-r--r--sys/contrib/openzfs/module/zfs/mmp.c11
-rw-r--r--sys/contrib/openzfs/module/zfs/multilist.c9
-rw-r--r--sys/contrib/openzfs/module/zfs/spa.c158
-rw-r--r--sys/contrib/openzfs/module/zfs/spa_history.c2
-rw-r--r--sys/contrib/openzfs/module/zfs/spa_misc.c20
-rw-r--r--sys/contrib/openzfs/module/zfs/txg.c9
-rw-r--r--sys/contrib/openzfs/module/zfs/vdev.c387
-rw-r--r--sys/contrib/openzfs/module/zfs/vdev_draid.c2984
-rw-r--r--sys/contrib/openzfs/module/zfs/vdev_draid_rand.c40
-rw-r--r--sys/contrib/openzfs/module/zfs/vdev_indirect.c39
-rw-r--r--sys/contrib/openzfs/module/zfs/vdev_initialize.c141
-rw-r--r--sys/contrib/openzfs/module/zfs/vdev_label.c62
-rw-r--r--sys/contrib/openzfs/module/zfs/vdev_mirror.c137
-rw-r--r--sys/contrib/openzfs/module/zfs/vdev_missing.c18
-rw-r--r--sys/contrib/openzfs/module/zfs/vdev_queue.c134
-rw-r--r--sys/contrib/openzfs/module/zfs/vdev_raidz.c1864
-rw-r--r--sys/contrib/openzfs/module/zfs/vdev_raidz_math.c14
-rw-r--r--sys/contrib/openzfs/module/zfs/vdev_raidz_math_impl.h313
-rw-r--r--sys/contrib/openzfs/module/zfs/vdev_rebuild.c231
-rw-r--r--sys/contrib/openzfs/module/zfs/vdev_removal.c100
-rw-r--r--sys/contrib/openzfs/module/zfs/vdev_root.c9
-rw-r--r--sys/contrib/openzfs/module/zfs/vdev_trim.c153
-rw-r--r--sys/contrib/openzfs/module/zfs/zcp.c7
-rw-r--r--sys/contrib/openzfs/module/zfs/zfs_fm.c4
-rw-r--r--sys/contrib/openzfs/module/zfs/zfs_ioctl.c77
-rw-r--r--sys/contrib/openzfs/module/zfs/zfs_vnops.c895
-rw-r--r--sys/contrib/openzfs/module/zfs/zio.c72
-rw-r--r--sys/contrib/openzfs/module/zfs/zio_inject.c6
-rw-r--r--sys/contrib/openzfs/module/zfs/zvol.c22
-rw-r--r--sys/contrib/openzfs/module/zstd/zfs_zstd.c52
-rw-r--r--sys/contrib/openzfs/rpm/generic/zfs-kmod.spec.in4
-rw-r--r--sys/contrib/openzfs/rpm/generic/zfs.spec.in77
-rw-r--r--sys/contrib/openzfs/scripts/Makefile.am3
-rwxr-xr-xsys/contrib/openzfs/scripts/commitcheck.sh113
-rw-r--r--sys/contrib/openzfs/scripts/common.sh.in1
-rwxr-xr-xsys/contrib/openzfs/scripts/dkms.mkconf1
-rwxr-xr-xsys/contrib/openzfs/scripts/zfs-tests.sh12
-rwxr-xr-xsys/contrib/openzfs/scripts/zfs.sh3
-rwxr-xr-xsys/contrib/openzfs/scripts/zloop.sh54
-rw-r--r--sys/contrib/openzfs/tests/runfiles/Makefile.am1
-rw-r--r--sys/contrib/openzfs/tests/runfiles/common.run54
-rw-r--r--sys/contrib/openzfs/tests/runfiles/linux.run3
-rw-r--r--sys/contrib/openzfs/tests/runfiles/sanity.run618
-rwxr-xr-xsys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in3
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/cmd/Makefile.am1
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/cmd/btree_test/btree_test.c2
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/cmd/draid/.gitignore1
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/cmd/draid/Makefile.am15
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/cmd/draid/draid.c1414
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/cmd/libzfs_input_check/libzfs_input_check.c2
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/include/blkdev.shlib89
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/include/commands.cfg5
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/include/libtest.shlib41
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/include/tunables.cfg3
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/Makefile.am3
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/alloc_class/alloc_class_012_pos.ksh5
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/checksum/Makefile.am3
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/checksum/filetest_001_pos.ksh5
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/checksum/filetest_002_pos.ksh91
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_block_size_histogram.ksh8
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies_002_pos.ksh2
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_create/Makefile.am1
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_create/zfs_create_001_pos.ksh6
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_create/zfs_create_nomount.ksh51
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_mount/Makefile.am2
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount.kshlib3
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_013_pos.ksh78
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_014_neg.ksh68
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_all_mountpoints.ksh2
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_share/zfs_share_concurrent_shares.ksh12
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_009_pos.ksh17
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/Makefile.am3
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_001_pos.ksh20
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_003_pos.ksh4
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_dryrun_output.ksh175
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_create/Makefile.am8
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_001_pos.ksh4
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_005_pos.ksh8
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_006_pos.ksh22
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_007_neg.ksh9
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_009_neg.ksh2
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_010_neg.ksh7
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_011_neg.ksh14
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_draid_001_pos.ksh75
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_draid_002_pos.ksh82
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_draid_003_pos.ksh112
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_draid_004_pos.ksh43
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_dryrun_output.ksh138
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_expand/zpool_expand_001_pos.ksh12
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_expand/zpool_expand_002_pos.ksh27
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_expand/zpool_expand_003_neg.ksh2
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_expand/zpool_expand_004_pos.ksh2
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg1
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/Makefile.am2
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_cachefile_device_added.ksh2
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_cachefile_device_replaced.ksh6
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_cachefile_shared_device.ksh1
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_paths_changed.ksh2
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_config_changed.ksh1
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_device_replaced.ksh5
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/setup.ksh2
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import.cfg3
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import.kshlib16
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_007_pos.ksh2
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_008_pos.ksh2
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_010_pos.ksh4
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_016_pos.ksh91
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_017_pos.ksh92
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_missing_001_pos.ksh21
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_missing_002_pos.ksh23
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_missing_003_pos.ksh5
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_initialize/zpool_initialize_verify_initialized.ksh49
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_split/Makefile.am3
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_split/zpool_split_dryrun_output.ksh152
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_trim/zpool_trim.kshlib17
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_trim/zpool_trim_online_offline.ksh8
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_trim/zpool_trim_start_and_cancel_neg.ksh6
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_wait/scan/zpool_wait_replace_cancel.ksh1
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/events/events_001_pos.ksh26
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/events/events_002_pos.ksh2
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/events/events_common.kshlib69
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/auto_offline_001_pos.ksh10
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/auto_spare_001_pos.ksh47
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/auto_spare_002_pos.ksh7
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/auto_spare_ashift.ksh4
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/auto_spare_multiple.ksh102
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/fault/auto_spare_shared.ksh4
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/l2arc_arcstats_pos.ksh107
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/l2arc_l2miss_pos.ksh94
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/l2arc/l2arc_mfuonly_pos.ksh94
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/persist_l2arc/Makefile.am7
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/persist_l2arc/cleanup.ksh6
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/persist_l2arc/persist_l2arc.cfg5
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/persist_l2arc/persist_l2arc_001_pos.ksh21
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/persist_l2arc/persist_l2arc_002_pos.ksh19
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/persist_l2arc/persist_l2arc_003_neg.ksh6
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/persist_l2arc/persist_l2arc_004_pos.ksh19
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/persist_l2arc/persist_l2arc_005_pos.ksh19
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/persist_l2arc/persist_l2arc_006_pos.ksh21
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/persist_l2arc/persist_l2arc_007_pos.ksh26
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/persist_l2arc/persist_l2arc_008_pos.ksh48
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/persist_l2arc/setup.ksh3
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/raidz/Makefile.am4
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/raidz/raidz_003_pos.ksh41
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/raidz/raidz_004_pos.ksh41
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/redundancy/Makefile.am15
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/redundancy/redundancy.kshlib49
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/redundancy/redundancy_001_pos.ksh4
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/redundancy/redundancy_002_pos.ksh4
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/redundancy/redundancy_draid1.ksh78
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/redundancy/redundancy_draid2.ksh85
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/redundancy/redundancy_draid3.ksh85
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/redundancy/redundancy_draid_spare1.ksh107
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/redundancy/redundancy_draid_spare2.ksh80
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/redundancy/redundancy_draid_spare3.ksh197
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/redundancy/redundancy_raidz3.ksh84
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/removal/Makefile.am3
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/removal/remove_attach_mirror.ksh73
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/replacement/attach_rebuild.ksh4
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/replacement/attach_resilver.ksh4
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/replacement/detach.ksh4
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/replacement/rebuild_raidz.ksh7
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/replacement/replace_rebuild.ksh4
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/replacement/replace_resilver.ksh4
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/trim/autotrim_config.ksh10
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/trim/autotrim_integrity.ksh2
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/trim/autotrim_trim_integrity.ksh2
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/trim/trim_config.ksh10
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/trim/trim_integrity.ksh2
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/userquota/Makefile.am3
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/userquota/userspace_encrypted.ksh85
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/userquota/userspace_send_encrypted.ksh108
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/xattr/xattr_004_pos.ksh34
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/zpool_influxdb/Makefile.am5
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/zpool_influxdb/cleanup.ksh29
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/zpool_influxdb/setup.ksh29
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/functional/zpool_influxdb/zpool_influxdb.ksh71
471 files changed, 34096 insertions, 6691 deletions
diff --git a/sys/contrib/openzfs/.github/CONTRIBUTING.md b/sys/contrib/openzfs/.github/CONTRIBUTING.md
index 9bc2e7ef0684..f28a747e82c4 100644
--- a/sys/contrib/openzfs/.github/CONTRIBUTING.md
+++ b/sys/contrib/openzfs/.github/CONTRIBUTING.md
@@ -126,8 +126,8 @@ feature needed? What problem does it solve?
#### General
-* All pull requests must be based on the current master branch and apply
-without conflicts.
+* All pull requests, except backports and releases, must be based on the current master branch
+and should apply without conflicts.
* Please attempt to limit pull requests to a single commit which resolves
one specific issue.
* Make sure your commit messages are in the correct format. See the
@@ -230,70 +230,6 @@ attempting to solve.
Signed-off-by: Contributor <contributor@email.com>
```
-#### OpenZFS Patch Ports
-If you are porting OpenZFS patches, the commit message must meet
-the following guidelines:
-* The first line must be the summary line from the most important OpenZFS commit being ported.
-It must begin with `OpenZFS dddd, dddd - ` where `dddd` are OpenZFS issue numbers.
-* Provides a `Authored by:` line to attribute each patch for each original author.
-* Provides the `Reviewed by:` and `Approved by:` lines from each original
-OpenZFS commit.
-* Provides a `Ported-by:` line with the developer's name followed by
-their email for each OpenZFS commit.
-* Provides a `OpenZFS-issue:` line with link for each original illumos
-issue.
-* Provides a `OpenZFS-commit:` line with link for each original OpenZFS commit.
-* If necessary, provide some porting notes to describe any deviations from
-the original OpenZFS commits.
-
-An example OpenZFS patch port commit message for a single patch is provided
-below.
-```
-OpenZFS 1234 - Summary from the original OpenZFS commit
-
-Authored by: Original Author <original@email.com>
-Reviewed by: Reviewer One <reviewer1@email.com>
-Reviewed by: Reviewer Two <reviewer2@email.com>
-Approved by: Approver One <approver1@email.com>
-Ported-by: ZFS Contributor <contributor@email.com>
-
-Provide some porting notes here if necessary.
-
-OpenZFS-issue: https://www.illumos.org/issues/1234
-OpenZFS-commit: https://github.com/openzfs/openzfs/commit/abcd1234
-```
-
-If necessary, multiple OpenZFS patches can be combined in a single port.
-This is useful when you are porting a new patch and its subsequent bug
-fixes. An example commit message is provided below.
-```
-OpenZFS 1234, 5678 - Summary of most important OpenZFS commit
-
-1234 Summary from original OpenZFS commit for 1234
-
-Authored by: Original Author <original@email.com>
-Reviewed by: Reviewer Two <reviewer2@email.com>
-Approved by: Approver One <approver1@email.com>
-Ported-by: ZFS Contributor <contributor@email.com>
-
-Provide some porting notes here for 1234 if necessary.
-
-OpenZFS-issue: https://www.illumos.org/issues/1234
-OpenZFS-commit: https://github.com/openzfs/openzfs/commit/abcd1234
-
-5678 Summary from original OpenZFS commit for 5678
-
-Authored by: Original Author2 <original2@email.com>
-Reviewed by: Reviewer One <reviewer1@email.com>
-Approved by: Approver Two <approver2@email.com>
-Ported-by: ZFS Contributor <contributor@email.com>
-
-Provide some porting notes here for 5678 if necessary.
-
-OpenZFS-issue: https://www.illumos.org/issues/5678
-OpenZFS-commit: https://github.com/openzfs/openzfs/commit/efgh5678
-```
-
#### Coverity Defect Fixes
If you are submitting a fix to a
[Coverity defect](https://scan.coverity.com/projects/zfsonlinux-zfs),
diff --git a/sys/contrib/openzfs/.github/ISSUE_TEMPLATE/bug_report.md b/sys/contrib/openzfs/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 000000000000..1dbb5f6edb55
--- /dev/null
+++ b/sys/contrib/openzfs/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,53 @@
+---
+name: Bug report
+about: Create a report to help us improve OpenZFS
+title: ''
+labels: 'Type: Defect, Status: Triage Needed'
+assignees: ''
+
+---
+
+<!-- Please fill out the following template, which will help other contributors address your issue. -->
+
+<!--
+Thank you for reporting an issue.
+
+*IMPORTANT* - Please check our issue tracker before opening a new issue.
+Additional valuable information can be found in the OpenZFS documentation
+and mailing list archives.
+
+Please fill in as much of the template as possible.
+-->
+
+### System information
+<!-- add version after "|" character -->
+Type | Version/Name
+ --- | ---
+Distribution Name |
+Distribution Version |
+Linux Kernel |
+Architecture |
+ZFS Version |
+SPL Version |
+<!--
+Commands to find ZFS/SPL versions:
+modinfo zfs | grep -iw version
+modinfo spl | grep -iw version
+-->
+
+### Describe the problem you're observing
+
+### Describe how to reproduce the problem
+
+### Include any warning/errors/backtraces from the system logs
+<!--
+*IMPORTANT* - Please mark logs and text output from terminal commands
+or else Github will not display them correctly.
+An example is provided below.
+
+Example:
+```
+this is an example how log text should be marked (wrap it with ```)
+```
+-->
+
diff --git a/sys/contrib/openzfs/.github/ISSUE_TEMPLATE/config.yml b/sys/contrib/openzfs/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 000000000000..dd8f0557a30c
--- /dev/null
+++ b/sys/contrib/openzfs/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,14 @@
+blank_issues_enabled: false
+contact_links:
+ - name: OpenZFS Questions
+ url: https://github.com/openzfs/zfs/discussions/new
+ about: Ask the community for help
+ - name: OpenZFS Community Support Mailing list (Linux)
+ url: https://zfsonlinux.topicbox.com/groups/zfs-discuss
+ about: Get community support for OpenZFS on Linux
+ - name: FreeBSD Community Support Mailing list
+ url: https://lists.freebsd.org/mailman/listinfo/freebsd-fs
+ about: Get community support for OpenZFS on FreeBSD
+ - name: OpenZFS on IRC
+ url: https://webchat.freenode.net/#openzfs
+ about: Use IRC to get community support for OpenZFS
diff --git a/sys/contrib/openzfs/.github/ISSUE_TEMPLATE/feature_request.md b/sys/contrib/openzfs/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 000000000000..9b50a4a3d96e
--- /dev/null
+++ b/sys/contrib/openzfs/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,33 @@
+---
+name: Feature request
+about: Suggest a feature for OpenZFS
+title: ''
+labels: 'Type: Feature'
+assignees: ''
+
+---
+
+<!--
+Thank you for suggesting a feature.
+
+Please check our issue tracker before opening a new feature request.
+Filling out the following template will help other contributors better understand your proposed feature.
+-->
+
+### Describe the feature would like to see added to OpenZFS
+
+<!--
+Provide a clear and concise description of the feature.
+-->
+
+### How will this feature improve OpenZFS?
+
+<!--
+What problem does this feature solve?
+-->
+
+### Additional context
+
+<!--
+Any additional information you can add about the proposal?
+-->
diff --git a/sys/contrib/openzfs/.github/codecov.yml b/sys/contrib/openzfs/.github/codecov.yml
new file mode 100644
index 000000000000..6d4932680e5c
--- /dev/null
+++ b/sys/contrib/openzfs/.github/codecov.yml
@@ -0,0 +1,25 @@
+codecov:
+ notify:
+ require_ci_to_pass: false # always post
+ after_n_builds: 2 # user and kernel
+
+coverage:
+ precision: 0 # 0 decimals of precision
+ round: nearest # Round to nearest precision point
+ range: "50...90" # red -> yellow -> green
+
+ status:
+ project:
+ default:
+ threshold: 1% # allow 1% coverage variance
+
+ patch:
+ default:
+ threshold: 1% # allow 1% coverage variance
+
+comment:
+ layout: "reach, diff, flags, footer"
+ behavior: once # update if exists; post new; skip if deleted
+ require_changes: yes # only post when coverage changes
+
+# ignore: Please place any ignores in config/ax_code_coverage.m4 instead
diff --git a/sys/contrib/openzfs/.github/no-response.yml b/sys/contrib/openzfs/.github/no-response.yml
new file mode 100644
index 000000000000..ef2656ec96ef
--- /dev/null
+++ b/sys/contrib/openzfs/.github/no-response.yml
@@ -0,0 +1,13 @@
+# Configuration for probot-no-response - https://github.com/probot/no-response
+
+# Number of days of inactivity before an Issue is closed for lack of response
+daysUntilClose: 31
+# Label requiring a response
+responseRequiredLabel: "Status: Feedback requested"
+# Comment to post when closing an Issue for lack of response. Set to `false` to disable
+closeComment: >
+ This issue has been automatically closed because there has been no response
+ to our request for more information from the original author. With only the
+ information that is currently in the issue, we don't have enough information
+ to take action. Please reach out if you have or find the answers we need so
+ that we can investigate further.
diff --git a/sys/contrib/openzfs/.github/stale.yml b/sys/contrib/openzfs/.github/stale.yml
new file mode 100644
index 000000000000..895cc8e803b2
--- /dev/null
+++ b/sys/contrib/openzfs/.github/stale.yml
@@ -0,0 +1,26 @@
+# Number of days of inactivity before an issue becomes stale
+daysUntilStale: 365
+# Number of days of inactivity before a stale issue is closed
+daysUntilClose: 90
+# Limit to only `issues` or `pulls`
+only: issues
+# Issues with these labels will never be considered stale
+exemptLabels:
+ - "Type: Feature"
+ - "Bot: Not Stale"
+ - "Status: Work in Progress"
+# Set to true to ignore issues in a project (defaults to false)
+exemptProjects: true
+# Set to true to ignore issues in a milestone (defaults to false)
+exemptMilestones: true
+# Set to true to ignore issues with an assignee (defaults to false)
+exemptAssignees: true
+# Label to use when marking an issue as stale
+staleLabel: "Status: Stale"
+# Comment to post when marking an issue as stale. Set to `false` to disable
+markComment: >
+ This issue has been automatically marked as "stale" because it has not had
+ any activity for a while. It will be closed in 90 days if no further activity occurs.
+ Thank you for your contributions.
+# Limit the number of actions per hour, from 1-30. Default is 30
+limitPerRun: 6
diff --git a/sys/contrib/openzfs/.github/workflows/checkstyle.yaml b/sys/contrib/openzfs/.github/workflows/checkstyle.yaml
new file mode 100644
index 000000000000..1707f5bb21db
--- /dev/null
+++ b/sys/contrib/openzfs/.github/workflows/checkstyle.yaml
@@ -0,0 +1,36 @@
+name: checkstyle
+
+on:
+ push:
+ pull_request:
+
+jobs:
+ checkstyle:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ ref: ${{ github.event.pull_request.head.sha }}
+ - name: Install dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install --yes -qq build-essential autoconf libtool gawk alien fakeroot linux-headers-$(uname -r)
+ sudo apt-get install --yes -qq zlib1g-dev uuid-dev libattr1-dev libblkid-dev libselinux-dev libudev-dev libssl-dev python-dev python-setuptools python-cffi python3 python3-dev python3-setuptools python3-cffi
+ # packages for tests
+ sudo apt-get install --yes -qq parted lsscsi ksh attr acl nfs-kernel-server fio
+ sudo apt-get install --yes -qq mandoc cppcheck pax-utils devscripts abigail-tools
+ sudo -E pip --quiet install flake8
+ - name: Prepare
+ run: |
+ sh ./autogen.sh
+ ./configure
+ make -j$(nproc)
+ - name: Checkstyle
+ run: |
+ make checkstyle
+ - name: Lint
+ run: |
+ make lint
+ - name: CheckABI
+ run: |
+ make checkabi
diff --git a/sys/contrib/openzfs/.github/workflows/zfs-tests.yml b/sys/contrib/openzfs/.github/workflows/zfs-tests.yml
new file mode 100644
index 000000000000..b075a78c7729
--- /dev/null
+++ b/sys/contrib/openzfs/.github/workflows/zfs-tests.yml
@@ -0,0 +1,58 @@
+name: zfs-tests-sanity
+
+on:
+ push:
+ pull_request:
+
+jobs:
+ tests:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ ref: ${{ github.event.pull_request.head.sha }}
+ - name: Install dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install --yes -qq build-essential autoconf libtool gdb lcov \
+ git alien fakeroot wget curl bc fio acl \
+ sysstat mdadm lsscsi parted gdebi attr dbench watchdog ksh \
+ nfs-kernel-server samba rng-tools xz-utils \
+ zlib1g-dev uuid-dev libblkid-dev libselinux-dev \
+ xfslibs-dev libattr1-dev libacl1-dev libudev-dev libdevmapper-dev \
+ libssl-dev libffi-dev libaio-dev libelf-dev libmount-dev \
+ libpam0g-dev pamtester python-dev python-setuptools python-cffi \
+ python3 python3-dev python3-setuptools python3-cffi
+ - name: Autogen.sh
+ run: |
+ sh autogen.sh
+ - name: Configure
+ run: |
+ ./configure --enable-debug --enable-debuginfo
+ - name: Make
+ run: |
+ make --no-print-directory -s pkg-utils pkg-kmod
+ - name: Install
+ run: |
+ sudo dpkg -i *.deb
+ # Update order of directories to search for modules, otherwise
+ # Ubuntu will load kernel-shipped ones.
+ sudo sed -i.bak 's/updates/extra updates/' /etc/depmod.d/ubuntu.conf
+ sudo depmod
+ sudo modprobe zfs
+ - name: Tests
+ run: |
+ /usr/share/zfs/zfs-tests.sh -v -s 3G -r sanity
+ - name: Prepare artifacts
+ if: failure()
+ run: |
+ RESULTS_PATH=$(readlink -f /var/tmp/test_results/current)
+ sudo dmesg > $RESULTS_PATH/dmesg
+ sudo cp /var/log/syslog $RESULTS_PATH/
+ sudo chmod +r $RESULTS_PATH/*
+ - uses: actions/upload-artifact@v2
+ if: failure()
+ with:
+ name: Test logs
+ path: /var/tmp/test_results/20*/
+ if-no-files-found: ignore
diff --git a/sys/contrib/openzfs/.github/workflows/zloop.yml b/sys/contrib/openzfs/.github/workflows/zloop.yml
new file mode 100644
index 000000000000..30785b14507a
--- /dev/null
+++ b/sys/contrib/openzfs/.github/workflows/zloop.yml
@@ -0,0 +1,67 @@
+name: zloop
+
+on:
+ push:
+ pull_request:
+
+jobs:
+ tests:
+ runs-on: ubuntu-latest
+ env:
+ TEST_DIR: /var/tmp/zloop
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ ref: ${{ github.event.pull_request.head.sha }}
+ - name: Install dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install --yes -qq build-essential autoconf libtool gdb \
+ git alien fakeroot \
+ zlib1g-dev uuid-dev libblkid-dev libselinux-dev \
+ xfslibs-dev libattr1-dev libacl1-dev libudev-dev libdevmapper-dev \
+ libssl-dev libffi-dev libaio-dev libelf-dev libmount-dev \
+ libpam0g-dev \
+ python-dev python-setuptools python-cffi \
+ python3 python3-dev python3-setuptools python3-cffi
+ - name: Autogen.sh
+ run: |
+ sh autogen.sh
+ - name: Configure
+ run: |
+ ./configure --enable-debug --enable-debuginfo
+ - name: Make
+ run: |
+ make --no-print-directory -s pkg-utils pkg-kmod
+ - name: Install
+ run: |
+ sudo dpkg -i *.deb
+ # Update order of directories to search for modules, otherwise
+ # Ubuntu will load kernel-shipped ones.
+ sudo sed -i.bak 's/updates/extra updates/' /etc/depmod.d/ubuntu.conf
+ sudo depmod
+ sudo modprobe zfs
+ - name: Tests
+ run: |
+ sudo mkdir -p $TEST_DIR
+ # run for 20 minutes to have a total runner time of 30 minutes
+ sudo /usr/share/zfs/zloop.sh -t 1200 -l -m1
+ - name: Prepare artifacts
+ if: failure()
+ run: |
+ sudo chmod +r -R $TEST_DIR/
+ - uses: actions/upload-artifact@v2
+ if: failure()
+ with:
+ name: Logs
+ path: |
+ /var/tmp/zloop/*/
+ !/var/tmp/zloop/*/vdev/
+ if-no-files-found: ignore
+ - uses: actions/upload-artifact@v2
+ if: failure()
+ with:
+ name: Pool files
+ path: |
+ /var/tmp/zloop/*/vdev/
+ if-no-files-found: ignore
diff --git a/sys/contrib/openzfs/META b/sys/contrib/openzfs/META
index 87ffae5f4c09..886da443357d 100644
--- a/sys/contrib/openzfs/META
+++ b/sys/contrib/openzfs/META
@@ -2,9 +2,9 @@ Meta: 1
Name: zfs
Branch: 1.0
Version: 2.0.0
-Release: rc3
+Release: rc1
Release-Tags: relext
License: CDDL
Author: OpenZFS
-Linux-Maximum: 5.9
+Linux-Maximum: 5.10
Linux-Minimum: 3.10
diff --git a/sys/contrib/openzfs/Makefile.am b/sys/contrib/openzfs/Makefile.am
index b409d2196f86..436b78d76282 100644
--- a/sys/contrib/openzfs/Makefile.am
+++ b/sys/contrib/openzfs/Makefile.am
@@ -136,6 +136,13 @@ shellcheck:
echo "skipping shellcheck because shellcheck is not installed"; \
fi
+PHONY += checkabi storeabi
+checkabi: lib
+ $(MAKE) -C lib checkabi
+
+storeabi: lib
+ $(MAKE) -C lib storeabi
+
PHONY += checkbashisms
checkbashisms:
@if type checkbashisms > /dev/null 2>&1; then \
@@ -152,9 +159,10 @@ checkbashisms:
-o -name 'smart' -prune \
-o -name 'paxcheck.sh' -prune \
-o -name 'make_gitrev.sh' -prune \
+ -o -name '90zfs' -prune \
-o -type f ! -name 'config*' \
! -name 'libtool' \
- -exec bash -c 'awk "NR==1 && /\#\!.*bin\/sh.*/ {print FILENAME;}" "{}"' \;); \
+ -exec sh -c 'awk "NR==1 && /\#\!.*bin\/sh.*/ {print FILENAME;}" "{}"' \;); \
else \
echo "skipping checkbashisms because checkbashisms is not installed"; \
fi
diff --git a/sys/contrib/openzfs/cmd/Makefile.am b/sys/contrib/openzfs/cmd/Makefile.am
index 88d32b1c538c..d99d1dc382cc 100644
--- a/sys/contrib/openzfs/cmd/Makefile.am
+++ b/sys/contrib/openzfs/cmd/Makefile.am
@@ -1,5 +1,6 @@
SUBDIRS = zfs zpool zdb zhack zinject zstream zstreamdump ztest
SUBDIRS += fsck_zfs vdev_id raidz_test zfs_ids_to_path
+SUBDIRS += zpool_influxdb
if USING_PYTHON
SUBDIRS += arcstat arc_summary dbufstat
diff --git a/sys/contrib/openzfs/cmd/arc_summary/arc_summary2 b/sys/contrib/openzfs/cmd/arc_summary/arc_summary2
index a925d32788ea..75b5697526f7 100755
--- a/sys/contrib/openzfs/cmd/arc_summary/arc_summary2
+++ b/sys/contrib/openzfs/cmd/arc_summary/arc_summary2
@@ -59,14 +59,20 @@ if sys.platform.startswith('freebsd'):
# Requires py27-sysctl on FreeBSD
import sysctl
+ def is_value(ctl):
+ return ctl.type != sysctl.CTLTYPE_NODE
+
def load_kstats(namespace):
"""Collect information on a specific subsystem of the ARC"""
base = 'kstat.zfs.misc.%s.' % namespace
- return [(kstat.name, D(kstat.value)) for kstat in sysctl.filter(base)]
+ fmt = lambda kstat: (kstat.name, D(kstat.value))
+ kstats = sysctl.filter(base)
+ return [fmt(kstat) for kstat in kstats if is_value(kstat)]
def load_tunables():
- return dict((ctl.name, ctl.value) for ctl in sysctl.filter('vfs.zfs'))
+ ctls = sysctl.filter('vfs.zfs')
+ return dict((ctl.name, ctl.value) for ctl in ctls if is_value(ctl))
elif sys.platform.startswith('linux'):
@@ -219,12 +225,30 @@ def get_arc_summary(Kstat):
deleted = Kstat["kstat.zfs.misc.arcstats.deleted"]
mutex_miss = Kstat["kstat.zfs.misc.arcstats.mutex_miss"]
evict_skip = Kstat["kstat.zfs.misc.arcstats.evict_skip"]
+ evict_l2_cached = Kstat["kstat.zfs.misc.arcstats.evict_l2_cached"]
+ evict_l2_eligible = Kstat["kstat.zfs.misc.arcstats.evict_l2_eligible"]
+ evict_l2_eligible_mfu = Kstat["kstat.zfs.misc.arcstats.evict_l2_eligible_mfu"]
+ evict_l2_eligible_mru = Kstat["kstat.zfs.misc.arcstats.evict_l2_eligible_mru"]
+ evict_l2_ineligible = Kstat["kstat.zfs.misc.arcstats.evict_l2_ineligible"]
+ evict_l2_skip = Kstat["kstat.zfs.misc.arcstats.evict_l2_skip"]
# ARC Misc.
output["arc_misc"] = {}
output["arc_misc"]["deleted"] = fHits(deleted)
- output["arc_misc"]['mutex_miss'] = fHits(mutex_miss)
- output["arc_misc"]['evict_skips'] = fHits(evict_skip)
+ output["arc_misc"]["mutex_miss"] = fHits(mutex_miss)
+ output["arc_misc"]["evict_skips"] = fHits(evict_skip)
+ output["arc_misc"]["evict_l2_skip"] = fHits(evict_l2_skip)
+ output["arc_misc"]["evict_l2_cached"] = fBytes(evict_l2_cached)
+ output["arc_misc"]["evict_l2_eligible"] = fBytes(evict_l2_eligible)
+ output["arc_misc"]["evict_l2_eligible_mfu"] = {
+ 'per': fPerc(evict_l2_eligible_mfu, evict_l2_eligible),
+ 'num': fBytes(evict_l2_eligible_mfu),
+ }
+ output["arc_misc"]["evict_l2_eligible_mru"] = {
+ 'per': fPerc(evict_l2_eligible_mru, evict_l2_eligible),
+ 'num': fBytes(evict_l2_eligible_mru),
+ }
+ output["arc_misc"]["evict_l2_ineligible"] = fBytes(evict_l2_ineligible)
# ARC Sizing
arc_size = Kstat["kstat.zfs.misc.arcstats.size"]
@@ -340,8 +364,26 @@ def _arc_summary(Kstat):
sys.stdout.write("\tDeleted:\t\t\t\t%s\n" % arc['arc_misc']['deleted'])
sys.stdout.write("\tMutex Misses:\t\t\t\t%s\n" %
arc['arc_misc']['mutex_miss'])
- sys.stdout.write("\tEvict Skips:\t\t\t\t%s\n" %
+ sys.stdout.write("\tEviction Skips:\t\t\t\t%s\n" %
arc['arc_misc']['evict_skips'])
+ sys.stdout.write("\tEviction Skips Due to L2 Writes:\t%s\n" %
+ arc['arc_misc']['evict_l2_skip'])
+ sys.stdout.write("\tL2 Cached Evictions:\t\t\t%s\n" %
+ arc['arc_misc']['evict_l2_cached'])
+ sys.stdout.write("\tL2 Eligible Evictions:\t\t\t%s\n" %
+ arc['arc_misc']['evict_l2_eligible'])
+ sys.stdout.write("\tL2 Eligible MFU Evictions:\t%s\t%s\n" % (
+ arc['arc_misc']['evict_l2_eligible_mfu']['per'],
+ arc['arc_misc']['evict_l2_eligible_mfu']['num'],
+ )
+ )
+ sys.stdout.write("\tL2 Eligible MRU Evictions:\t%s\t%s\n" % (
+ arc['arc_misc']['evict_l2_eligible_mru']['per'],
+ arc['arc_misc']['evict_l2_eligible_mru']['num'],
+ )
+ )
+ sys.stdout.write("\tL2 Ineligible Evictions:\t\t%s\n" %
+ arc['arc_misc']['evict_l2_ineligible'])
sys.stdout.write("\n")
# ARC Sizing
@@ -677,6 +719,11 @@ def get_l2arc_summary(Kstat):
l2_writes_done = Kstat["kstat.zfs.misc.arcstats.l2_writes_done"]
l2_writes_error = Kstat["kstat.zfs.misc.arcstats.l2_writes_error"]
l2_writes_sent = Kstat["kstat.zfs.misc.arcstats.l2_writes_sent"]
+ l2_mfu_asize = Kstat["kstat.zfs.misc.arcstats.l2_mfu_asize"]
+ l2_mru_asize = Kstat["kstat.zfs.misc.arcstats.l2_mru_asize"]
+ l2_prefetch_asize = Kstat["kstat.zfs.misc.arcstats.l2_prefetch_asize"]
+ l2_bufc_data_asize = Kstat["kstat.zfs.misc.arcstats.l2_bufc_data_asize"]
+ l2_bufc_metadata_asize = Kstat["kstat.zfs.misc.arcstats.l2_bufc_metadata_asize"]
l2_access_total = (l2_hits + l2_misses)
output['l2_health_count'] = (l2_writes_error + l2_cksum_bad + l2_io_error)
@@ -699,7 +746,7 @@ def get_l2arc_summary(Kstat):
output["io_errors"] = fHits(l2_io_error)
output["l2_arc_size"] = {}
- output["l2_arc_size"]["adative"] = fBytes(l2_size)
+ output["l2_arc_size"]["adaptive"] = fBytes(l2_size)
output["l2_arc_size"]["actual"] = {
'per': fPerc(l2_asize, l2_size),
'num': fBytes(l2_asize)
@@ -708,6 +755,26 @@ def get_l2arc_summary(Kstat):
'per': fPerc(l2_hdr_size, l2_size),
'num': fBytes(l2_hdr_size),
}
+ output["l2_arc_size"]["mfu_asize"] = {
+ 'per': fPerc(l2_mfu_asize, l2_asize),
+ 'num': fBytes(l2_mfu_asize),
+ }
+ output["l2_arc_size"]["mru_asize"] = {
+ 'per': fPerc(l2_mru_asize, l2_asize),
+ 'num': fBytes(l2_mru_asize),
+ }
+ output["l2_arc_size"]["prefetch_asize"] = {
+ 'per': fPerc(l2_prefetch_asize, l2_asize),
+ 'num': fBytes(l2_prefetch_asize),
+ }
+ output["l2_arc_size"]["bufc_data_asize"] = {
+ 'per': fPerc(l2_bufc_data_asize, l2_asize),
+ 'num': fBytes(l2_bufc_data_asize),
+ }
+ output["l2_arc_size"]["bufc_metadata_asize"] = {
+ 'per': fPerc(l2_bufc_metadata_asize, l2_asize),
+ 'num': fBytes(l2_bufc_metadata_asize),
+ }
output["l2_arc_evicts"] = {}
output["l2_arc_evicts"]['lock_retries'] = fHits(l2_evict_lock_retry)
@@ -772,7 +839,7 @@ def _l2arc_summary(Kstat):
sys.stdout.write("\n")
sys.stdout.write("L2 ARC Size: (Adaptive)\t\t\t\t%s\n" %
- arc["l2_arc_size"]["adative"])
+ arc["l2_arc_size"]["adaptive"])
sys.stdout.write("\tCompressed:\t\t\t%s\t%s\n" % (
arc["l2_arc_size"]["actual"]["per"],
arc["l2_arc_size"]["actual"]["num"],
@@ -783,11 +850,36 @@ def _l2arc_summary(Kstat):
arc["l2_arc_size"]["head_size"]["num"],
)
)
+ sys.stdout.write("\tMFU Alloc. Size:\t\t%s\t%s\n" % (
+ arc["l2_arc_size"]["mfu_asize"]["per"],
+ arc["l2_arc_size"]["mfu_asize"]["num"],
+ )
+ )
+ sys.stdout.write("\tMRU Alloc. Size:\t\t%s\t%s\n" % (
+ arc["l2_arc_size"]["mru_asize"]["per"],
+ arc["l2_arc_size"]["mru_asize"]["num"],
+ )
+ )
+ sys.stdout.write("\tPrefetch Alloc. Size:\t\t%s\t%s\n" % (
+ arc["l2_arc_size"]["prefetch_asize"]["per"],
+ arc["l2_arc_size"]["prefetch_asize"]["num"],
+ )
+ )
+ sys.stdout.write("\tData (buf content) Alloc. Size:\t%s\t%s\n" % (
+ arc["l2_arc_size"]["bufc_data_asize"]["per"],
+ arc["l2_arc_size"]["bufc_data_asize"]["num"],
+ )
+ )
+ sys.stdout.write("\tMetadata (buf content) Size:\t%s\t%s\n" % (
+ arc["l2_arc_size"]["bufc_metadata_asize"]["per"],
+ arc["l2_arc_size"]["bufc_metadata_asize"]["num"],
+ )
+ )
sys.stdout.write("\n")
if arc["l2_arc_evicts"]['lock_retries'] != '0' or \
arc["l2_arc_evicts"]["reading"] != '0':
- sys.stdout.write("L2 ARC Evicts:\n")
+ sys.stdout.write("L2 ARC Evictions:\n")
sys.stdout.write("\tLock Retries:\t\t\t\t%s\n" %
arc["l2_arc_evicts"]['lock_retries'])
sys.stdout.write("\tUpon Reading:\t\t\t\t%s\n" %
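The MFU/MRU eviction lines added to arc_summary2 above pair a percentage with a byte count: each bucket is reported as its share of all L2-eligible evictions. A minimal standalone sketch of that calculation follows; pct() and human() are simplified stand-ins for the script's fPerc() and fBytes() helpers, and the sample values are made up:

def pct(part, total):
    # Percentage of total, guarding against an empty total.
    return "%.2f%%" % (100.0 * part / total) if total else "n/a"

def human(n):
    # Scale a byte count into a human-readable unit.
    for unit in ("B", "KiB", "MiB", "GiB", "TiB"):
        if n < 1024 or unit == "TiB":
            return "%.1f %s" % (n, unit)
        n /= 1024.0

evict_l2_eligible = 8 << 30        # sample totals, not real kstat output
evict_l2_eligible_mfu = 5 << 30
print("L2 Eligible MFU Evictions:\t%s\t%s" %
      (pct(evict_l2_eligible_mfu, evict_l2_eligible),
       human(evict_l2_eligible_mfu)))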
diff --git a/sys/contrib/openzfs/cmd/arc_summary/arc_summary3 b/sys/contrib/openzfs/cmd/arc_summary/arc_summary3
index 83cbf0f1728d..96f7990e1726 100755
--- a/sys/contrib/openzfs/cmd/arc_summary/arc_summary3
+++ b/sys/contrib/openzfs/cmd/arc_summary/arc_summary3
@@ -58,7 +58,6 @@ SECTION_PATHS = {'arc': 'arcstats',
'dmu': 'dmu_tx',
'l2arc': 'arcstats', # L2ARC stuff lives in arcstats
'vdev': 'vdev_cache_stats',
- 'xuio': 'xuio_stats',
'zfetch': 'zfetchstats',
'zil': 'zil'}
@@ -86,16 +85,24 @@ if sys.platform.startswith('freebsd'):
VDEV_CACHE_SIZE = 'vdev.cache_size'
+ def is_value(ctl):
+ return ctl.type != sysctl.CTLTYPE_NODE
+
+ def namefmt(ctl, base='vfs.zfs.'):
+ # base is removed from the name
+ cut = len(base)
+ return ctl.name[cut:]
+
def load_kstats(section):
base = 'kstat.zfs.misc.{section}.'.format(section=section)
- # base is removed from the name
- fmt = lambda kstat: '{name} : {value}'.format(name=kstat.name[len(base):],
+ fmt = lambda kstat: '{name} : {value}'.format(name=namefmt(kstat, base),
value=kstat.value)
- return [fmt(kstat) for kstat in sysctl.filter(base)]
+ kstats = sysctl.filter(base)
+ return [fmt(kstat) for kstat in kstats if is_value(kstat)]
def get_params(base):
- cut = 8 # = len('vfs.zfs.')
- return {ctl.name[cut:]: str(ctl.value) for ctl in sysctl.filter(base)}
+ ctls = sysctl.filter(base)
+ return {namefmt(ctl): str(ctl.value) for ctl in ctls if is_value(ctl)}
def get_tunable_params():
return get_params('vfs.zfs')
@@ -112,25 +119,8 @@ if sys.platform.startswith('freebsd'):
return '{} version {}'.format(name, version)
def get_descriptions(_request):
- # py-sysctl doesn't give descriptions, so we have to shell out.
- command = ['sysctl', '-d', 'vfs.zfs']
-
- # The recommended way to do this is with subprocess.run(). However,
- # some installed versions of Python are < 3.5, so we offer them
- # the option of doing it the old way (for now)
- if 'run' in dir(subprocess):
- info = subprocess.run(command, stdout=subprocess.PIPE,
- universal_newlines=True)
- lines = info.stdout.split('\n')
- else:
- info = subprocess.check_output(command, universal_newlines=True)
- lines = info.split('\n')
-
- def fmt(line):
- name, desc = line.split(':', 1)
- return (name.strip(), desc.strip())
-
- return dict([fmt(line) for line in lines if len(line) > 0])
+ ctls = sysctl.filter('vfs.zfs')
+ return {namefmt(ctl): ctl.description for ctl in ctls if is_value(ctl)}
elif sys.platform.startswith('linux'):
@@ -397,8 +387,12 @@ def format_raw_line(name, value):
if ARGS.alt:
result = '{0}{1}={2}'.format(INDENT, name, value)
else:
- spc = LINE_LENGTH-(len(INDENT)+len(value))
- result = '{0}{1:<{spc}}{2}'.format(INDENT, name, value, spc=spc)
+ # Right-align the value within the line length if it fits,
+ # otherwise just separate it from the name by a single space.
+ fit = LINE_LENGTH - len(INDENT) - len(name)
+ overflow = len(value) + 1
+ w = max(fit, overflow)
+ result = '{0}{1}{2:>{w}}'.format(INDENT, name, value, w=w)
return result
@@ -598,6 +592,20 @@ def section_arc(kstats_dict):
prt_i1('Deleted:', f_hits(arc_stats['deleted']))
prt_i1('Mutex misses:', f_hits(arc_stats['mutex_miss']))
prt_i1('Eviction skips:', f_hits(arc_stats['evict_skip']))
+ prt_i1('Eviction skips due to L2 writes:',
+ f_hits(arc_stats['evict_l2_skip']))
+ prt_i1('L2 cached evictions:', f_bytes(arc_stats['evict_l2_cached']))
+ prt_i1('L2 eligible evictions:', f_bytes(arc_stats['evict_l2_eligible']))
+ prt_i2('L2 eligible MFU evictions:',
+ f_perc(arc_stats['evict_l2_eligible_mfu'],
+ arc_stats['evict_l2_eligible']),
+ f_bytes(arc_stats['evict_l2_eligible_mfu']))
+ prt_i2('L2 eligible MRU evictions:',
+ f_perc(arc_stats['evict_l2_eligible_mru'],
+ arc_stats['evict_l2_eligible']),
+ f_bytes(arc_stats['evict_l2_eligible_mru']))
+ prt_i1('L2 ineligible evictions:',
+ f_bytes(arc_stats['evict_l2_ineligible']))
print()
@@ -736,6 +744,21 @@ def section_l2arc(kstats_dict):
prt_i2('Header size:',
f_perc(arc_stats['l2_hdr_size'], arc_stats['l2_size']),
f_bytes(arc_stats['l2_hdr_size']))
+ prt_i2('MFU allocated size:',
+ f_perc(arc_stats['l2_mfu_asize'], arc_stats['l2_asize']),
+ f_bytes(arc_stats['l2_mfu_asize']))
+ prt_i2('MRU allocated size:',
+ f_perc(arc_stats['l2_mru_asize'], arc_stats['l2_asize']),
+ f_bytes(arc_stats['l2_mru_asize']))
+ prt_i2('Prefetch allocated size:',
+ f_perc(arc_stats['l2_prefetch_asize'], arc_stats['l2_asize']),
+ f_bytes(arc_stats['l2_prefetch_asize']))
+ prt_i2('Data (buffer content) allocated size:',
+ f_perc(arc_stats['l2_bufc_data_asize'], arc_stats['l2_asize']),
+ f_bytes(arc_stats['l2_bufc_data_asize']))
+ prt_i2('Metadata (buffer content) allocated size:',
+ f_perc(arc_stats['l2_bufc_metadata_asize'], arc_stats['l2_asize']),
+ f_bytes(arc_stats['l2_bufc_metadata_asize']))
print()
prt_1('L2ARC breakdown:', f_hits(l2_access_total))
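The format_raw_line() change above keeps values right-aligned within the report width while tolerating values longer than the remaining space. A small sketch of the same padding rule; LINE_LENGTH of 72 columns and an 8-space INDENT are assumed here for illustration, not taken from the patch:

LINE_LENGTH = 72             # assumed report width
INDENT = " " * 8             # assumed indentation

def format_raw_line(name, value):
    # Right-align the value if it fits, otherwise fall back to one space.
    fit = LINE_LENGTH - len(INDENT) - len(name)
    overflow = len(value) + 1
    w = max(fit, overflow)
    return "{0}{1}{2:>{w}}".format(INDENT, name, value, w=w)

print(format_raw_line("l2_mfu_asize", "4294967296"))
print(format_raw_line("a_very_long_statistic_name_that_fills_the_line",
                      "12345678901234567890"))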
diff --git a/sys/contrib/openzfs/cmd/arcstat/arcstat.in b/sys/contrib/openzfs/cmd/arcstat/arcstat.in
index c83a1c74599e..9e7c52a6c7a3 100755
--- a/sys/contrib/openzfs/cmd/arcstat/arcstat.in
+++ b/sys/contrib/openzfs/cmd/arcstat/arcstat.in
@@ -88,6 +88,12 @@ cols = {
"mfug": [4, 1000, "MFU ghost list hits per second"],
"mrug": [4, 1000, "MRU ghost list hits per second"],
"eskip": [5, 1000, "evict_skip per second"],
+ "el2skip": [7, 1000, "evict skip, due to l2 writes, per second"],
+ "el2cach": [7, 1024, "Size of L2 cached evictions per second"],
+ "el2el": [5, 1024, "Size of L2 eligible evictions per second"],
+ "el2mfu": [6, 1024, "Size of L2 eligible MFU evictions per second"],
+ "el2mru": [6, 1024, "Size of L2 eligible MRU evictions per second"],
+ "el2inel": [7, 1024, "Size of L2 ineligible evictions per second"],
"mtxmis": [6, 1000, "mutex_miss per second"],
"dread": [5, 1000, "Demand accesses per second"],
"pread": [5, 1000, "Prefetch accesses per second"],
@@ -96,6 +102,16 @@ cols = {
"l2read": [6, 1000, "Total L2ARC accesses per second"],
"l2hit%": [6, 100, "L2ARC access hit percentage"],
"l2miss%": [7, 100, "L2ARC access miss percentage"],
+ "l2pref": [6, 1024, "L2ARC prefetch allocated size"],
+ "l2mfu": [5, 1024, "L2ARC MFU allocated size"],
+ "l2mru": [5, 1024, "L2ARC MRU allocated size"],
+ "l2data": [6, 1024, "L2ARC data allocated size"],
+ "l2meta": [6, 1024, "L2ARC metadata allocated size"],
+ "l2pref%": [7, 100, "L2ARC prefetch percentage"],
+ "l2mfu%": [6, 100, "L2ARC MFU percentage"],
+ "l2mru%": [6, 100, "L2ARC MRU percentage"],
+ "l2data%": [7, 100, "L2ARC data percentage"],
+ "l2meta%": [7, 100, "L2ARC metadata percentage"],
"l2asize": [7, 1024, "Actual (compressed) size of the L2ARC"],
"l2size": [6, 1024, "Size of the L2ARC"],
"l2bytes": [7, 1024, "Bytes read per second from the L2ARC"],
@@ -118,22 +134,24 @@ opfile = None
sep = " " # Default separator is 2 spaces
version = "0.4"
l2exist = False
-cmd = ("Usage: arcstat [-hvx] [-f fields] [-o file] [-s string] [interval "
+cmd = ("Usage: arcstat [-havxp] [-f fields] [-o file] [-s string] [interval "
"[count]]\n")
cur = {}
d = {}
out = None
kstat = None
+pretty_print = True
if sys.platform.startswith('freebsd'):
- # Requires py27-sysctl on FreeBSD
+ # Requires py-sysctl on FreeBSD
import sysctl
def kstat_update():
global kstat
- k = sysctl.filter('kstat.zfs.misc.arcstats')
+ k = [ctl for ctl in sysctl.filter('kstat.zfs.misc.arcstats')
+ if ctl.type != sysctl.CTLTYPE_NODE]
if not k:
sys.exit(1)
@@ -181,6 +199,7 @@ def detailed_usage():
def usage():
sys.stderr.write("%s\n" % cmd)
sys.stderr.write("\t -h : Print this help message\n")
+ sys.stderr.write("\t -a : Print all possible stats\n")
sys.stderr.write("\t -v : List all possible field headers and definitions"
"\n")
sys.stderr.write("\t -x : Print extended stats\n")
@@ -188,6 +207,7 @@ def usage():
sys.stderr.write("\t -o : Redirect output to the specified file\n")
sys.stderr.write("\t -s : Override default field separator with custom "
"character or string\n")
+ sys.stderr.write("\t -p : Disable auto-scaling of numerical fields\n")
sys.stderr.write("\nExamples:\n")
sys.stderr.write("\tarcstat -o /tmp/a.log 2 10\n")
sys.stderr.write("\tarcstat -s \",\" -o /tmp/a.log 2 10\n")
@@ -246,10 +266,14 @@ def print_values():
global hdr
global sep
global v
+ global pretty_print
- sys.stdout.write(sep.join(
- prettynum(cols[col][0], cols[col][1], v[col]) for col in hdr))
+ if pretty_print:
+ fmt = lambda col: prettynum(cols[col][0], cols[col][1], v[col])
+ else:
+ fmt = lambda col: v[col]
+ sys.stdout.write(sep.join(fmt(col) for col in hdr))
sys.stdout.write("\n")
sys.stdout.flush()
@@ -257,9 +281,14 @@ def print_values():
def print_header():
global hdr
global sep
+ global pretty_print
- sys.stdout.write(sep.join("%*s" % (cols[col][0], col) for col in hdr))
+ if pretty_print:
+ fmt = lambda col: "%*s" % (cols[col][0], col)
+ else:
+ fmt = lambda col: col
+ sys.stdout.write(sep.join(fmt(col) for col in hdr))
sys.stdout.write("\n")
@@ -296,8 +325,10 @@ def init():
global sep
global out
global l2exist
+ global pretty_print
desired_cols = None
+ aflag = False
xflag = False
hflag = False
vflag = False
@@ -306,14 +337,16 @@ def init():
try:
opts, args = getopt.getopt(
sys.argv[1:],
- "xo:hvs:f:",
+ "axo:hvs:f:p",
[
+ "all",
"extended",
"outfile",
"help",
"verbose",
"separator",
- "columns"
+ "columns",
+ "parsable"
]
)
except getopt.error as msg:
@@ -322,6 +355,8 @@ def init():
opts = None
for opt, arg in opts:
+ if opt in ('-a', '--all'):
+ aflag = True
if opt in ('-x', '--extended'):
xflag = True
if opt in ('-o', '--outfile'):
@@ -337,6 +372,8 @@ def init():
if opt in ('-f', '--columns'):
desired_cols = arg
i += 1
+ if opt in ('-p', '--parsable'):
+ pretty_print = False
i += 1
argv = sys.argv[i:]
@@ -381,6 +418,12 @@ def init():
incompat)
usage()
+ if aflag:
+ if l2exist:
+ hdr = cols.keys()
+ else:
+ hdr = [col for col in cols.keys() if not col.startswith("l2")]
+
if opfile:
try:
out = open(opfile, "w")
@@ -436,6 +479,12 @@ def calculate():
v["mrug"] = d["mru_ghost_hits"] / sint
v["mfug"] = d["mfu_ghost_hits"] / sint
v["eskip"] = d["evict_skip"] / sint
+ v["el2skip"] = d["evict_l2_skip"] / sint
+ v["el2cach"] = d["evict_l2_cached"] / sint
+ v["el2el"] = d["evict_l2_eligible"] / sint
+ v["el2mfu"] = d["evict_l2_eligible_mfu"] / sint
+ v["el2mru"] = d["evict_l2_eligible_mru"] / sint
+ v["el2inel"] = d["evict_l2_ineligible"] / sint
v["mtxmis"] = d["mutex_miss"] / sint
if l2exist:
@@ -449,6 +498,17 @@ def calculate():
v["l2size"] = cur["l2_size"]
v["l2bytes"] = d["l2_read_bytes"] / sint
+ v["l2pref"] = cur["l2_prefetch_asize"]
+ v["l2mfu"] = cur["l2_mfu_asize"]
+ v["l2mru"] = cur["l2_mru_asize"]
+ v["l2data"] = cur["l2_bufc_data_asize"]
+ v["l2meta"] = cur["l2_bufc_metadata_asize"]
+ v["l2pref%"] = 100 * v["l2pref"] / v["l2asize"]
+ v["l2mfu%"] = 100 * v["l2mfu"] / v["l2asize"]
+ v["l2mru%"] = 100 * v["l2mru"] / v["l2asize"]
+ v["l2data%"] = 100 * v["l2data"] / v["l2asize"]
+ v["l2meta%"] = 100 * v["l2meta"] / v["l2asize"]
+
v["grow"] = 0 if cur["arc_no_grow"] else 1
v["need"] = cur["arc_need_free"]
v["free"] = cur["memory_free_bytes"]
diff --git a/sys/contrib/openzfs/cmd/dbufstat/dbufstat.in b/sys/contrib/openzfs/cmd/dbufstat/dbufstat.in
index 1d4eb39d7242..82250353f5eb 100755
--- a/sys/contrib/openzfs/cmd/dbufstat/dbufstat.in
+++ b/sys/contrib/openzfs/cmd/dbufstat/dbufstat.in
@@ -131,7 +131,7 @@ elif sys.platform.startswith("linux"):
def print_incompat_helper(incompat):
cnt = 0
for key in sorted(incompat):
- if cnt is 0:
+ if cnt == 0:
sys.stderr.write("\t")
elif cnt > 8:
sys.stderr.write(",\n\t")
@@ -662,7 +662,7 @@ def main():
if not ifile:
ifile = default_ifile()
- if ifile is not "-":
+ if ifile != "-":
try:
tmp = open(ifile, "r")
sys.stdin = tmp
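The dbufstat change above swaps "is"/"is not" for "=="/"!=" when comparing against literals. "is" checks object identity, which only happened to work because CPython caches small integers, and Python 3.8+ emits a SyntaxWarning for "is" with a literal. A short illustration; the identity results are CPython implementation details, not guarantees:

a = int("1000")          # built at runtime, outside the small-int cache
b = int("1000")
print(a == b)            # True: value equality, which is what the script needs
print(a is b)            # usually False in CPython: two distinct int objects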
diff --git a/sys/contrib/openzfs/cmd/mount_zfs/mount_zfs.c b/sys/contrib/openzfs/cmd/mount_zfs/mount_zfs.c
index ed9f167ccac8..ca39d228479e 100644
--- a/sys/contrib/openzfs/cmd/mount_zfs/mount_zfs.c
+++ b/sys/contrib/openzfs/cmd/mount_zfs/mount_zfs.c
@@ -43,67 +43,30 @@
libzfs_handle_t *g_zfs;
/*
- * Return the pool/dataset to mount given the name passed to mount. This
- * is expected to be of the form pool/dataset, however may also refer to
- * a block device if that device contains a valid zfs label.
+ * Opportunistically convert a target string into a pool name. If the
+ * string does not represent a block device with a valid zfs label
+ * then it is passed through without modification.
*/
-static char *
-parse_dataset(char *dataset)
+static void
+parse_dataset(const char *target, char **dataset)
{
- char cwd[PATH_MAX];
- struct stat64 statbuf;
- int error;
- int len;
+ /* Assume pool/dataset is more likely */
+ strlcpy(*dataset, target, PATH_MAX);
- /*
- * We expect a pool/dataset to be provided, however if we're
- * given a device which is a member of a zpool we attempt to
- * extract the pool name stored in the label. Given the pool
- * name we can mount the root dataset.
- */
- error = stat64(dataset, &statbuf);
- if (error == 0) {
- nvlist_t *config;
- char *name;
- int fd;
-
- fd = open(dataset, O_RDONLY);
- if (fd < 0)
- goto out;
-
- error = zpool_read_label(fd, &config, NULL);
- (void) close(fd);
- if (error)
- goto out;
-
- error = nvlist_lookup_string(config,
- ZPOOL_CONFIG_POOL_NAME, &name);
- if (error) {
- nvlist_free(config);
- } else {
- dataset = strdup(name);
- nvlist_free(config);
- return (dataset);
- }
+ int fd = open(target, O_RDONLY | O_CLOEXEC);
+ if (fd < 0)
+ return;
+
+ nvlist_t *cfg = NULL;
+ if (zpool_read_label(fd, &cfg, NULL) == 0) {
+ char *nm = NULL;
+ if (!nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &nm))
+ strlcpy(*dataset, nm, PATH_MAX);
+ nvlist_free(cfg);
}
-out:
- /*
- * If a file or directory in your current working directory is
- * named 'dataset' then mount(8) will prepend your current working
- * directory to the dataset. There is no way to prevent this
- * behavior so we simply check for it and strip the prepended
- * patch when it is added.
- */
- if (getcwd(cwd, PATH_MAX) == NULL)
- return (dataset);
-
- len = strlen(cwd);
-
- /* Do not add one when cwd already ends in a trailing '/' */
- if (strncmp(cwd, dataset, len) == 0)
- return (dataset + len + (cwd[len-1] != '/'));
- return (dataset);
+ if (close(fd))
+ perror("close");
}
/*
@@ -147,8 +110,8 @@ mtab_update(char *dataset, char *mntpoint, char *type, char *mntopts)
if (!fp) {
(void) fprintf(stderr, gettext(
"filesystem '%s' was mounted, but /etc/mtab "
- "could not be opened due to error %d\n"),
- dataset, errno);
+ "could not be opened due to error: %s\n"),
+ dataset, strerror(errno));
return (MOUNT_FILEIO);
}
@@ -156,8 +119,8 @@ mtab_update(char *dataset, char *mntpoint, char *type, char *mntopts)
if (error) {
(void) fprintf(stderr, gettext(
"filesystem '%s' was mounted, but /etc/mtab "
- "could not be updated due to error %d\n"),
- dataset, errno);
+ "could not be updated due to error: %s\n"),
+ dataset, strerror(errno));
return (MOUNT_FILEIO);
}
@@ -176,7 +139,7 @@ main(int argc, char **argv)
char badopt[MNT_LINE_MAX] = { '\0' };
char mtabopt[MNT_LINE_MAX] = { '\0' };
char mntpoint[PATH_MAX];
- char *dataset;
+ char dataset[PATH_MAX], *pdataset = dataset;
unsigned long mntflags = 0, zfsflags = 0, remount = 0;
int sloppy = 0, fake = 0, verbose = 0, nomtab = 0, zfsutil = 0;
int error, c;
@@ -232,13 +195,13 @@ main(int argc, char **argv)
return (MOUNT_USAGE);
}
- dataset = parse_dataset(argv[0]);
+ parse_dataset(argv[0], &pdataset);
/* canonicalize the mount point */
if (realpath(argv[1], mntpoint) == NULL) {
(void) fprintf(stderr, gettext("filesystem '%s' cannot be "
- "mounted at '%s' due to canonicalization error %d.\n"),
- dataset, argv[1], errno);
+ "mounted at '%s' due to canonicalization error: %s\n"),
+ dataset, argv[1], strerror(errno));
return (MOUNT_SYSERR);
}
diff --git a/sys/contrib/openzfs/cmd/raidz_test/raidz_bench.c b/sys/contrib/openzfs/cmd/raidz_test/raidz_bench.c
index 8a2cec4ca685..a3446c52c416 100644
--- a/sys/contrib/openzfs/cmd/raidz_test/raidz_bench.c
+++ b/sys/contrib/openzfs/cmd/raidz_test/raidz_bench.c
@@ -83,8 +83,17 @@ run_gen_bench_impl(const char *impl)
/* create suitable raidz_map */
ncols = rto_opts.rto_dcols + fn + 1;
zio_bench.io_size = 1ULL << ds;
- rm_bench = vdev_raidz_map_alloc(&zio_bench,
- BENCH_ASHIFT, ncols, fn+1);
+
+ if (rto_opts.rto_expand) {
+ rm_bench = vdev_raidz_map_alloc_expanded(
+ zio_bench.io_abd,
+ zio_bench.io_size, zio_bench.io_offset,
+ rto_opts.rto_ashift, ncols+1, ncols,
+ fn+1, rto_opts.rto_expand_offset);
+ } else {
+ rm_bench = vdev_raidz_map_alloc(&zio_bench,
+ BENCH_ASHIFT, ncols, fn+1);
+ }
/* estimate iteration count */
iter_cnt = GEN_BENCH_MEMORY;
@@ -163,8 +172,16 @@ run_rec_bench_impl(const char *impl)
(1ULL << BENCH_ASHIFT))
continue;
- rm_bench = vdev_raidz_map_alloc(&zio_bench,
- BENCH_ASHIFT, ncols, PARITY_PQR);
+ if (rto_opts.rto_expand) {
+ rm_bench = vdev_raidz_map_alloc_expanded(
+ zio_bench.io_abd,
+ zio_bench.io_size, zio_bench.io_offset,
+ BENCH_ASHIFT, ncols+1, ncols,
+ PARITY_PQR, rto_opts.rto_expand_offset);
+ } else {
+ rm_bench = vdev_raidz_map_alloc(&zio_bench,
+ BENCH_ASHIFT, ncols, PARITY_PQR);
+ }
/* estimate iteration count */
iter_cnt = (REC_BENCH_MEMORY);
diff --git a/sys/contrib/openzfs/cmd/raidz_test/raidz_test.c b/sys/contrib/openzfs/cmd/raidz_test/raidz_test.c
index 66f36b0d56ca..4e2639f3676d 100644
--- a/sys/contrib/openzfs/cmd/raidz_test/raidz_test.c
+++ b/sys/contrib/openzfs/cmd/raidz_test/raidz_test.c
@@ -77,16 +77,20 @@ static void print_opts(raidz_test_opts_t *opts, boolean_t force)
(void) fprintf(stdout, DBLSEP "Running with options:\n"
" (-a) zio ashift : %zu\n"
" (-o) zio offset : 1 << %zu\n"
+ " (-e) expanded map : %s\n"
+ " (-r) reflow offset : %llx\n"
" (-d) number of raidz data columns : %zu\n"
" (-s) size of DATA : 1 << %zu\n"
" (-S) sweep parameters : %s \n"
" (-v) verbose : %s \n\n",
- opts->rto_ashift, /* -a */
- ilog2(opts->rto_offset), /* -o */
- opts->rto_dcols, /* -d */
- ilog2(opts->rto_dsize), /* -s */
- opts->rto_sweep ? "yes" : "no", /* -S */
- verbose); /* -v */
+ opts->rto_ashift, /* -a */
+ ilog2(opts->rto_offset), /* -o */
+ opts->rto_expand ? "yes" : "no", /* -e */
+ (u_longlong_t)opts->rto_expand_offset, /* -r */
+ opts->rto_dcols, /* -d */
+ ilog2(opts->rto_dsize), /* -s */
+ opts->rto_sweep ? "yes" : "no", /* -S */
+ verbose); /* -v */
}
}
@@ -104,6 +108,8 @@ static void usage(boolean_t requested)
"\t[-S parameter sweep (default: %s)]\n"
"\t[-t timeout for parameter sweep test]\n"
"\t[-B benchmark all raidz implementations]\n"
+ "\t[-e use expanded raidz map (default: %s)]\n"
+ "\t[-r expanded raidz map reflow offset (default: %llx)]\n"
"\t[-v increase verbosity (default: %zu)]\n"
"\t[-h (print help)]\n"
"\t[-T test the test, see if failure would be detected]\n"
@@ -114,6 +120,8 @@ static void usage(boolean_t requested)
o->rto_dcols, /* -d */
ilog2(o->rto_dsize), /* -s */
rto_opts.rto_sweep ? "yes" : "no", /* -S */
+ rto_opts.rto_expand ? "yes" : "no", /* -e */
+ (u_longlong_t)o->rto_expand_offset, /* -r */
o->rto_v); /* -d */
exit(requested ? 0 : 1);
@@ -128,7 +136,7 @@ static void process_options(int argc, char **argv)
bcopy(&rto_opts_defaults, o, sizeof (*o));
- while ((opt = getopt(argc, argv, "TDBSvha:o:d:s:t:")) != -1) {
+ while ((opt = getopt(argc, argv, "TDBSvha:er:o:d:s:t:")) != -1) {
value = 0;
switch (opt) {
@@ -136,6 +144,12 @@ static void process_options(int argc, char **argv)
value = strtoull(optarg, NULL, 0);
o->rto_ashift = MIN(13, MAX(9, value));
break;
+ case 'e':
+ o->rto_expand = 1;
+ break;
+ case 'r':
+ o->rto_expand_offset = strtoull(optarg, NULL, 0);
+ break;
case 'o':
value = strtoull(optarg, NULL, 0);
o->rto_offset = ((1ULL << MIN(12, value)) >> 9) << 9;
@@ -179,25 +193,34 @@ static void process_options(int argc, char **argv)
}
}
-#define DATA_COL(rm, i) ((rm)->rm_col[raidz_parity(rm) + (i)].rc_abd)
-#define DATA_COL_SIZE(rm, i) ((rm)->rm_col[raidz_parity(rm) + (i)].rc_size)
+#define DATA_COL(rr, i) ((rr)->rr_col[rr->rr_firstdatacol + (i)].rc_abd)
+#define DATA_COL_SIZE(rr, i) ((rr)->rr_col[rr->rr_firstdatacol + (i)].rc_size)
-#define CODE_COL(rm, i) ((rm)->rm_col[(i)].rc_abd)
-#define CODE_COL_SIZE(rm, i) ((rm)->rm_col[(i)].rc_size)
+#define CODE_COL(rr, i) ((rr)->rr_col[(i)].rc_abd)
+#define CODE_COL_SIZE(rr, i) ((rr)->rr_col[(i)].rc_size)
static int
cmp_code(raidz_test_opts_t *opts, const raidz_map_t *rm, const int parity)
{
- int i, ret = 0;
+ int r, i, ret = 0;
VERIFY(parity >= 1 && parity <= 3);
- for (i = 0; i < parity; i++) {
- if (abd_cmp(CODE_COL(rm, i), CODE_COL(opts->rm_golden, i))
- != 0) {
- ret++;
- LOG_OPT(D_DEBUG, opts,
- "\nParity block [%d] different!\n", i);
+ for (r = 0; r < rm->rm_nrows; r++) {
+ raidz_row_t * const rr = rm->rm_row[r];
+ raidz_row_t * const rrg = opts->rm_golden->rm_row[r];
+ for (i = 0; i < parity; i++) {
+ if (CODE_COL_SIZE(rrg, i) == 0) {
+ VERIFY0(CODE_COL_SIZE(rr, i));
+ continue;
+ }
+
+ if (abd_cmp(CODE_COL(rr, i),
+ CODE_COL(rrg, i)) != 0) {
+ ret++;
+ LOG_OPT(D_DEBUG, opts,
+ "\nParity block [%d] different!\n", i);
+ }
}
}
return (ret);
@@ -206,16 +229,26 @@ cmp_code(raidz_test_opts_t *opts, const raidz_map_t *rm, const int parity)
static int
cmp_data(raidz_test_opts_t *opts, raidz_map_t *rm)
{
- int i, ret = 0;
- int dcols = opts->rm_golden->rm_cols - raidz_parity(opts->rm_golden);
+ int r, i, dcols, ret = 0;
+
+ for (r = 0; r < rm->rm_nrows; r++) {
+ raidz_row_t *rr = rm->rm_row[r];
+ raidz_row_t *rrg = opts->rm_golden->rm_row[r];
+ dcols = opts->rm_golden->rm_row[0]->rr_cols -
+ raidz_parity(opts->rm_golden);
+ for (i = 0; i < dcols; i++) {
+ if (DATA_COL_SIZE(rrg, i) == 0) {
+ VERIFY0(DATA_COL_SIZE(rr, i));
+ continue;
+ }
- for (i = 0; i < dcols; i++) {
- if (abd_cmp(DATA_COL(opts->rm_golden, i), DATA_COL(rm, i))
- != 0) {
- ret++;
+ if (abd_cmp(DATA_COL(rrg, i),
+ DATA_COL(rr, i)) != 0) {
+ ret++;
- LOG_OPT(D_DEBUG, opts,
- "\nData block [%d] different!\n", i);
+ LOG_OPT(D_DEBUG, opts,
+ "\nData block [%d] different!\n", i);
+ }
}
}
return (ret);
@@ -236,12 +269,13 @@ init_rand(void *data, size_t size, void *private)
static void
corrupt_colums(raidz_map_t *rm, const int *tgts, const int cnt)
{
- int i;
- raidz_col_t *col;
-
- for (i = 0; i < cnt; i++) {
- col = &rm->rm_col[tgts[i]];
- abd_iterate_func(col->rc_abd, 0, col->rc_size, init_rand, NULL);
+ for (int r = 0; r < rm->rm_nrows; r++) {
+ raidz_row_t *rr = rm->rm_row[r];
+ for (int i = 0; i < cnt; i++) {
+ raidz_col_t *col = &rr->rr_col[tgts[i]];
+ abd_iterate_func(col->rc_abd, 0, col->rc_size,
+ init_rand, NULL);
+ }
}
}
@@ -288,10 +322,22 @@ init_raidz_golden_map(raidz_test_opts_t *opts, const int parity)
VERIFY0(vdev_raidz_impl_set("original"));
- opts->rm_golden = vdev_raidz_map_alloc(opts->zio_golden,
- opts->rto_ashift, total_ncols, parity);
- rm_test = vdev_raidz_map_alloc(zio_test,
- opts->rto_ashift, total_ncols, parity);
+ if (opts->rto_expand) {
+ opts->rm_golden =
+ vdev_raidz_map_alloc_expanded(opts->zio_golden->io_abd,
+ opts->zio_golden->io_size, opts->zio_golden->io_offset,
+ opts->rto_ashift, total_ncols+1, total_ncols,
+ parity, opts->rto_expand_offset);
+ rm_test = vdev_raidz_map_alloc_expanded(zio_test->io_abd,
+ zio_test->io_size, zio_test->io_offset,
+ opts->rto_ashift, total_ncols+1, total_ncols,
+ parity, opts->rto_expand_offset);
+ } else {
+ opts->rm_golden = vdev_raidz_map_alloc(opts->zio_golden,
+ opts->rto_ashift, total_ncols, parity);
+ rm_test = vdev_raidz_map_alloc(zio_test,
+ opts->rto_ashift, total_ncols, parity);
+ }
VERIFY(opts->zio_golden);
VERIFY(opts->rm_golden);
@@ -312,6 +358,188 @@ init_raidz_golden_map(raidz_test_opts_t *opts, const int parity)
return (err);
}
+/*
+ * If reflow is not in progress, reflow_offset should be UINT64_MAX.
+ * For each row, if the row is entirely before reflow_offset, it will
+ * come from the new location. Otherwise this row will come from the
+ * old location. Therefore, rows that straddle the reflow_offset will
+ * come from the old location.
+ *
+ * NOTE: Until raidz expansion is implemented this function is only
+ * needed by raidz_test.c to test the multi-row raidz_map_t functionality.
+ */
+raidz_map_t *
+vdev_raidz_map_alloc_expanded(abd_t *abd, uint64_t size, uint64_t offset,
+ uint64_t ashift, uint64_t physical_cols, uint64_t logical_cols,
+ uint64_t nparity, uint64_t reflow_offset)
+{
+ /* The zio's size in units of the vdev's minimum sector size. */
+ uint64_t s = size >> ashift;
+ uint64_t q, r, bc, devidx, asize = 0, tot;
+
+ /*
+ * "Quotient": The number of data sectors for this stripe on all but
+ * the "big column" child vdevs that also contain "remainder" data.
+ * AKA "full rows"
+ */
+ q = s / (logical_cols - nparity);
+
+ /*
+ * "Remainder": The number of partial stripe data sectors in this I/O.
+ * This will add a sector to some, but not all, child vdevs.
+ */
+ r = s - q * (logical_cols - nparity);
+
+ /* The number of "big columns" - those which contain remainder data. */
+ bc = (r == 0 ? 0 : r + nparity);
+
+ /*
+ * The total number of data and parity sectors associated with
+ * this I/O.
+ */
+ tot = s + nparity * (q + (r == 0 ? 0 : 1));
+
+ /* How many rows contain data (not skip) */
+ uint64_t rows = howmany(tot, logical_cols);
+ int cols = MIN(tot, logical_cols);
+
+ raidz_map_t *rm = kmem_zalloc(offsetof(raidz_map_t, rm_row[rows]),
+ KM_SLEEP);
+ rm->rm_nrows = rows;
+
+ for (uint64_t row = 0; row < rows; row++) {
+ raidz_row_t *rr = kmem_alloc(offsetof(raidz_row_t,
+ rr_col[cols]), KM_SLEEP);
+ rm->rm_row[row] = rr;
+
+ /* The starting RAIDZ (parent) vdev sector of the row. */
+ uint64_t b = (offset >> ashift) + row * logical_cols;
+
+ /*
+ * If we are in the middle of a reflow, and any part of this
+ * row has not been copied, then use the old location of
+ * this row.
+ */
+ int row_phys_cols = physical_cols;
+ if (b + (logical_cols - nparity) > reflow_offset >> ashift)
+ row_phys_cols--;
+
+ /* starting child of this row */
+ uint64_t child_id = b % row_phys_cols;
+ /* The starting byte offset on each child vdev. */
+ uint64_t child_offset = (b / row_phys_cols) << ashift;
+
+ /*
+ * We set cols to the entire width of the block, even
+ * if this row is shorter. This is needed because parity
+ * generation (for Q and R) needs to know the entire width,
+ * because it treats the short row as though it was
+ * full-width (and the "phantom" sectors were zero-filled).
+ *
+ * Another approach to this would be to set cols shorter
+ * (to just the number of columns that we might do i/o to)
+ * and have another mechanism to tell the parity generation
+ * about the "entire width". Reconstruction (at least
+ * vdev_raidz_reconstruct_general()) would also need to
+ * know about the "entire width".
+ */
+ rr->rr_cols = cols;
+ rr->rr_bigcols = bc;
+ rr->rr_missingdata = 0;
+ rr->rr_missingparity = 0;
+ rr->rr_firstdatacol = nparity;
+ rr->rr_abd_copy = NULL;
+ rr->rr_abd_empty = NULL;
+ rr->rr_nempty = 0;
+
+ for (int c = 0; c < rr->rr_cols; c++, child_id++) {
+ if (child_id >= row_phys_cols) {
+ child_id -= row_phys_cols;
+ child_offset += 1ULL << ashift;
+ }
+ rr->rr_col[c].rc_devidx = child_id;
+ rr->rr_col[c].rc_offset = child_offset;
+ rr->rr_col[c].rc_gdata = NULL;
+ rr->rr_col[c].rc_orig_data = NULL;
+ rr->rr_col[c].rc_error = 0;
+ rr->rr_col[c].rc_tried = 0;
+ rr->rr_col[c].rc_skipped = 0;
+ rr->rr_col[c].rc_need_orig_restore = B_FALSE;
+
+ uint64_t dc = c - rr->rr_firstdatacol;
+ if (c < rr->rr_firstdatacol) {
+ rr->rr_col[c].rc_size = 1ULL << ashift;
+ rr->rr_col[c].rc_abd =
+ abd_alloc_linear(rr->rr_col[c].rc_size,
+ B_TRUE);
+ } else if (row == rows - 1 && bc != 0 && c >= bc) {
+ /*
+ * Past the end of the data; this column exists only for parity generation.
+ */
+ rr->rr_col[c].rc_size = 0;
+ rr->rr_col[c].rc_abd = NULL;
+ } else {
+ /*
+ * "data column" (col excluding parity)
+ * Add an ASCII art diagram here
+ */
+ uint64_t off;
+
+ if (c < bc || r == 0) {
+ off = dc * rows + row;
+ } else {
+ off = r * rows +
+ (dc - r) * (rows - 1) + row;
+ }
+ rr->rr_col[c].rc_size = 1ULL << ashift;
+ rr->rr_col[c].rc_abd =
+ abd_get_offset(abd, off << ashift);
+ }
+
+ asize += rr->rr_col[c].rc_size;
+ }
+ /*
+ * If all data stored spans all columns, there's a danger that
+ * parity will always be on the same device and, since parity
+ * isn't read during normal operation, that that device's I/O
+ * bandwidth won't be used effectively. We therefore switch
+ * the parity every 1MB.
+ *
+ * ...at least that was, ostensibly, the theory. As a practical
+ * matter unless we juggle the parity between all devices
+ * evenly, we won't see any benefit. Further, occasional writes
+ * that aren't a multiple of the LCM of the number of children
+ * and the minimum stripe width are sufficient to avoid pessimal
+ * behavior. Unfortunately, this decision created an implicit
+ * on-disk format requirement that we need to support for all
+ * eternity, but only for single-parity RAID-Z.
+ *
+ * If we intend to skip a sector in the zeroth column for
+ * padding we must make sure to note this swap. We will never
+ * intend to skip the first column since at least one data and
+ * one parity column must appear in each row.
+ */
+ if (rr->rr_firstdatacol == 1 && rr->rr_cols > 1 &&
+ (offset & (1ULL << 20))) {
+ ASSERT(rr->rr_cols >= 2);
+ ASSERT(rr->rr_col[0].rc_size == rr->rr_col[1].rc_size);
+ devidx = rr->rr_col[0].rc_devidx;
+ uint64_t o = rr->rr_col[0].rc_offset;
+ rr->rr_col[0].rc_devidx = rr->rr_col[1].rc_devidx;
+ rr->rr_col[0].rc_offset = rr->rr_col[1].rc_offset;
+ rr->rr_col[1].rc_devidx = devidx;
+ rr->rr_col[1].rc_offset = o;
+ }
+
+ }
+ ASSERT3U(asize, ==, tot << ashift);
+
+ /* init RAIDZ parity ops */
+ rm->rm_ops = vdev_raidz_math_get_ops();
+
+ return (rm);
+}
+
static raidz_map_t *
init_raidz_map(raidz_test_opts_t *opts, zio_t **zio, const int parity)
{
@@ -330,8 +558,15 @@ init_raidz_map(raidz_test_opts_t *opts, zio_t **zio, const int parity)
(*zio)->io_abd = raidz_alloc(alloc_dsize);
init_zio_abd(*zio);
- rm = vdev_raidz_map_alloc(*zio, opts->rto_ashift,
- total_ncols, parity);
+ if (opts->rto_expand) {
+ rm = vdev_raidz_map_alloc_expanded((*zio)->io_abd,
+ (*zio)->io_size, (*zio)->io_offset,
+ opts->rto_ashift, total_ncols+1, total_ncols,
+ parity, opts->rto_expand_offset);
+ } else {
+ rm = vdev_raidz_map_alloc(*zio, opts->rto_ashift,
+ total_ncols, parity);
+ }
VERIFY(rm);
/* Make sure code columns are destroyed */
@@ -420,7 +655,7 @@ run_rec_check_impl(raidz_test_opts_t *opts, raidz_map_t *rm, const int fn)
if (fn < RAIDZ_REC_PQ) {
/* can reconstruct 1 failed data disk */
for (x0 = 0; x0 < opts->rto_dcols; x0++) {
- if (x0 >= rm->rm_cols - raidz_parity(rm))
+ if (x0 >= rm->rm_row[0]->rr_cols - raidz_parity(rm))
continue;
/* Check if should stop */
@@ -445,10 +680,11 @@ run_rec_check_impl(raidz_test_opts_t *opts, raidz_map_t *rm, const int fn)
} else if (fn < RAIDZ_REC_PQR) {
/* can reconstruct 2 failed data disk */
for (x0 = 0; x0 < opts->rto_dcols; x0++) {
- if (x0 >= rm->rm_cols - raidz_parity(rm))
+ if (x0 >= rm->rm_row[0]->rr_cols - raidz_parity(rm))
continue;
for (x1 = x0 + 1; x1 < opts->rto_dcols; x1++) {
- if (x1 >= rm->rm_cols - raidz_parity(rm))
+ if (x1 >= rm->rm_row[0]->rr_cols -
+ raidz_parity(rm))
continue;
/* Check if should stop */
@@ -475,14 +711,15 @@ run_rec_check_impl(raidz_test_opts_t *opts, raidz_map_t *rm, const int fn)
} else {
/* can reconstruct 3 failed data disk */
for (x0 = 0; x0 < opts->rto_dcols; x0++) {
- if (x0 >= rm->rm_cols - raidz_parity(rm))
+ if (x0 >= rm->rm_row[0]->rr_cols - raidz_parity(rm))
continue;
for (x1 = x0 + 1; x1 < opts->rto_dcols; x1++) {
- if (x1 >= rm->rm_cols - raidz_parity(rm))
+ if (x1 >= rm->rm_row[0]->rr_cols -
+ raidz_parity(rm))
continue;
for (x2 = x1 + 1; x2 < opts->rto_dcols; x2++) {
- if (x2 >=
- rm->rm_cols - raidz_parity(rm))
+ if (x2 >= rm->rm_row[0]->rr_cols -
+ raidz_parity(rm))
continue;
/* Check if should stop */
@@ -700,6 +937,8 @@ run_sweep(void)
opts->rto_dcols = dcols_v[d];
opts->rto_offset = (1 << ashift_v[a]) * rand();
opts->rto_dsize = size_v[s];
+ opts->rto_expand = rto_opts.rto_expand;
+ opts->rto_expand_offset = rto_opts.rto_expand_offset;
opts->rto_v = 0; /* be quiet */
VERIFY3P(thread_create(NULL, 0, sweep_thread, (void *) opts,
@@ -732,6 +971,7 @@ exit:
return (sweep_state == SWEEP_ERROR ? SWEEP_ERROR : 0);
}
+
int
main(int argc, char **argv)
{
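vdev_raidz_map_alloc_expanded() above splits the I/O into rows of the logical width before allocating columns. The following standalone sketch reproduces only that geometry arithmetic (q, r, bc, tot, rows, cols) for a given block size, ashift, logical width, and parity; it is an illustration, not code from the patch:

def expanded_map_geometry(size, ashift, logical_cols, nparity):
    s = size >> ashift                      # data sectors in this zio
    q = s // (logical_cols - nparity)       # full rows ("quotient")
    r = s - q * (logical_cols - nparity)    # remainder data sectors
    bc = 0 if r == 0 else r + nparity       # "big" columns
    tot = s + nparity * (q + (0 if r == 0 else 1))  # data + parity sectors
    rows = -(-tot // logical_cols)          # howmany(tot, logical_cols)
    cols = min(tot, logical_cols)
    return dict(s=s, q=q, r=r, bc=bc, tot=tot, rows=rows, cols=cols)

# A 128 KiB block at ashift=12 on an 11-wide logical layout with one parity
# column gives s=32, q=3, r=2, bc=3, tot=36 and 4 rows.
print(expanded_map_geometry(128 * 1024, 12, 11, 1))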
diff --git a/sys/contrib/openzfs/cmd/raidz_test/raidz_test.h b/sys/contrib/openzfs/cmd/raidz_test/raidz_test.h
index 09c825ae43c7..0f7f4cee3eb6 100644
--- a/sys/contrib/openzfs/cmd/raidz_test/raidz_test.h
+++ b/sys/contrib/openzfs/cmd/raidz_test/raidz_test.h
@@ -44,13 +44,15 @@ static const char *raidz_impl_names[] = {
typedef struct raidz_test_opts {
size_t rto_ashift;
- size_t rto_offset;
+ uint64_t rto_offset;
size_t rto_dcols;
size_t rto_dsize;
size_t rto_v;
size_t rto_sweep;
size_t rto_sweep_timeout;
size_t rto_benchmark;
+ size_t rto_expand;
+ uint64_t rto_expand_offset;
size_t rto_sanity;
size_t rto_gdb;
@@ -69,6 +71,8 @@ static const raidz_test_opts_t rto_opts_defaults = {
.rto_v = 0,
.rto_sweep = 0,
.rto_benchmark = 0,
+ .rto_expand = 0,
+ .rto_expand_offset = -1ULL,
.rto_sanity = 0,
.rto_gdb = 0,
.rto_should_stop = B_FALSE
@@ -113,4 +117,7 @@ void init_zio_abd(zio_t *zio);
void run_raidz_benchmark(void);
+struct raidz_map *vdev_raidz_map_alloc_expanded(abd_t *, uint64_t, uint64_t,
+ uint64_t, uint64_t, uint64_t, uint64_t, uint64_t);
+
#endif /* RAIDZ_TEST_H */
diff --git a/sys/contrib/openzfs/cmd/zdb/zdb.c b/sys/contrib/openzfs/cmd/zdb/zdb.c
index 376b24db1eec..e45bff26944a 100644
--- a/sys/contrib/openzfs/cmd/zdb/zdb.c
+++ b/sys/contrib/openzfs/cmd/zdb/zdb.c
@@ -1642,7 +1642,11 @@ dump_metaslab(metaslab_t *msp)
SPACE_MAP_HISTOGRAM_SIZE, sm->sm_shift);
}
- ASSERT(msp->ms_size == (1ULL << vd->vdev_ms_shift));
+ if (vd->vdev_ops == &vdev_draid_ops)
+ ASSERT3U(msp->ms_size, <=, 1ULL << vd->vdev_ms_shift);
+ else
+ ASSERT3U(msp->ms_size, ==, 1ULL << vd->vdev_ms_shift);
+
dump_spacemap(spa->spa_meta_objset, msp->ms_sm);
if (spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
@@ -4202,6 +4206,8 @@ dump_l2arc_log_entries(uint64_t log_entries,
(u_longlong_t)L2BLK_GET_PREFETCH((&le[j])->le_prop));
(void) printf("|\t\t\t\taddress: %llu\n",
(u_longlong_t)le[j].le_daddr);
+ (void) printf("|\t\t\t\tARC state: %llu\n",
+ (u_longlong_t)L2BLK_GET_STATE((&le[j])->le_prop));
(void) printf("|\n");
}
(void) printf("\n");
@@ -5201,8 +5207,6 @@ zdb_blkptr_done(zio_t *zio)
zdb_cb_t *zcb = zio->io_private;
zbookmark_phys_t *zb = &zio->io_bookmark;
- abd_free(zio->io_abd);
-
mutex_enter(&spa->spa_scrub_lock);
spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
@@ -5229,6 +5233,8 @@ zdb_blkptr_done(zio_t *zio)
blkbuf);
}
mutex_exit(&spa->spa_scrub_lock);
+
+ abd_free(zio->io_abd);
}
static int
@@ -6316,7 +6322,7 @@ dump_block_stats(spa_t *spa)
(void) printf("\t%-16s %14llu used: %5.2f%%\n", "Normal class:",
(u_longlong_t)norm_alloc, 100.0 * norm_alloc / norm_space);
- if (spa_special_class(spa)->mc_rotor != NULL) {
+ if (spa_special_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_special_class(spa));
uint64_t space = metaslab_class_get_space(
@@ -6327,7 +6333,7 @@ dump_block_stats(spa_t *spa)
100.0 * alloc / space);
}
- if (spa_dedup_class(spa)->mc_rotor != NULL) {
+ if (spa_dedup_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_dedup_class(spa));
uint64_t space = metaslab_class_get_space(
@@ -6756,6 +6762,7 @@ import_checkpointed_state(char *target, nvlist_t *cfg, char **new_path)
{
int error = 0;
char *poolname, *bogus_name = NULL;
+ boolean_t freecfg = B_FALSE;
/* If the target is not a pool, then extract the pool name */
char *path_start = strchr(target, '/');
@@ -6774,6 +6781,7 @@ import_checkpointed_state(char *target, nvlist_t *cfg, char **new_path)
"spa_get_stats() failed with error %d\n",
poolname, error);
}
+ freecfg = B_TRUE;
}
if (asprintf(&bogus_name, "%s%s", poolname, BOGUS_SUFFIX) == -1)
@@ -6783,6 +6791,8 @@ import_checkpointed_state(char *target, nvlist_t *cfg, char **new_path)
error = spa_import(bogus_name, cfg, NULL,
ZFS_IMPORT_MISSING_LOG | ZFS_IMPORT_CHECKPOINT |
ZFS_IMPORT_SKIP_MMP);
+ if (freecfg)
+ nvlist_free(cfg);
if (error != 0) {
fatal("Tried to import pool \"%s\" but spa_import() failed "
"with error %d\n", bogus_name, error);
@@ -7011,7 +7021,6 @@ verify_checkpoint_blocks(spa_t *spa)
spa_t *checkpoint_spa;
char *checkpoint_pool;
- nvlist_t *config = NULL;
int error = 0;
/*
@@ -7019,7 +7028,7 @@ verify_checkpoint_blocks(spa_t *spa)
* name) so we can do verification on it against the current state
* of the pool.
*/
- checkpoint_pool = import_checkpointed_state(spa->spa_name, config,
+ checkpoint_pool = import_checkpointed_state(spa->spa_name, NULL,
NULL);
ASSERT(strcmp(spa->spa_name, checkpoint_pool) != 0);
@@ -8429,6 +8438,11 @@ main(int argc, char **argv)
}
}
+ if (searchdirs != NULL) {
+ umem_free(searchdirs, nsearch * sizeof (char *));
+ searchdirs = NULL;
+ }
+
/*
* import_checkpointed_state makes the assumption that the
* target pool that we pass it is already part of the spa
@@ -8447,6 +8461,11 @@ main(int argc, char **argv)
target = checkpoint_target;
}
+ if (cfg != NULL) {
+ nvlist_free(cfg);
+ cfg = NULL;
+ }
+
if (target_pool != target)
free(target_pool);
diff --git a/sys/contrib/openzfs/cmd/zed/agents/zfs_agents.c b/sys/contrib/openzfs/cmd/zed/agents/zfs_agents.c
index 6c40470e83d7..0e1bcf92765b 100644
--- a/sys/contrib/openzfs/cmd/zed/agents/zfs_agents.c
+++ b/sys/contrib/openzfs/cmd/zed/agents/zfs_agents.c
@@ -181,6 +181,8 @@ zfs_agent_post_event(const char *class, const char *subclass, nvlist_t *nvl)
* from the vdev_disk layer after a hot unplug. Fortunately we do
* get an EC_DEV_REMOVE from our disk monitor and it is a suitable
* proxy so we remap it here for the benefit of the diagnosis engine.
+ * Starting in OpenZFS 2.0, we do get FM_RESOURCE_REMOVED from the spa
+ * layer. Processing multiple FM_RESOURCE_REMOVED events is not harmful.
*/
if ((strcmp(class, EC_DEV_REMOVE) == 0) &&
(strcmp(subclass, ESC_DISK) == 0) &&
diff --git a/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c b/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c
index 8190beb0c9e7..4a58e1f1dbd3 100644
--- a/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c
+++ b/sys/contrib/openzfs/cmd/zed/agents/zfs_mod.c
@@ -435,7 +435,15 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
return;
}
- ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE, B_FALSE);
+ /*
+ * Prefer sequential resilvering when supported (mirrors and dRAID),
+ * otherwise fall back to a traditional healing resilver.
+ */
+ ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE, B_TRUE);
+ if (ret != 0) {
+ ret = zpool_vdev_attach(zhp, fullpath, path, nvroot,
+ B_TRUE, B_FALSE);
+ }
zed_log_msg(LOG_INFO, " zpool_vdev_replace: %s with %s (%s)",
fullpath, path, (ret == 0) ? "no errors" :
diff --git a/sys/contrib/openzfs/cmd/zed/agents/zfs_retire.c b/sys/contrib/openzfs/cmd/zed/agents/zfs_retire.c
index ba8a6de3a66f..89bb84e489b6 100644
--- a/sys/contrib/openzfs/cmd/zed/agents/zfs_retire.c
+++ b/sys/contrib/openzfs/cmd/zed/agents/zfs_retire.c
@@ -219,12 +219,18 @@ replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev)
* replace it.
*/
for (s = 0; s < nspares; s++) {
- char *spare_name;
+ boolean_t rebuild = B_FALSE;
+ char *spare_name, *type;
if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
&spare_name) != 0)
continue;
+ /* prefer sequential resilvering for distributed spares */
+ if ((nvlist_lookup_string(spares[s], ZPOOL_CONFIG_TYPE,
+ &type) == 0) && strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0)
+ rebuild = B_TRUE;
+
/* if set, add the "ashift" pool property to the spare nvlist */
if (source != ZPROP_SRC_DEFAULT)
(void) nvlist_add_uint64(spares[s],
@@ -237,7 +243,7 @@ replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev)
dev_name, basename(spare_name));
if (zpool_vdev_attach(zhp, dev_name, spare_name,
- replacement, B_TRUE, B_FALSE) == 0) {
+ replacement, B_TRUE, rebuild) == 0) {
free(dev_name);
nvlist_free(replacement);
return (B_TRUE);
@@ -499,6 +505,7 @@ zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
* Attempt to substitute a hot spare.
*/
(void) replace_with_spare(hdl, zhp, vdev);
+
zpool_close(zhp);
}
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/all-syslog.sh b/sys/contrib/openzfs/cmd/zed/zed.d/all-syslog.sh
index cb9286500136..270b1bc67e5c 100755
--- a/sys/contrib/openzfs/cmd/zed/zed.d/all-syslog.sh
+++ b/sys/contrib/openzfs/cmd/zed/zed.d/all-syslog.sh
@@ -1,14 +1,50 @@
#!/bin/sh
#
+# Copyright (C) 2013-2014 Lawrence Livermore National Security, LLC.
+# Copyright (c) 2020 by Delphix. All rights reserved.
+#
+
+#
# Log the zevent via syslog.
+#
[ -f "${ZED_ZEDLET_DIR}/zed.rc" ] && . "${ZED_ZEDLET_DIR}/zed.rc"
. "${ZED_ZEDLET_DIR}/zed-functions.sh"
zed_exit_if_ignoring_this_event
-zed_log_msg "eid=${ZEVENT_EID}" "class=${ZEVENT_SUBCLASS}" \
- "${ZEVENT_POOL_GUID:+"pool_guid=${ZEVENT_POOL_GUID}"}" \
- "${ZEVENT_VDEV_PATH:+"vdev_path=${ZEVENT_VDEV_PATH}"}" \
- "${ZEVENT_VDEV_STATE_STR:+"vdev_state=${ZEVENT_VDEV_STATE_STR}"}"
+# build a string of name=value pairs for this event
+msg="eid=${ZEVENT_EID} class=${ZEVENT_SUBCLASS}"
+
+if [ "${ZED_SYSLOG_DISPLAY_GUIDS}" = "1" ]; then
+ [ -n "${ZEVENT_POOL_GUID}" ] && msg="${msg} pool_guid=${ZEVENT_POOL_GUID}"
+ [ -n "${ZEVENT_VDEV_GUID}" ] && msg="${msg} vdev_guid=${ZEVENT_VDEV_GUID}"
+else
+ [ -n "${ZEVENT_POOL}" ] && msg="${msg} pool='${ZEVENT_POOL}'"
+ [ -n "${ZEVENT_VDEV_PATH}" ] && msg="${msg} vdev=$(basename "${ZEVENT_VDEV_PATH}")"
+fi
+
+# log pool state if state is anything other than 'ACTIVE'
+[ -n "${ZEVENT_POOL_STATE_STR}" ] && [ "$ZEVENT_POOL_STATE" -ne 0 ] && \
+ msg="${msg} pool_state=${ZEVENT_POOL_STATE_STR}"
+
+# Log the following payload nvpairs if they are present
+[ -n "${ZEVENT_VDEV_STATE_STR}" ] && msg="${msg} vdev_state=${ZEVENT_VDEV_STATE_STR}"
+[ -n "${ZEVENT_CKSUM_ALGORITHM}" ] && msg="${msg} algorithm=${ZEVENT_CKSUM_ALGORITHM}"
+[ -n "${ZEVENT_ZIO_SIZE}" ] && msg="${msg} size=${ZEVENT_ZIO_SIZE}"
+[ -n "${ZEVENT_ZIO_OFFSET}" ] && msg="${msg} offset=${ZEVENT_ZIO_OFFSET}"
+[ -n "${ZEVENT_ZIO_PRIORITY}" ] && msg="${msg} priority=${ZEVENT_ZIO_PRIORITY}"
+[ -n "${ZEVENT_ZIO_ERR}" ] && msg="${msg} err=${ZEVENT_ZIO_ERR}"
+[ -n "${ZEVENT_ZIO_FLAGS}" ] && msg="${msg} flags=$(printf '0x%x' "${ZEVENT_ZIO_FLAGS}")"
+
+# log delays longer than 10 milliseconds
+[ -n "${ZEVENT_ZIO_DELAY}" ] && [ "$ZEVENT_ZIO_DELAY" -gt 10000000 ] && \
+ msg="${msg} delay=$((ZEVENT_ZIO_DELAY / 1000000))ms"
+
+# list the bookmark data together
+[ -n "${ZEVENT_ZIO_OBJSET}" ] && \
+ msg="${msg} bookmark=${ZEVENT_ZIO_OBJSET}:${ZEVENT_ZIO_OBJECT}:${ZEVENT_ZIO_LEVEL}:${ZEVENT_ZIO_BLKID}"
+
+zed_log_msg "${msg}"
+
exit 0
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/history_event-zfs-list-cacher.sh.in b/sys/contrib/openzfs/cmd/zed/zed.d/history_event-zfs-list-cacher.sh.in
index 053b4414a768..bf5a121f6a79 100755
--- a/sys/contrib/openzfs/cmd/zed/zed.d/history_event-zfs-list-cacher.sh.in
+++ b/sys/contrib/openzfs/cmd/zed/zed.d/history_event-zfs-list-cacher.sh.in
@@ -13,7 +13,7 @@ FSLIST="${FSLIST_DIR}/${ZEVENT_POOL}"
[ -f "${ZED_ZEDLET_DIR}/zed.rc" ] && . "${ZED_ZEDLET_DIR}/zed.rc"
. "${ZED_ZEDLET_DIR}/zed-functions.sh"
-zed_exit_if_ignoring_this_event
+[ "$ZEVENT_SUBCLASS" != "history_event" ] && exit 0
zed_check_cmd "${ZFS}" sort diff grep
# If we are acting on a snapshot, we have nothing to do
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/zed.rc b/sys/contrib/openzfs/cmd/zed/zed.d/zed.rc
index 1b220d28db20..df560f921e60 100644
--- a/sys/contrib/openzfs/cmd/zed/zed.d/zed.rc
+++ b/sys/contrib/openzfs/cmd/zed/zed.d/zed.rc
@@ -118,5 +118,10 @@ ZED_USE_ENCLOSURE_LEDS=1
# Otherwise, if ZED_SYSLOG_SUBCLASS_EXCLUDE is set, the
# matching subclasses are excluded from logging.
#ZED_SYSLOG_SUBCLASS_INCLUDE="checksum|scrub_*|vdev.*"
-#ZED_SYSLOG_SUBCLASS_EXCLUDE="statechange|config_*|history_event"
+ZED_SYSLOG_SUBCLASS_EXCLUDE="history_event"
+
+##
+# Use GUIDs instead of names when logging pool and vdevs
+# Disabled by default, 1 to enable and 0 to disable.
+#ZED_SYSLOG_DISPLAY_GUIDS=1
diff --git a/sys/contrib/openzfs/cmd/zfs/zfs_main.c b/sys/contrib/openzfs/cmd/zfs/zfs_main.c
index 42c180890fec..ab2b006ae460 100644
--- a/sys/contrib/openzfs/cmd/zfs/zfs_main.c
+++ b/sys/contrib/openzfs/cmd/zfs/zfs_main.c
@@ -270,7 +270,7 @@ get_usage(zfs_help_t idx)
return (gettext("\tclone [-p] [-o property=value] ... "
"<snapshot> <filesystem|volume>\n"));
case HELP_CREATE:
- return (gettext("\tcreate [-Pnpv] [-o property=value] ... "
+ return (gettext("\tcreate [-Pnpuv] [-o property=value] ... "
"<filesystem>\n"
"\tcreate [-Pnpsv] [-b blocksize] [-o property=value] ... "
"-V <size> <volume>\n"));
@@ -893,6 +893,107 @@ usage:
}
/*
+ * Return a default volblocksize for the pool which always uses more than
+ * half of the data sectors. This primarily applies to dRAID which always
+ * writes full stripe widths.
+ */
+static uint64_t
+default_volblocksize(zpool_handle_t *zhp, nvlist_t *props)
+{
+ uint64_t volblocksize, asize = SPA_MINBLOCKSIZE;
+ nvlist_t *tree, **vdevs;
+ uint_t nvdevs;
+
+ nvlist_t *config = zpool_get_config(zhp, NULL);
+
+ if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) != 0 ||
+ nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN,
+ &vdevs, &nvdevs) != 0) {
+ return (ZVOL_DEFAULT_BLOCKSIZE);
+ }
+
+ for (int i = 0; i < nvdevs; i++) {
+ nvlist_t *nv = vdevs[i];
+ uint64_t ashift, ndata, nparity;
+
+ if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &ashift) != 0)
+ continue;
+
+ if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA,
+ &ndata) == 0) {
+ /* dRAID minimum allocation width */
+ asize = MAX(asize, ndata * (1ULL << ashift));
+ } else if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
+ &nparity) == 0) {
+ /* raidz minimum allocation width */
+ if (nparity == 1)
+ asize = MAX(asize, 2 * (1ULL << ashift));
+ else
+ asize = MAX(asize, 4 * (1ULL << ashift));
+ } else {
+ /* mirror or (non-redundant) leaf vdev */
+ asize = MAX(asize, 1ULL << ashift);
+ }
+ }
+
+ /*
+ * Calculate the target volblocksize such that more than half
+ * of the asize is used. The following table is for 4k sectors.
+ *
+ * n asize blksz used | n asize blksz used
+ * -------------------------+---------------------------------
+ * 1 4,096 8,192 100% | 9 36,864 32,768 88%
+ * 2 8,192 8,192 100% | 10 40,960 32,768 80%
+ * 3 12,288 8,192 66% | 11 45,056 32,768 72%
+ * 4 16,384 16,384 100% | 12 49,152 32,768 66%
+ * 5 20,480 16,384 80% | 13 53,248 32,768 61%
+ * 6 24,576 16,384 66% | 14 57,344 32,768 57%
+ * 7 28,672 16,384 57% | 15 61,440 32,768 53%
+ * 8 32,768 32,768 100% | 16 65,536 65,536 100%
+ *
+ * This is primarily a concern for dRAID which always allocates
+ * a full stripe width. For dRAID the default stripe width is
+ * n=8 in which case the volblocksize is set to 32k. Ignoring
+ * compression there are no unused sectors. This same reasoning
+ * applies to raidz[2,3] so target 4 sectors to minimize waste.
+ */
+ uint64_t tgt_volblocksize = ZVOL_DEFAULT_BLOCKSIZE;
+ while (tgt_volblocksize * 2 <= asize)
+ tgt_volblocksize *= 2;
+
+ const char *prop = zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE);
+ if (nvlist_lookup_uint64(props, prop, &volblocksize) == 0) {
+
+ /* Issue a warning when a non-optimal size is requested. */
+ if (volblocksize < ZVOL_DEFAULT_BLOCKSIZE) {
+ (void) fprintf(stderr, gettext("Warning: "
+ "volblocksize (%llu) is less than the default "
+ "minimum block size (%llu).\nTo reduce wasted "
+ "space a volblocksize of %llu is recommended.\n"),
+ (u_longlong_t)volblocksize,
+ (u_longlong_t)ZVOL_DEFAULT_BLOCKSIZE,
+ (u_longlong_t)tgt_volblocksize);
+ } else if (volblocksize < tgt_volblocksize) {
+ (void) fprintf(stderr, gettext("Warning: "
+ "volblocksize (%llu) is much less than the "
+ "minimum allocation\nunit (%llu), which wastes "
+ "at least %llu%% of space. To reduce wasted "
+ "space,\nuse a larger volblocksize (%llu is "
+ "recommended), fewer dRAID data disks\n"
+ "per group, or smaller sector size (ashift).\n"),
+ (u_longlong_t)volblocksize, (u_longlong_t)asize,
+ (u_longlong_t)((100 * (asize - volblocksize)) /
+ asize), (u_longlong_t)tgt_volblocksize);
+ }
+ } else {
+ volblocksize = tgt_volblocksize;
+ fnvlist_add_uint64(props, prop, volblocksize);
+ }
+
+ return (volblocksize);
+}
+
+/*
* zfs create [-Pnpv] [-o prop=value] ... fs
* zfs create [-Pnpsv] [-b blocksize] [-o prop=value] ... -V vol size
*
@@ -911,6 +1012,8 @@ usage:
* check of arguments and properties, but does not check for permissions,
* available space, etc.
*
+ * The '-u' flag prevents the newly created file system from being mounted.
+ *
* The '-v' flag is for verbose output.
*
* The '-P' flag is used for parseable output. It implies '-v'.
@@ -927,17 +1030,19 @@ zfs_do_create(int argc, char **argv)
boolean_t bflag = B_FALSE;
boolean_t parents = B_FALSE;
boolean_t dryrun = B_FALSE;
+ boolean_t nomount = B_FALSE;
boolean_t verbose = B_FALSE;
boolean_t parseable = B_FALSE;
int ret = 1;
nvlist_t *props;
uint64_t intval;
+ char *strval;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
/* check options */
- while ((c = getopt(argc, argv, ":PV:b:nso:pv")) != -1) {
+ while ((c = getopt(argc, argv, ":PV:b:nso:puv")) != -1) {
switch (c) {
case 'V':
type = ZFS_TYPE_VOLUME;
@@ -984,6 +1089,9 @@ zfs_do_create(int argc, char **argv)
case 's':
noreserve = B_TRUE;
break;
+ case 'u':
+ nomount = B_TRUE;
+ break;
case 'v':
verbose = B_TRUE;
break;
@@ -1003,6 +1111,11 @@ zfs_do_create(int argc, char **argv)
"used when creating a volume\n"));
goto badusage;
}
+ if (nomount && type != ZFS_TYPE_FILESYSTEM) {
+ (void) fprintf(stderr, gettext("'-u' can only be "
+ "used when creating a filesystem\n"));
+ goto badusage;
+ }
argc -= optind;
argv += optind;
@@ -1018,7 +1131,7 @@ zfs_do_create(int argc, char **argv)
goto badusage;
}
- if (dryrun || (type == ZFS_TYPE_VOLUME && !noreserve)) {
+ if (dryrun || type == ZFS_TYPE_VOLUME) {
char msg[ZFS_MAX_DATASET_NAME_LEN * 2];
char *p;
@@ -1040,18 +1153,24 @@ zfs_do_create(int argc, char **argv)
}
}
- /*
- * if volsize is not a multiple of volblocksize, round it up to the
- * nearest multiple of the volblocksize
- */
if (type == ZFS_TYPE_VOLUME) {
- uint64_t volblocksize;
+ const char *prop = zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE);
+ uint64_t volblocksize = default_volblocksize(zpool_handle,
+ real_props);
- if (nvlist_lookup_uint64(props,
- zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
- &volblocksize) != 0)
- volblocksize = ZVOL_DEFAULT_BLOCKSIZE;
+ if (volblocksize != ZVOL_DEFAULT_BLOCKSIZE &&
+ nvlist_lookup_string(props, prop, &strval) != 0) {
+ if (asprintf(&strval, "%llu",
+ (u_longlong_t)volblocksize) == -1)
+ nomem();
+ nvlist_add_string(props, prop, strval);
+ free(strval);
+ }
+ /*
+ * If volsize is not a multiple of volblocksize, round it
+ * up to the nearest multiple of the volblocksize.
+ */
if (volsize % volblocksize) {
volsize = P2ROUNDUP_TYPED(volsize, volblocksize,
uint64_t);
@@ -1064,11 +1183,9 @@ zfs_do_create(int argc, char **argv)
}
}
-
if (type == ZFS_TYPE_VOLUME && !noreserve) {
uint64_t spa_version;
zfs_prop_t resv_prop;
- char *strval;
spa_version = zpool_get_prop_int(zpool_handle,
ZPOOL_PROP_VERSION, NULL);
@@ -1159,6 +1276,11 @@ zfs_do_create(int argc, char **argv)
log_history = B_FALSE;
}
+ if (nomount) {
+ ret = 0;
+ goto error;
+ }
+
ret = zfs_mount_and_share(g_zfs, argv[0], ZFS_TYPE_DATASET);
error:
nvlist_free(props);
@@ -6596,9 +6718,9 @@ share_mount_one(zfs_handle_t *zhp, int op, int flags, char *protocol,
(void) fprintf(stderr, gettext("cannot share '%s': "
"legacy share\n"), zfs_get_name(zhp));
- (void) fprintf(stderr, gettext("use share(1M) to "
- "share this filesystem, or set "
- "sharenfs property on\n"));
+ (void) fprintf(stderr, gettext("use exports(5) or "
+ "smb.conf(5) to share this filesystem, or set "
+ "the sharenfs or sharesmb property\n"));
return (1);
}
@@ -6613,7 +6735,7 @@ share_mount_one(zfs_handle_t *zhp, int op, int flags, char *protocol,
(void) fprintf(stderr, gettext("cannot %s '%s': "
"legacy mountpoint\n"), cmdname, zfs_get_name(zhp));
- (void) fprintf(stderr, gettext("use %s(1M) to "
+ (void) fprintf(stderr, gettext("use %s(8) to "
"%s this filesystem\n"), cmdname, cmdname);
return (1);
}
@@ -7416,8 +7538,8 @@ unshare_unmount(int op, int argc, char **argv)
"unshare '%s': legacy share\n"),
zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use "
- "unshare(1M) to unshare this "
- "filesystem\n"));
+ "exports(5) or smb.conf(5) to unshare "
+ "this filesystem\n"));
ret = 1;
} else if (!zfs_is_shared(zhp)) {
(void) fprintf(stderr, gettext("cannot "
@@ -7435,7 +7557,7 @@ unshare_unmount(int op, int argc, char **argv)
"unmount '%s': legacy "
"mountpoint\n"), zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use "
- "umount(1M) to unmount this "
+ "umount(8) to unmount this "
"filesystem\n"));
ret = 1;
} else if (!zfs_is_mounted(zhp, NULL)) {
@@ -8370,7 +8492,7 @@ zfs_do_wait(int argc, char **argv)
{
boolean_t enabled[ZFS_WAIT_NUM_ACTIVITIES];
int error, i;
- char c;
+ int c;
/* By default, wait for all types of activity. */
for (i = 0; i < ZFS_WAIT_NUM_ACTIVITIES; i++)
diff --git a/sys/contrib/openzfs/cmd/zfs_ids_to_path/zfs_ids_to_path.c b/sys/contrib/openzfs/cmd/zfs_ids_to_path/zfs_ids_to_path.c
index 6cfaa6f41fa5..80dd5bf2dc2e 100644
--- a/sys/contrib/openzfs/cmd/zfs_ids_to_path/zfs_ids_to_path.c
+++ b/sys/contrib/openzfs/cmd/zfs_ids_to_path/zfs_ids_to_path.c
@@ -44,7 +44,7 @@ int
main(int argc, char **argv)
{
boolean_t verbose = B_FALSE;
- char c;
+ int c;
while ((c = getopt(argc, argv, "v")) != -1) {
switch (c) {
case 'v':
diff --git a/sys/contrib/openzfs/cmd/zgenhostid/zgenhostid.c b/sys/contrib/openzfs/cmd/zgenhostid/zgenhostid.c
index 562262928c77..50fcf05e420c 100644
--- a/sys/contrib/openzfs/cmd/zgenhostid/zgenhostid.c
+++ b/sys/contrib/openzfs/cmd/zgenhostid/zgenhostid.c
@@ -47,10 +47,10 @@ usage(void)
" -h\t\t print this usage and exit\n"
" -o <filename>\t write hostid to this file\n\n"
"If hostid file is not present, store a hostid in it.\n"
- "The optional value must be an 8-digit hex number between"
- "1 and 2^32-1.\n"
- "If no value is provided, a random one will"
- "be generated.\n"
+ "The optional value should be an 8-digit hex number between"
+ " 1 and 2^32-1.\n"
+ "If the value is 0 or no value is provided, a random one"
+ " will be generated.\n"
"The value must be unique among your systems.\n");
exit(EXIT_FAILURE);
/* NOTREACHED */
@@ -108,7 +108,7 @@ main(int argc, char **argv)
exit(EXIT_FAILURE);
}
- if (input_i < 0x1 || input_i > UINT32_MAX) {
+ if (input_i > UINT32_MAX) {
fprintf(stderr, "%s\n", strerror(ERANGE));
usage();
}
diff --git a/sys/contrib/openzfs/cmd/zhack/zhack.c b/sys/contrib/openzfs/cmd/zhack/zhack.c
index 4d958fe4365a..08263120c7c4 100644
--- a/sys/contrib/openzfs/cmd/zhack/zhack.c
+++ b/sys/contrib/openzfs/cmd/zhack/zhack.c
@@ -150,6 +150,7 @@ zhack_import(char *target, boolean_t readonly)
zfeature_checks_disable = B_TRUE;
error = spa_import(target, config, props,
(readonly ? ZFS_IMPORT_SKIP_MMP : ZFS_IMPORT_NORMAL));
+ fnvlist_free(config);
zfeature_checks_disable = B_FALSE;
if (error == EEXIST)
error = 0;
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_iter.c b/sys/contrib/openzfs/cmd/zpool/zpool_iter.c
index 5f3153bca2c2..d70d266699cf 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_iter.c
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_iter.c
@@ -56,6 +56,7 @@ typedef struct zpool_node {
struct zpool_list {
boolean_t zl_findall;
+ boolean_t zl_literal;
uu_avl_t *zl_avl;
uu_avl_pool_t *zl_pool;
zprop_list_t **zl_proplist;
@@ -88,7 +89,9 @@ add_pool(zpool_handle_t *zhp, void *data)
uu_avl_node_init(node, &node->zn_avlnode, zlp->zl_pool);
if (uu_avl_find(zlp->zl_avl, node, NULL, &idx) == NULL) {
if (zlp->zl_proplist &&
- zpool_expand_proplist(zhp, zlp->zl_proplist) != 0) {
+ zpool_expand_proplist(zhp, zlp->zl_proplist,
+ zlp->zl_literal)
+ != 0) {
zpool_close(zhp);
free(node);
return (-1);
@@ -110,7 +113,8 @@ add_pool(zpool_handle_t *zhp, void *data)
* line.
*/
zpool_list_t *
-pool_list_get(int argc, char **argv, zprop_list_t **proplist, int *err)
+pool_list_get(int argc, char **argv, zprop_list_t **proplist,
+ boolean_t literal, int *err)
{
zpool_list_t *zlp;
@@ -128,6 +132,8 @@ pool_list_get(int argc, char **argv, zprop_list_t **proplist, int *err)
zlp->zl_proplist = proplist;
+ zlp->zl_literal = literal;
+
if (argc == 0) {
(void) zpool_iter(g_zfs, add_pool, zlp);
zlp->zl_findall = B_TRUE;
@@ -242,12 +248,12 @@ pool_list_count(zpool_list_t *zlp)
*/
int
for_each_pool(int argc, char **argv, boolean_t unavail,
- zprop_list_t **proplist, zpool_iter_f func, void *data)
+ zprop_list_t **proplist, boolean_t literal, zpool_iter_f func, void *data)
{
zpool_list_t *list;
int ret = 0;
- if ((list = pool_list_get(argc, argv, proplist, &ret)) == NULL)
+ if ((list = pool_list_get(argc, argv, proplist, literal, &ret)) == NULL)
return (1);
if (pool_list_iter(list, unavail, func, data) != 0)
@@ -711,7 +717,7 @@ all_pools_for_each_vdev_run(int argc, char **argv, char *cmd,
vcdl->g_zfs = g_zfs;
/* Gather our list of all vdevs in all pools */
- for_each_pool(argc, argv, B_TRUE, NULL,
+ for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE,
all_pools_for_each_vdev_gather_cb, vcdl);
/* Run command on all vdevs in all pools */
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_main.c b/sys/contrib/openzfs/cmd/zpool/zpool_main.c
index 83a9b5a5ac07..e00fdb7ae1b0 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_main.c
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_main.c
@@ -669,9 +669,16 @@ print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
}
for (c = 0; c < children; c++) {
- uint64_t is_log = B_FALSE;
+ uint64_t is_log = B_FALSE, is_hole = B_FALSE;
char *class = "";
+ (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
+ &is_hole);
+
+ if (is_hole == B_TRUE) {
+ continue;
+ }
+
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if (is_log)
@@ -692,6 +699,54 @@ print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
}
}
+/*
+ * Print the list of l2cache devices for dry runs.
+ */
+static void
+print_cache_list(nvlist_t *nv, int indent)
+{
+ nvlist_t **child;
+ uint_t c, children;
+
+ if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
+ &child, &children) == 0 && children > 0) {
+ (void) printf("\t%*s%s\n", indent, "", "cache");
+ } else {
+ return;
+ }
+ for (c = 0; c < children; c++) {
+ char *vname;
+
+ vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
+ (void) printf("\t%*s%s\n", indent + 2, "", vname);
+ free(vname);
+ }
+}
+
+/*
+ * Print the list of spares for dry runs.
+ */
+static void
+print_spare_list(nvlist_t *nv, int indent)
+{
+ nvlist_t **child;
+ uint_t c, children;
+
+ if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
+ &child, &children) == 0 && children > 0) {
+ (void) printf("\t%*s%s\n", indent, "", "spares");
+ } else {
+ return;
+ }
+ for (c = 0; c < children; c++) {
+ char *vname;
+
+ vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
+ (void) printf("\t%*s%s\n", indent + 2, "", vname);
+ free(vname);
+ }
+}
+
static boolean_t
prop_list_contains_feature(nvlist_t *proplist)
{
@@ -921,16 +976,16 @@ zpool_do_add(int argc, char **argv)
if (dryrun) {
nvlist_t *poolnvroot;
- nvlist_t **l2child;
- uint_t l2children, c;
+ nvlist_t **l2child, **sparechild;
+ uint_t l2children, sparechildren, c;
char *vname;
- boolean_t hadcache = B_FALSE;
+ boolean_t hadcache = B_FALSE, hadspare = B_FALSE;
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&poolnvroot) == 0);
(void) printf(gettext("would update '%s' to the following "
- "configuration:\n"), zpool_get_name(zhp));
+ "configuration:\n\n"), zpool_get_name(zhp));
/* print original main pool and new tree */
print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
@@ -991,6 +1046,29 @@ zpool_do_add(int argc, char **argv)
free(vname);
}
}
+ /* And finally the spares */
+ if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
+ &sparechild, &sparechildren) == 0 && sparechildren > 0) {
+ hadspare = B_TRUE;
+ (void) printf(gettext("\tspares\n"));
+ for (c = 0; c < sparechildren; c++) {
+ vname = zpool_vdev_name(g_zfs, NULL,
+ sparechild[c], name_flags);
+ (void) printf("\t %s\n", vname);
+ free(vname);
+ }
+ }
+ if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
+ &sparechild, &sparechildren) == 0 && sparechildren > 0) {
+ if (!hadspare)
+ (void) printf(gettext("\tspares\n"));
+ for (c = 0; c < sparechildren; c++) {
+ vname = zpool_vdev_name(g_zfs, NULL,
+ sparechild[c], name_flags);
+ (void) printf("\t %s\n", vname);
+ free(vname);
+ }
+ }
ret = 0;
} else {
@@ -1548,6 +1626,8 @@ zpool_do_create(int argc, char **argv)
VDEV_ALLOC_BIAS_SPECIAL, 0);
print_vdev_tree(NULL, "logs", nvroot, 0,
VDEV_ALLOC_BIAS_LOG, 0);
+ print_cache_list(nvroot, 0);
+ print_spare_list(nvroot, 0);
ret = 0;
} else {
@@ -1762,7 +1842,7 @@ zpool_do_export(int argc, char **argv)
}
return (for_each_pool(argc, argv, B_TRUE, NULL,
- zpool_export_one, &cb));
+ B_FALSE, zpool_export_one, &cb));
}
/* check arguments */
@@ -1771,7 +1851,8 @@ zpool_do_export(int argc, char **argv)
usage(B_FALSE);
}
- ret = for_each_pool(argc, argv, B_TRUE, NULL, zpool_export_one, &cb);
+ ret = for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE, zpool_export_one,
+ &cb);
return (ret);
}
@@ -2294,7 +2375,7 @@ print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
}
}
- /* Display vdev initialization and trim status for leaves */
+ /* Display vdev initialization and trim status for leaves. */
if (children == 0) {
print_status_initialize(vs, cb->cb_print_vdev_init);
print_status_trim(vs, cb->cb_print_vdev_trim);
@@ -3613,7 +3694,8 @@ zpool_do_sync(int argc, char **argv)
argv += optind;
/* if argc == 0 we will execute zpool_sync_one on all pools */
- ret = for_each_pool(argc, argv, B_FALSE, NULL, zpool_sync_one, &force);
+ ret = for_each_pool(argc, argv, B_FALSE, NULL, B_FALSE, zpool_sync_one,
+ &force);
return (ret);
}
@@ -4958,7 +5040,7 @@ are_vdevs_in_pool(int argc, char **argv, char *pool_name,
/* Is this name a vdev in our pools? */
ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,
- is_vdev, cb);
+ B_FALSE, is_vdev, cb);
if (!ret) {
/* No match */
break;
@@ -4986,7 +5068,8 @@ is_pool_cb(zpool_handle_t *zhp, void *data)
static int
is_pool(char *name)
{
- return (for_each_pool(0, NULL, B_TRUE, NULL, is_pool_cb, name));
+ return (for_each_pool(0, NULL, B_TRUE, NULL, B_FALSE, is_pool_cb,
+ name));
}
/* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */
@@ -5438,7 +5521,7 @@ zpool_do_iostat(int argc, char **argv)
* Construct the list of all interesting pools.
*/
ret = 0;
- if ((list = pool_list_get(argc, argv, NULL, &ret)) == NULL)
+ if ((list = pool_list_get(argc, argv, NULL, parsable, &ret)) == NULL)
return (1);
if (pool_list_count(list) == 0 && argc != 0) {
@@ -6112,7 +6195,7 @@ zpool_do_list(int argc, char **argv)
for (;;) {
if ((list = pool_list_get(argc, argv, &cb.cb_proplist,
- &ret)) == NULL)
+ cb.cb_literal, &ret)) == NULL)
return (1);
if (pool_list_count(list) == 0)
@@ -6512,6 +6595,10 @@ zpool_do_split(int argc, char **argv)
"following layout:\n\n"), newpool);
print_vdev_tree(NULL, newpool, config, 0, "",
flags.name_flags);
+ print_vdev_tree(NULL, "dedup", config, 0,
+ VDEV_ALLOC_BIAS_DEDUP, 0);
+ print_vdev_tree(NULL, "special", config, 0,
+ VDEV_ALLOC_BIAS_SPECIAL, 0);
}
}
@@ -6864,7 +6951,7 @@ zpool_do_reopen(int argc, char **argv)
argv += optind;
/* if argc == 0 we will execute zpool_reopen_one on all pools */
- ret = for_each_pool(argc, argv, B_TRUE, NULL, zpool_reopen_one,
+ ret = for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE, zpool_reopen_one,
&scrub_restart);
return (ret);
@@ -6994,12 +7081,13 @@ zpool_do_scrub(int argc, char **argv)
usage(B_FALSE);
}
- error = for_each_pool(argc, argv, B_TRUE, NULL, scrub_callback, &cb);
+ error = for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE,
+ scrub_callback, &cb);
if (wait && !error) {
zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;
- error = for_each_pool(argc, argv, B_TRUE, NULL, wait_callback,
- &act);
+ error = for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE,
+ wait_callback, &act);
}
return (error);
@@ -7037,7 +7125,8 @@ zpool_do_resilver(int argc, char **argv)
usage(B_FALSE);
}
- return (for_each_pool(argc, argv, B_TRUE, NULL, scrub_callback, &cb));
+ return (for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE,
+ scrub_callback, &cb));
}
/*
@@ -7590,7 +7679,7 @@ print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
vdev_name = zpool_vdev_name(g_zfs, zhp,
child[prs->prs_removing_vdev], B_TRUE);
- (void) printf(gettext("remove: "));
+ printf_color(ANSI_BOLD, gettext("remove: "));
start = prs->prs_start_time;
end = prs->prs_end_time;
@@ -8431,7 +8520,7 @@ zpool_do_status(int argc, char **argv)
cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
NULL, NULL, 0, 0);
- ret = for_each_pool(argc, argv, B_TRUE, NULL,
+ ret = for_each_pool(argc, argv, B_TRUE, NULL, cb.cb_literal,
status_callback, &cb);
if (cb.vcdl != NULL)
@@ -8950,7 +9039,7 @@ zpool_do_upgrade(int argc, char **argv)
(void) printf(gettext("\n"));
}
} else {
- ret = for_each_pool(argc, argv, B_FALSE, NULL,
+ ret = for_each_pool(argc, argv, B_FALSE, NULL, B_FALSE,
upgrade_one, &cb);
}
@@ -9036,6 +9125,12 @@ print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb)
dump_nvlist(fnvlist_lookup_nvlist(rec,
ZPOOL_HIST_OUTPUT_NVL), 8);
}
+ if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) {
+ (void) printf(" output nvlist omitted; "
+ "original size: %lldKB\n",
+ (longlong_t)fnvlist_lookup_int64(rec,
+ ZPOOL_HIST_OUTPUT_SIZE) / 1024);
+ }
if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) {
(void) printf(" errno: %lld\n",
(longlong_t)fnvlist_lookup_int64(rec,
@@ -9133,7 +9228,7 @@ zpool_do_history(int argc, char **argv)
argc -= optind;
argv += optind;
- ret = for_each_pool(argc, argv, B_FALSE, NULL, get_history_one,
+ ret = for_each_pool(argc, argv, B_FALSE, NULL, B_FALSE, get_history_one,
&cbdata);
if (argc == 0 && cbdata.first == B_TRUE) {
@@ -9696,7 +9791,7 @@ zpool_do_get(int argc, char **argv)
cb.cb_proplist = &fake_name;
}
- ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist,
+ ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_literal,
get_callback, &cb);
if (cb.cb_proplist == &fake_name)
@@ -9766,7 +9861,7 @@ zpool_do_set(int argc, char **argv)
*(cb.cb_value) = '\0';
cb.cb_value++;
- error = for_each_pool(argc - 2, argv + 2, B_TRUE, NULL,
+ error = for_each_pool(argc - 2, argv + 2, B_TRUE, NULL, B_FALSE,
set_callback, &cb);
return (error);
@@ -9849,7 +9944,8 @@ vdev_any_spare_replacing(nvlist_t *nv)
(void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type);
if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 ||
- strcmp(vdev_type, VDEV_TYPE_SPARE) == 0) {
+ strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 ||
+ strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) {
return (B_TRUE);
}
@@ -10051,7 +10147,7 @@ int
zpool_do_wait(int argc, char **argv)
{
boolean_t verbose = B_FALSE;
- char c;
+ int c;
char *value;
int i;
unsigned long count;
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_util.h b/sys/contrib/openzfs/cmd/zpool/zpool_util.h
index 265aa58953a0..abaa22d78c20 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_util.h
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_util.h
@@ -64,7 +64,7 @@ nvlist_t *split_mirror_vdev(zpool_handle_t *zhp, char *newname,
* Pool list functions
*/
int for_each_pool(int, char **, boolean_t unavail, zprop_list_t **,
- zpool_iter_f, void *);
+ boolean_t, zpool_iter_f, void *);
/* Vdev list functions */
typedef int (*pool_vdev_iter_f)(zpool_handle_t *, nvlist_t *, void *);
@@ -72,7 +72,7 @@ int for_each_vdev(zpool_handle_t *zhp, pool_vdev_iter_f func, void *data);
typedef struct zpool_list zpool_list_t;
-zpool_list_t *pool_list_get(int, char **, zprop_list_t **, int *);
+zpool_list_t *pool_list_get(int, char **, zprop_list_t **, boolean_t, int *);
void pool_list_update(zpool_list_t *);
int pool_list_iter(zpool_list_t *, int unavail, zpool_iter_f, void *);
void pool_list_free(zpool_list_t *);
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c b/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c
index 9aa09b18c4ae..c86081a8153a 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_vdev.c
@@ -86,9 +86,6 @@
boolean_t error_seen;
boolean_t is_force;
-
-
-
/*PRINTFLIKE1*/
void
vdev_error(const char *fmt, ...)
@@ -222,6 +219,9 @@ is_spare(nvlist_t *config, const char *path)
uint_t i, nspares;
boolean_t inuse;
+ if (zpool_is_draid_spare(path))
+ return (B_TRUE);
+
if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0)
return (B_FALSE);
@@ -267,9 +267,10 @@ is_spare(nvlist_t *config, const char *path)
* /dev/xxx Complete disk path
* /xxx Full path to file
* xxx Shorthand for <zfs_vdev_paths>/xxx
+ * draid* Virtual dRAID spare
*/
static nvlist_t *
-make_leaf_vdev(nvlist_t *props, const char *arg, uint64_t is_log)
+make_leaf_vdev(nvlist_t *props, const char *arg, boolean_t is_primary)
{
char path[MAXPATHLEN];
struct stat64 statbuf;
@@ -309,6 +310,17 @@ make_leaf_vdev(nvlist_t *props, const char *arg, uint64_t is_log)
/* After whole disk check restore original passed path */
strlcpy(path, arg, sizeof (path));
+ } else if (zpool_is_draid_spare(arg)) {
+ if (!is_primary) {
+ (void) fprintf(stderr,
+ gettext("cannot open '%s': dRAID spares can only "
+ "be used to replace primary vdevs\n"), arg);
+ return (NULL);
+ }
+
+ wholedisk = B_TRUE;
+ strlcpy(path, arg, sizeof (path));
+ type = VDEV_TYPE_DRAID_SPARE;
} else {
err = is_shorthand_path(arg, path, sizeof (path),
&statbuf, &wholedisk);
@@ -337,17 +349,19 @@ make_leaf_vdev(nvlist_t *props, const char *arg, uint64_t is_log)
}
}
- /*
- * Determine whether this is a device or a file.
- */
- if (wholedisk || S_ISBLK(statbuf.st_mode)) {
- type = VDEV_TYPE_DISK;
- } else if (S_ISREG(statbuf.st_mode)) {
- type = VDEV_TYPE_FILE;
- } else {
- (void) fprintf(stderr, gettext("cannot use '%s': must be a "
- "block device or regular file\n"), path);
- return (NULL);
+ if (type == NULL) {
+ /*
+ * Determine whether this is a device or a file.
+ */
+ if (wholedisk || S_ISBLK(statbuf.st_mode)) {
+ type = VDEV_TYPE_DISK;
+ } else if (S_ISREG(statbuf.st_mode)) {
+ type = VDEV_TYPE_FILE;
+ } else {
+ fprintf(stderr, gettext("cannot use '%s': must "
+ "be a block device or regular file\n"), path);
+ return (NULL);
+ }
}
/*
@@ -358,10 +372,7 @@ make_leaf_vdev(nvlist_t *props, const char *arg, uint64_t is_log)
verify(nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) == 0);
verify(nvlist_add_string(vdev, ZPOOL_CONFIG_PATH, path) == 0);
verify(nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, type) == 0);
- verify(nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_LOG, is_log) == 0);
- if (is_log)
- verify(nvlist_add_string(vdev, ZPOOL_CONFIG_ALLOCATION_BIAS,
- VDEV_ALLOC_BIAS_LOG) == 0);
+
if (strcmp(type, VDEV_TYPE_DISK) == 0)
verify(nvlist_add_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK,
(uint64_t)wholedisk) == 0);
@@ -432,11 +443,16 @@ typedef struct replication_level {
#define ZPOOL_FUZZ (16 * 1024 * 1024)
+/*
+ * N.B. For the purposes of comparing replication levels, dRAID can be
+ * considered functionally equivalent to raidz.
+ */
static boolean_t
is_raidz_mirror(replication_level_t *a, replication_level_t *b,
replication_level_t **raidz, replication_level_t **mirror)
{
- if (strcmp(a->zprl_type, "raidz") == 0 &&
+ if ((strcmp(a->zprl_type, "raidz") == 0 ||
+ strcmp(a->zprl_type, "draid") == 0) &&
strcmp(b->zprl_type, "mirror") == 0) {
*raidz = a;
*mirror = b;
@@ -446,6 +462,22 @@ is_raidz_mirror(replication_level_t *a, replication_level_t *b,
}
/*
+ * Comparison for determining if dRAID and raidz were passed in either order.
+ */
+static boolean_t
+is_raidz_draid(replication_level_t *a, replication_level_t *b)
+{
+ if ((strcmp(a->zprl_type, "raidz") == 0 ||
+ strcmp(a->zprl_type, "draid") == 0) &&
+ (strcmp(b->zprl_type, "raidz") == 0 ||
+ strcmp(b->zprl_type, "draid") == 0)) {
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+/*
* Given a list of toplevel vdevs, return the current replication level. If
* the config is inconsistent, then NULL is returned. If 'fatal' is set, then
* an error message will be displayed for each self-inconsistent vdev.
@@ -511,7 +543,8 @@ get_replication(nvlist_t *nvroot, boolean_t fatal)
rep.zprl_type = type;
rep.zprl_children = 0;
- if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
+ if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
+ strcmp(type, VDEV_TYPE_DRAID) == 0) {
verify(nvlist_lookup_uint64(nv,
ZPOOL_CONFIG_NPARITY,
&rep.zprl_parity) == 0);
@@ -677,6 +710,29 @@ get_replication(nvlist_t *nvroot, boolean_t fatal)
else
return (NULL);
}
+ } else if (is_raidz_draid(&lastrep, &rep)) {
+ /*
+ * Accept raidz and draid when they can
+ * handle the same number of disk failures.
+ */
+ if (lastrep.zprl_parity != rep.zprl_parity) {
+ if (ret != NULL)
+ free(ret);
+ ret = NULL;
+ if (fatal)
+ vdev_error(gettext(
+ "mismatched replication "
+ "level: %s and %s vdevs "
+ "with different "
+ "redundancy, %llu vs. "
+ "%llu are present\n"),
+ lastrep.zprl_type,
+ rep.zprl_type,
+ lastrep.zprl_parity,
+ rep.zprl_parity);
+ else
+ return (NULL);
+ }
} else if (strcmp(lastrep.zprl_type, rep.zprl_type) !=
0) {
if (ret != NULL)
@@ -1103,31 +1159,87 @@ is_device_in_use(nvlist_t *config, nvlist_t *nv, boolean_t force,
return (anyinuse);
}
-static const char *
-is_grouping(const char *type, int *mindev, int *maxdev)
+/*
+ * Returns the parity level extracted from a raidz or draid type.
+ * If the parity cannot be determined zero is returned.
+ */
+static int
+get_parity(const char *type)
{
- if (strncmp(type, "raidz", 5) == 0) {
- const char *p = type + 5;
- char *end;
- long nparity;
+ long parity = 0;
+ const char *p;
+
+ if (strncmp(type, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0) {
+ p = type + strlen(VDEV_TYPE_RAIDZ);
if (*p == '\0') {
- nparity = 1;
+ /* when unspecified default to single parity */
+ return (1);
} else if (*p == '0') {
- return (NULL); /* no zero prefixes allowed */
+ /* no zero prefixes allowed */
+ return (0);
} else {
+ /* 0-3, no suffixes allowed */
+ char *end;
errno = 0;
- nparity = strtol(p, &end, 10);
- if (errno != 0 || nparity < 1 || nparity >= 255 ||
- *end != '\0')
- return (NULL);
+ parity = strtol(p, &end, 10);
+ if (errno != 0 || *end != '\0' ||
+ parity < 1 || parity > VDEV_RAIDZ_MAXPARITY) {
+ return (0);
+ }
+ }
+ } else if (strncmp(type, VDEV_TYPE_DRAID,
+ strlen(VDEV_TYPE_DRAID)) == 0) {
+ p = type + strlen(VDEV_TYPE_DRAID);
+
+ if (*p == '\0' || *p == ':') {
+ /* when unspecified default to single parity */
+ return (1);
+ } else if (*p == '0') {
+ /* no zero prefixes allowed */
+ return (0);
+ } else {
+ /* 0-3, allowed suffixes: '\0' or ':' */
+ char *end;
+ errno = 0;
+ parity = strtol(p, &end, 10);
+ if (errno != 0 ||
+ parity < 1 || parity > VDEV_DRAID_MAXPARITY ||
+ (*end != '\0' && *end != ':')) {
+ return (0);
+ }
}
+ }
+
+ return ((int)parity);
+}
+
+/*
+ * Assign the minimum and maximum number of devices allowed for
+ * the specified type. On error NULL is returned, otherwise the
+ * type prefix is returned (raidz, mirror, etc).
+ */
+static const char *
+is_grouping(const char *type, int *mindev, int *maxdev)
+{
+ int nparity;
+ if (strncmp(type, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
+ strncmp(type, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) == 0) {
+ nparity = get_parity(type);
+ if (nparity == 0)
+ return (NULL);
if (mindev != NULL)
*mindev = nparity + 1;
if (maxdev != NULL)
*maxdev = 255;
- return (VDEV_TYPE_RAIDZ);
+
+ if (strncmp(type, VDEV_TYPE_RAIDZ,
+ strlen(VDEV_TYPE_RAIDZ)) == 0) {
+ return (VDEV_TYPE_RAIDZ);
+ } else {
+ return (VDEV_TYPE_DRAID);
+ }
}
if (maxdev != NULL)
@@ -1168,6 +1280,163 @@ is_grouping(const char *type, int *mindev, int *maxdev)
}
/*
+ * Extract the configuration parameters encoded in the dRAID type and
+ * use them to generate a dRAID configuration. The expected format is:
+ *
+ * draid[<parity>][:<data><d|D>][:<children><c|C>][:<spares><s|S>]
+ *
+ * The intent is to be able to generate a good configuration when no
+ * additional information is provided. The only mandatory component
+ * of the 'type' is the 'draid' prefix. If a value is not provided
+ * then reasonable defaults are used. The optional components may
+ * appear in any order but the d/s/c suffix is required.
+ *
+ * Valid inputs:
+ * - data: number of data devices per group (1-255)
+ * - parity: number of parity blocks per group (1-3)
+ * - spares: number of distributed spares (0-100)
+ * - children: total number of devices (1-255)
+ *
+ * Examples:
+ * - zpool create tank draid <devices...>
+ * - zpool create tank draid2:8d:51c:2s <devices...>
+ */
+static int
+draid_config_by_type(nvlist_t *nv, const char *type, uint64_t children)
+{
+ uint64_t nparity = 1;
+ uint64_t nspares = 0;
+ uint64_t ndata = UINT64_MAX;
+ uint64_t ngroups = 1;
+ long value;
+
+ if (strncmp(type, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) != 0)
+ return (EINVAL);
+
+ nparity = (uint64_t)get_parity(type);
+ if (nparity == 0)
+ return (EINVAL);
+
+ char *p = (char *)type;
+ while ((p = strchr(p, ':')) != NULL) {
+ char *end;
+
+ p = p + 1;
+ errno = 0;
+
+ if (!isdigit(p[0])) {
+ (void) fprintf(stderr, gettext("invalid dRAID "
+ "syntax; expected [:<number><c|d|s>] not '%s'\n"),
+ type);
+ return (EINVAL);
+ }
+
+ /* Expected non-zero value with c/d/s suffix */
+ value = strtol(p, &end, 10);
+ char suffix = tolower(*end);
+ if (errno != 0 ||
+ (suffix != 'c' && suffix != 'd' && suffix != 's')) {
+ (void) fprintf(stderr, gettext("invalid dRAID "
+ "syntax; expected [:<number><c|d|s>] not '%s'\n"),
+ type);
+ return (EINVAL);
+ }
+
+ if (suffix == 'c') {
+ if ((uint64_t)value != children) {
+ fprintf(stderr,
+ gettext("invalid number of dRAID children; "
+ "%llu required but %llu provided\n"),
+ (u_longlong_t)value,
+ (u_longlong_t)children);
+ return (EINVAL);
+ }
+ } else if (suffix == 'd') {
+ ndata = (uint64_t)value;
+ } else if (suffix == 's') {
+ nspares = (uint64_t)value;
+ } else {
+ verify(0); /* Unreachable */
+ }
+ }
+
+ /*
+ * When a specific number of data disks is not provided, limit a
+ * redundancy group to 8 data disks. This value was selected to
+ * provide a reasonable tradeoff between capacity and performance.
+ */
+ if (ndata == UINT64_MAX) {
+ if (children > nspares + nparity) {
+ ndata = MIN(children - nspares - nparity, 8);
+ } else {
+ fprintf(stderr, gettext("request number of "
+ "distributed spares %llu and parity level %llu\n"
+ "leaves no disks available for data\n"),
+ (u_longlong_t)nspares, (u_longlong_t)nparity);
+ return (EINVAL);
+ }
+ }
+
+ /* Verify the maximum allowed group size is never exceeded. */
+ if (ndata == 0 || (ndata + nparity > children - nspares)) {
+ fprintf(stderr, gettext("requested number of dRAID data "
+ "disks per group %llu is too high,\nat most %llu disks "
+ "are available for data\n"), (u_longlong_t)ndata,
+ (u_longlong_t)(children - nspares - nparity));
+ return (EINVAL);
+ }
+
+ if (nparity == 0 || nparity > VDEV_DRAID_MAXPARITY) {
+ fprintf(stderr,
+ gettext("invalid dRAID parity level %llu; must be "
+ "between 1 and %d\n"), (u_longlong_t)nparity,
+ VDEV_DRAID_MAXPARITY);
+ return (EINVAL);
+ }
+
+ /*
+ * Verify the requested number of spares can be satisfied.
+ * An arbitrary limit of 100 distributed spares is applied.
+ */
+ if (nspares > 100 || nspares > (children - (ndata + nparity))) {
+ fprintf(stderr,
+ gettext("invalid number of dRAID spares %llu; additional "
+ "disks would be required\n"), (u_longlong_t)nspares);
+ return (EINVAL);
+ }
+
+ /* Verify the requested number of children is sufficient. */
+ if (children < (ndata + nparity + nspares)) {
+ fprintf(stderr, gettext("%llu disks were provided, but at "
+ "least %llu disks are required for this config\n"),
+ (u_longlong_t)children,
+ (u_longlong_t)(ndata + nparity + nspares));
+ }
+
+ if (children > VDEV_DRAID_MAX_CHILDREN) {
+ fprintf(stderr, gettext("%llu disks were provided, but "
+ "dRAID only supports up to %u disks"),
+ (u_longlong_t)children, VDEV_DRAID_MAX_CHILDREN);
+ }
+
+ /*
+ * Calculate the minimum number of groups required to fill a slice.
+ * This is the LCM of the stripe width (ndata + nparity) and the
+ * number of data drives (children - nspares).
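+ * For example, draid2:4d:1s across 23 children has a stripe width of
+ * six and 22 data drives, so the loop below stops at ngroups = 11
+ * (11 * 6 = 66, a multiple of 22).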
+ */
+ while (ngroups * (ndata + nparity) % (children - nspares) != 0)
+ ngroups++;
+
+ /* Store the basic dRAID configuration. */
+ fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, nparity);
+ fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA, ndata);
+ fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NSPARES, nspares);
+ fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NGROUPS, ngroups);
+
+ return (0);
+}
+
+/*
* Construct a syntactically valid vdev specification,
* and ensure that all devices and files exist and can be opened.
* Note: we don't bother freeing anything in the error paths
@@ -1178,8 +1447,8 @@ construct_spec(nvlist_t *props, int argc, char **argv)
{
nvlist_t *nvroot, *nv, **top, **spares, **l2cache;
int t, toplevels, mindev, maxdev, nspares, nlogs, nl2cache;
- const char *type;
- uint64_t is_log, is_special, is_dedup;
+ const char *type, *fulltype;
+ boolean_t is_log, is_special, is_dedup, is_spare;
boolean_t seen_logs;
top = NULL;
@@ -1189,18 +1458,20 @@ construct_spec(nvlist_t *props, int argc, char **argv)
nspares = 0;
nlogs = 0;
nl2cache = 0;
- is_log = is_special = is_dedup = B_FALSE;
+ is_log = is_special = is_dedup = is_spare = B_FALSE;
seen_logs = B_FALSE;
nvroot = NULL;
while (argc > 0) {
+ fulltype = argv[0];
nv = NULL;
/*
- * If it's a mirror or raidz, the subsequent arguments are
- * its leaves -- until we encounter the next mirror or raidz.
+ * If it's a mirror, raidz, or draid the subsequent arguments
+ * are its leaves -- until we encounter the next mirror,
+ * raidz or draid.
*/
- if ((type = is_grouping(argv[0], &mindev, &maxdev)) != NULL) {
+ if ((type = is_grouping(fulltype, &mindev, &maxdev)) != NULL) {
nvlist_t **child = NULL;
int c, children = 0;
@@ -1212,6 +1483,7 @@ construct_spec(nvlist_t *props, int argc, char **argv)
"specified only once\n"));
goto spec_out;
}
+ is_spare = B_TRUE;
is_log = is_special = is_dedup = B_FALSE;
}
@@ -1225,8 +1497,7 @@ construct_spec(nvlist_t *props, int argc, char **argv)
}
seen_logs = B_TRUE;
is_log = B_TRUE;
- is_special = B_FALSE;
- is_dedup = B_FALSE;
+ is_special = is_dedup = is_spare = B_FALSE;
argc--;
argv++;
/*
@@ -1238,8 +1509,7 @@ construct_spec(nvlist_t *props, int argc, char **argv)
if (strcmp(type, VDEV_ALLOC_BIAS_SPECIAL) == 0) {
is_special = B_TRUE;
- is_log = B_FALSE;
- is_dedup = B_FALSE;
+ is_log = is_dedup = is_spare = B_FALSE;
argc--;
argv++;
continue;
@@ -1247,8 +1517,7 @@ construct_spec(nvlist_t *props, int argc, char **argv)
if (strcmp(type, VDEV_ALLOC_BIAS_DEDUP) == 0) {
is_dedup = B_TRUE;
- is_log = B_FALSE;
- is_special = B_FALSE;
+ is_log = is_special = is_spare = B_FALSE;
argc--;
argv++;
continue;
@@ -1262,7 +1531,8 @@ construct_spec(nvlist_t *props, int argc, char **argv)
"specified only once\n"));
goto spec_out;
}
- is_log = is_special = is_dedup = B_FALSE;
+ is_log = is_special = B_FALSE;
+ is_dedup = is_spare = B_FALSE;
}
if (is_log || is_special || is_dedup) {
@@ -1280,13 +1550,15 @@ construct_spec(nvlist_t *props, int argc, char **argv)
for (c = 1; c < argc; c++) {
if (is_grouping(argv[c], NULL, NULL) != NULL)
break;
+
children++;
child = realloc(child,
children * sizeof (nvlist_t *));
if (child == NULL)
zpool_no_memory();
if ((nv = make_leaf_vdev(props, argv[c],
- B_FALSE)) == NULL) {
+ !(is_log || is_special || is_dedup ||
+ is_spare))) == NULL) {
for (c = 0; c < children - 1; c++)
nvlist_free(child[c]);
free(child);
@@ -1335,10 +1607,11 @@ construct_spec(nvlist_t *props, int argc, char **argv)
type) == 0);
verify(nvlist_add_uint64(nv,
ZPOOL_CONFIG_IS_LOG, is_log) == 0);
- if (is_log)
+ if (is_log) {
verify(nvlist_add_string(nv,
ZPOOL_CONFIG_ALLOCATION_BIAS,
VDEV_ALLOC_BIAS_LOG) == 0);
+ }
if (is_special) {
verify(nvlist_add_string(nv,
ZPOOL_CONFIG_ALLOCATION_BIAS,
@@ -1354,6 +1627,15 @@ construct_spec(nvlist_t *props, int argc, char **argv)
ZPOOL_CONFIG_NPARITY,
mindev - 1) == 0);
}
+ if (strcmp(type, VDEV_TYPE_DRAID) == 0) {
+ if (draid_config_by_type(nv,
+ fulltype, children) != 0) {
+ for (c = 0; c < children; c++)
+ nvlist_free(child[c]);
+ free(child);
+ goto spec_out;
+ }
+ }
verify(nvlist_add_nvlist_array(nv,
ZPOOL_CONFIG_CHILDREN, child,
children) == 0);
@@ -1367,12 +1649,19 @@ construct_spec(nvlist_t *props, int argc, char **argv)
* We have a device. Pass off to make_leaf_vdev() to
* construct the appropriate nvlist describing the vdev.
*/
- if ((nv = make_leaf_vdev(props, argv[0],
- is_log)) == NULL)
+ if ((nv = make_leaf_vdev(props, argv[0], !(is_log ||
+ is_special || is_dedup || is_spare))) == NULL)
goto spec_out;
- if (is_log)
+ verify(nvlist_add_uint64(nv,
+ ZPOOL_CONFIG_IS_LOG, is_log) == 0);
+ if (is_log) {
+ verify(nvlist_add_string(nv,
+ ZPOOL_CONFIG_ALLOCATION_BIAS,
+ VDEV_ALLOC_BIAS_LOG) == 0);
nlogs++;
+ }
+
if (is_special) {
verify(nvlist_add_string(nv,
ZPOOL_CONFIG_ALLOCATION_BIAS,
diff --git a/sys/contrib/openzfs/cmd/zpool_influxdb/.gitignore b/sys/contrib/openzfs/cmd/zpool_influxdb/.gitignore
new file mode 100644
index 000000000000..bd765d188278
--- /dev/null
+++ b/sys/contrib/openzfs/cmd/zpool_influxdb/.gitignore
@@ -0,0 +1 @@
+/zpool_influxdb
diff --git a/sys/contrib/openzfs/cmd/zpool_influxdb/Makefile.am b/sys/contrib/openzfs/cmd/zpool_influxdb/Makefile.am
new file mode 100644
index 000000000000..28e94d616e61
--- /dev/null
+++ b/sys/contrib/openzfs/cmd/zpool_influxdb/Makefile.am
@@ -0,0 +1,11 @@
+include $(top_srcdir)/config/Rules.am
+
+zfsexec_PROGRAMS = zpool_influxdb
+
+zpool_influxdb_SOURCES = \
+ zpool_influxdb.c
+
+zpool_influxdb_LDADD = \
+ $(top_builddir)/lib/libspl/libspl.la \
+ $(top_builddir)/lib/libnvpair/libnvpair.la \
+ $(top_builddir)/lib/libzfs/libzfs.la
diff --git a/sys/contrib/openzfs/cmd/zpool_influxdb/README.md b/sys/contrib/openzfs/cmd/zpool_influxdb/README.md
new file mode 100644
index 000000000000..864d67498325
--- /dev/null
+++ b/sys/contrib/openzfs/cmd/zpool_influxdb/README.md
@@ -0,0 +1,294 @@
+# Influxdb Metrics for ZFS Pools
+The _zpool_influxdb_ program produces
+[influxdb](https://github.com/influxdata/influxdb) line protocol
+compatible metrics from zpools. In the UNIX tradition, _zpool_influxdb_
+does one thing: read statistics from a pool and print them to
+stdout. In many ways, this is a metrics-friendly output of
+statistics normally observed via the `zpool` command.
+
+## Usage
+When run without arguments, _zpool_influxdb_ runs once, reading data
+from all imported pools, and prints to stdout.
+```shell
+zpool_influxdb [options] [poolname]
+```
+If no poolname is specified, then all pools are sampled.
+
+| option | short option | description |
+|---|---|---|
+| --execd | -e | For use with telegraf's `execd` plugin. When [enter] is pressed, the pools are sampled. To exit, use [ctrl+D] |
+| --no-histogram | -n | Do not print histogram information |
+| --signed-int | -i | Use signed integer data type (default=unsigned) |
+| --sum-histogram-buckets | -s | Sum histogram bucket values |
+| --tags key=value[,key=value...] | -t | Add tags to data points. No tag sanity checking is performed. |
+| --help | -h | Print a short usage message |
+
+#### Histogram Bucket Values
+The histogram data collected by ZFS is stored as independent bucket values.
+This works well out-of-the-box with an influxdb data source and grafana's
+heatmap visualization. The influxdb query for a grafana heatmap
+visualization looks like:
+```
+field(disk_read) last() non_negative_derivative(1s)
+```
+
+Another method for storing histogram data sums the values for lower-value
+buckets. For example, a latency bucket tagged "le=10" includes the values
+in the bucket "le=1".
+This method is often used for Prometheus histograms.
+The `zpool_influxdb --sum-histogram-buckets` option presents the data from ZFS
+as summed values.
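+
+As a rough sketch (not the collector's actual code), the summed form can be
+produced by a running total over the independent bucket counts:
+```c
+/*
+ * Illustrative only: convert independent histogram bucket counts into
+ * the cumulative form emitted by --sum-histogram-buckets. The bucket
+ * array and its length are hypothetical inputs.
+ */
+#include <stddef.h>
+#include <stdint.h>
+
+static void
+sum_histogram_buckets(uint64_t *buckets, size_t nbuckets)
+{
+	/* each bucket accumulates the counts of all lower-value buckets */
+	for (size_t i = 1; i < nbuckets; i++)
+		buckets[i] += buckets[i - 1];
+}
+```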
+
+## Measurements
+The following measurements are collected:
+
+| measurement | description | zpool equivalent |
+|---|---|---|
+| zpool_stats | general size and data | zpool list |
+| zpool_scan_stats | scrub, rebuild, and resilver statistics (omitted if no scan has been requested) | zpool status |
+| zpool_vdev_stats | per-vdev statistics | zpool iostat -q |
+| zpool_io_size | per-vdev I/O size histogram | zpool iostat -r |
+| zpool_latency | per-vdev I/O latency histogram | zpool iostat -w |
+| zpool_vdev_queue | per-vdev instantaneous queue depth | zpool iostat -q |
+
+### zpool_stats Description
+zpool_stats contains top-level summary statistics for the pool.
+Performance counters measure the I/Os to the pool's devices.
+
+#### zpool_stats Tags
+
+| label | description |
+|---|---|
+| name | pool name |
+| path | for leaf vdevs, the pathname |
+| state | pool state, as shown by _zpool status_ |
+| vdev | vdev name (root = entire pool) |
+
+#### zpool_stats Fields
+
+| field | units | description |
+|---|---|---|
+| alloc | bytes | allocated space |
+| free | bytes | unallocated space |
+| size | bytes | total pool size |
+| read_bytes | bytes | bytes read since pool import |
+| read_errors | count | number of read errors |
+| read_ops | count | number of read operations |
+| write_bytes | bytes | bytes written since pool import |
+| write_errors | count | number of write errors |
+| write_ops | count | number of write operations |
+
+### zpool_scan_stats Description
+Once a pool has been scrubbed, resilvered, or rebuilt, the zpool_scan_stats
+contain information about the status and performance of the operation.
+Otherwise, the zpool_scan_stats do not exist in the kernel, and therefore
+cannot be reported by this collector.
+
+#### zpool_scan_stats Tags
+
+| label | description |
+|---|---|
+| name | pool name |
+| function | name of the scan function running or recently completed |
+| state | scan state, as shown by _zpool status_ |
+
+#### zpool_scan_stats Fields
+
+| field | units | description |
+|---|---|---|
+| errors | count | number of errors encountered by scan |
+| examined | bytes | total data examined during scan |
+| to_examine | bytes | prediction of total bytes to be scanned |
+| pass_examined | bytes | data examined during current scan pass |
+| issued | bytes | size of I/Os issued to disks |
+| pass_issued | bytes | size of I/Os issued to disks for current pass |
+| processed | bytes | data reconstructed during scan |
+| to_process | bytes | total bytes to be repaired |
+| rate | bytes/sec | examination rate |
+| start_ts | epoch timestamp | start timestamp for scan |
+| pause_ts | epoch timestamp | timestamp for a scan pause request |
+| end_ts | epoch timestamp | completion timestamp for scan |
+| paused_t | seconds | elapsed time while paused |
+| remaining_t | seconds | estimate of time remaining for scan |
+
+### zpool_vdev_stats Description
+The ZFS I/O (ZIO) scheduler uses five queues to schedule I/Os to each vdev.
+These queues are further divided into active and pending states.
+An I/O is pending prior to being issued to the vdev. An active
+I/O has been issued to the vdev. The scheduler and its tunable
+parameters are described at the
+[ZFS documentation for ZIO Scheduler](https://openzfs.github.io/openzfs-docs/Performance%20and%20Tuning/ZIO%20Scheduler.html).
+The ZIO scheduler reports the queue depths as gauges where the value
+represents an instantaneous snapshot of the queue depth at
+the sample time. Therefore, it is not unusual to see all zeroes
+for an idle pool.
+
+#### zpool_vdev_stats Tags
+| label | description |
+|---|---|
+| name | pool name |
+| vdev | vdev name (root = entire pool) |
+
+#### zpool_vdev_stats Fields
+| field | units | description |
+|---|---|---|
+| sync_r_active_queue | entries | synchronous read active queue depth |
+| sync_w_active_queue | entries | synchronous write active queue depth |
+| async_r_active_queue | entries | asynchronous read active queue depth |
+| async_w_active_queue | entries | asynchronous write active queue depth |
+| async_scrub_active_queue | entries | asynchronous scrub active queue depth |
+| sync_r_pend_queue | entries | synchronous read pending queue depth |
+| sync_w_pend_queue | entries | synchronous write pending queue depth |
+| async_r_pend_queue | entries | asynchronous read pending queue depth |
+| async_w_pend_queue | entries | asynchronous write pending queue depth |
+| async_scrub_pend_queue | entries | asynchronous scrub pending queue depth |
+
+### zpool_latency Histogram
+ZFS tracks the latency of each I/O in the ZIO pipeline. This latency can
+be useful for observing latency-related issues that are not easily observed
+using the averaged latency statistics.
+
+The histogram fields show cumulative values from lowest to highest.
+The largest bucket is tagged "le=+Inf", representing the total count
+of I/Os by type and vdev.
+
+#### zpool_latency Histogram Tags
+| label | description |
+|---|---|
+| le | bucket for histogram, latency is less than or equal to bucket value in seconds |
+| name | pool name |
+| path | for leaf vdevs, the device path name, otherwise omitted |
+| vdev | vdev name (root = entire pool) |
+
+#### zpool_latency Histogram Fields
+| field | units | description |
+|---|---|---|
+| total_read | operations | read operations of all types |
+| total_write | operations | write operations of all types |
+| disk_read | operations | disk read operations |
+| disk_write | operations | disk write operations |
+| sync_read | operations | ZIO sync reads |
+| sync_write | operations | ZIO sync writes |
+| async_read | operations | ZIO async reads |
+| async_write | operations | ZIO async writes |
+| scrub | operations | ZIO scrub/scan reads |
+| trim | operations | ZIO trim (aka unmap) writes |
+
+### zpool_io_size Histogram
+ZFS tracks I/O throughout the ZIO pipeline. The size of each I/O is used
+to create a histogram of the size by I/O type and vdev. For example, a
+4KiB write to a mirrored pool will show a 4KiB write to the top-level vdev
+(root) and a 4KiB write to each of the mirror leaf vdevs.
+
+The ZIO pipeline can aggregate I/O operations. For example, a contiguous
+series of writes can be aggregated into a single, larger I/O to the leaf
+vdev. The independent I/O operations reflect the logical operations and
+the aggregated I/O operations reflect the physical operations.
+
+The histogram fields show cumulative values from lowest to highest.
+The largest bucket is tagged "le=+Inf", representing the total count
+of I/Os by type and vdev.
+
+Note: trim I/Os can be larger than 16MiB, but the larger sizes are
+counted in the 16MiB bucket.
+
+#### zpool_io_size Histogram Tags
+| label | description |
+|---|---|
+| le | bucket for histogram, I/O size is less than or equal to bucket value in bytes |
+| name | pool name |
+| path | for leaf vdevs, the device path name, otherwise omitted |
+| vdev | vdev name (root = entire pool) |
+
+#### zpool_io_size Histogram Fields
+| field | units | description |
+|---|---|---|
+| sync_read_ind | blocks | independent sync reads |
+| sync_write_ind | blocks | independent sync writes |
+| async_read_ind | blocks | independent async reads |
+| async_write_ind | blocks | independent async writes |
+| scrub_read_ind | blocks | independent scrub/scan reads |
+| trim_write_ind | blocks | independent trim (aka unmap) writes |
+| sync_read_agg | blocks | aggregated sync reads |
+| sync_write_agg | blocks | aggregated sync writes |
+| async_read_agg | blocks | aggregated async reads |
+| async_write_agg | blocks | aggregated async writes |
+| scrub_read_agg | blocks | aggregated scrub/scan reads |
+| trim_write_agg | blocks | aggregated trim (aka unmap) writes |
+
+#### About unsigned integers
+Telegraf v1.6.2 and later support unsigned 64-bit integers, which more
+closely match the uint64_t values used by ZFS. By default, zpool_influxdb
+uses ZFS' uint64_t values and the influxdb line protocol unsigned integer type.
+If you are using an older telegraf or influxdb where unsigned integers are not
+available, use the `--signed-int` option.
+
+## Using _zpool_influxdb_
+
+The simplest method is to use the execd input agent in telegraf. For older
+versions of telegraf which lack execd, the exec input agent can be used.
+For convenience, one of the sample config files below can be placed in the
+telegraf config-directory (often /etc/telegraf/telegraf.d). Telegraf can
+be restarted to read the config-directory files.
+
+### Example telegraf execd configuration
+```toml
+# # Read metrics from zpool_influxdb
+[[inputs.execd]]
+# ## default installation location for zpool_influxdb command
+ command = ["/usr/libexec/zfs/zpool_influxdb", "--execd"]
+
+ ## Define how the process is signaled on each collection interval.
+ ## Valid values are:
+ ## "none" : Do not signal anything. (Recommended for service inputs)
+ ## The process must output metrics by itself.
+ ## "STDIN" : Send a newline on STDIN. (Recommended for gather inputs)
+ ## "SIGHUP" : Send a HUP signal. Not available on Windows. (not recommended)
+ ## "SIGUSR1" : Send a USR1 signal. Not available on Windows.
+ ## "SIGUSR2" : Send a USR2 signal. Not available on Windows.
+ signal = "STDIN"
+
+ ## Delay before the process is restarted after an unexpected termination
+ restart_delay = "10s"
+
+ ## Data format to consume.
+ ## Each data format has its own unique set of configuration options, read
+ ## more about them here:
+ ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+ data_format = "influx"
+```
+
+### Example telegraf exec configuration
+```toml
+# # Read metrics from zpool_influxdb
+[[inputs.exec]]
+# ## default installation location for zpool_influxdb command
+ commands = ["/usr/libexec/zfs/zpool_influxdb"]
+ data_format = "influx"
+```
+
+## Caveat Emptor
+* Like the _zpool_ command, _zpool_influxdb_ takes a reader
+ lock on spa_config for each imported pool. If this lock blocks,
+ then the command will also block indefinitely and might be
+ unkillable. This is not a normal condition, but can occur if
+ there are bugs in the kernel modules.
+ For this reason, care should be taken:
+ * avoid spawning many of these commands hoping that one might
+ finish
+ * avoid frequent updates or short sample time
+ intervals, because the locks can interfere with the performance
+ of other instances of _zpool_ or _zpool_influxdb_
+
+## Other collectors
+There are a few other collectors for zpool statistics roaming around
+the Internet. Many attempt to screen-scrape `zpool` output in various
+ways. The screen-scrape method works poorly for `zpool` output because
+of its human-friendly nature. Also, they suffer from the same caveats
+as this implementation. This implementation is optimized for directly
+collecting the metrics and is much more efficient than the screen-scrapers.
+
+## Feedback Encouraged
+Pull requests and issues are greatly appreciated at
+https://github.com/openzfs/zfs
diff --git a/sys/contrib/openzfs/cmd/zpool_influxdb/dashboards/README.md b/sys/contrib/openzfs/cmd/zpool_influxdb/dashboards/README.md
new file mode 100644
index 000000000000..2fdbe49834ff
--- /dev/null
+++ b/sys/contrib/openzfs/cmd/zpool_influxdb/dashboards/README.md
@@ -0,0 +1,3 @@
+### Dashboards for zpool_influxdb
+This directory contains a collection of dashboards related to ZFS with data
+collected from the zpool_influxdb collector.
diff --git a/sys/contrib/openzfs/cmd/zpool_influxdb/dashboards/grafana/ZFS-pool-latency-heatmaps-influxdb.json b/sys/contrib/openzfs/cmd/zpool_influxdb/dashboards/grafana/ZFS-pool-latency-heatmaps-influxdb.json
new file mode 100644
index 000000000000..a99f92783bc4
--- /dev/null
+++ b/sys/contrib/openzfs/cmd/zpool_influxdb/dashboards/grafana/ZFS-pool-latency-heatmaps-influxdb.json
@@ -0,0 +1,1667 @@
+{
+ "__inputs": [
+ {
+ "name": "DS_MACBOOK-INFLUX",
+ "label": "macbook-influx",
+ "description": "",
+ "type": "datasource",
+ "pluginId": "influxdb",
+ "pluginName": "InfluxDB"
+ }
+ ],
+ "__requires": [
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "6.7.3"
+ },
+ {
+ "type": "panel",
+ "id": "heatmap",
+ "name": "Heatmap",
+ "version": ""
+ },
+ {
+ "type": "datasource",
+ "id": "influxdb",
+ "name": "InfluxDB",
+ "version": "1.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "jdbranham-diagram-panel",
+ "name": "Diagram",
+ "version": "1.4.5"
+ },
+ {
+ "type": "panel",
+ "id": "text",
+ "name": "Text",
+ "version": ""
+ }
+ ],
+ "annotations": {
+ "list": [
+ {
+ "$$hashKey": "object:1627",
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "description": "Top-level ZFS pool latency by ZIO type",
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 1,
+ "id": null,
+ "iteration": 1590445168391,
+ "links": [],
+ "panels": [
+ {
+ "collapsed": false,
+ "datasource": "${DS_MACBOOK-INFLUX}",
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 5,
+ "panels": [],
+ "title": "Total Reads and Writes",
+ "type": "row"
+ },
+ {
+ "cards": {
+ "cardPadding": null,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateOranges",
+ "exponent": 0.5,
+ "mode": "spectrum"
+ },
+ "dataFormat": "tsbuckets",
+ "datasource": "${DS_MACBOOK-INFLUX}",
+ "description": "Latency histogram for the total reads of a ZFS pool",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 1
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 2,
+ "legend": {
+ "show": true
+ },
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "alias": "$tag_le",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "le"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "zpool_latency",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "total_read"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "last"
+ },
+ {
+ "params": [
+ "1s"
+ ],
+ "type": "non_negative_derivative"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "host",
+ "operator": "=~",
+ "value": "/^$hostname$/"
+ },
+ {
+ "condition": "AND",
+ "key": "name",
+ "operator": "=~",
+ "value": "/^$poolname$/"
+ }
+ ]
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Total Reads",
+ "tooltip": {
+ "show": true,
+ "showHistogram": true
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": 0,
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
+ },
+ {
+ "cards": {
+ "cardPadding": null,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateOranges",
+ "exponent": 0.5,
+ "mode": "spectrum"
+ },
+ "dataFormat": "tsbuckets",
+ "datasource": "${DS_MACBOOK-INFLUX}",
+ "description": "Latency histogram for the total writes of a ZFS pool",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 12,
+ "y": 1
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 3,
+ "legend": {
+ "show": true
+ },
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "alias": "$tag_le",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "le"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "zpool_latency",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "total_write"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "last"
+ },
+ {
+ "params": [
+ "1s"
+ ],
+ "type": "non_negative_derivative"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "host",
+ "operator": "=~",
+ "value": "/^$hostname$/"
+ },
+ {
+ "condition": "AND",
+ "key": "name",
+ "operator": "=~",
+ "value": "/^$poolname$/"
+ }
+ ]
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Total Writes",
+ "tooltip": {
+ "show": true,
+ "showHistogram": true
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": 0,
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
+ },
+ {
+ "collapsed": false,
+ "datasource": "${DS_MACBOOK-INFLUX}",
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 10
+ },
+ "id": 8,
+ "panels": [],
+ "title": "ZIO Scheduler Queues for Read Operations",
+ "type": "row"
+ },
+ {
+ "cards": {
+ "cardPadding": null,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateOranges",
+ "exponent": 0.5,
+ "mode": "spectrum"
+ },
+ "dataFormat": "tsbuckets",
+ "datasource": "${DS_MACBOOK-INFLUX}",
+ "description": "Latency histogram for the synchronous reads of a ZFS pool",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 5,
+ "x": 0,
+ "y": 11
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 6,
+ "legend": {
+ "show": false
+ },
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "alias": "$tag_le",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "le"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "zpool_latency",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "sync_read"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "last"
+ },
+ {
+ "params": [
+ "1s"
+ ],
+ "type": "non_negative_derivative"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "host",
+ "operator": "=~",
+ "value": "/^$hostname$/"
+ },
+ {
+ "condition": "AND",
+ "key": "name",
+ "operator": "=~",
+ "value": "/^$poolname$/"
+ }
+ ]
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Sync Read Queue",
+ "tooltip": {
+ "show": true,
+ "showHistogram": true
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": 0,
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
+ },
+ {
+ "cards": {
+ "cardPadding": null,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateOranges",
+ "exponent": 0.5,
+ "mode": "spectrum"
+ },
+ "dataFormat": "tsbuckets",
+ "datasource": "${DS_MACBOOK-INFLUX}",
+ "description": "Latency histogram for the asynchronous reads of a ZFS pool",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 5,
+ "x": 5,
+ "y": 11
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 9,
+ "legend": {
+ "show": false
+ },
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "alias": "$tag_le",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "le"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "zpool_latency",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "async_read"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "last"
+ },
+ {
+ "params": [
+ "1s"
+ ],
+ "type": "non_negative_derivative"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "host",
+ "operator": "=~",
+ "value": "/^$hostname$/"
+ },
+ {
+ "condition": "AND",
+ "key": "name",
+ "operator": "=~",
+ "value": "/^$poolname$/"
+ }
+ ]
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Async Read Queue",
+ "tooltip": {
+ "show": true,
+ "showHistogram": true
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": 0,
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
+ },
+ {
+ "cards": {
+ "cardPadding": null,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateOranges",
+ "exponent": 0.5,
+ "mode": "spectrum"
+ },
+ "dataFormat": "tsbuckets",
+ "datasource": "${DS_MACBOOK-INFLUX}",
+ "description": "Latency histogram for the scrub or scan reads of a ZFS pool",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 5,
+ "x": 10,
+ "y": 11
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 10,
+ "legend": {
+ "show": false
+ },
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "alias": "$tag_le",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "le"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "zpool_latency",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "scrub"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "last"
+ },
+ {
+ "params": [
+ "1s"
+ ],
+ "type": "non_negative_derivative"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "host",
+ "operator": "=~",
+ "value": "/^$hostname$/"
+ },
+ {
+ "condition": "AND",
+ "key": "name",
+ "operator": "=~",
+ "value": "/^$poolname$/"
+ }
+ ]
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Scrub/Scan Read Queue",
+ "tooltip": {
+ "show": true,
+ "showHistogram": true
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": 0,
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
+ },
+ {
+ "cards": {
+ "cardPadding": null,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateOranges",
+ "exponent": 0.5,
+ "mode": "spectrum"
+ },
+ "dataFormat": "tsbuckets",
+ "datasource": "${DS_MACBOOK-INFLUX}",
+ "description": "Latency histogram for the actual disk reads of a ZFS pool",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 9,
+ "x": 15,
+ "y": 11
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 11,
+ "legend": {
+ "show": false
+ },
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "alias": "$tag_le",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "le"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "zpool_latency",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "disk_read"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "last"
+ },
+ {
+ "params": [
+ "1s"
+ ],
+ "type": "non_negative_derivative"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "host",
+ "operator": "=~",
+ "value": "/^$hostname$/"
+ },
+ {
+ "condition": "AND",
+ "key": "name",
+ "operator": "=~",
+ "value": "/^$poolname$/"
+ }
+ ]
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Disk Read Queue",
+ "tooltip": {
+ "show": true,
+ "showHistogram": true
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": 0,
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
+ },
+ {
+ "collapsed": false,
+ "datasource": "${DS_MACBOOK-INFLUX}",
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 19
+ },
+ "id": 13,
+ "panels": [],
+ "title": "ZIO Scheduler Queues for Write Operations",
+ "type": "row"
+ },
+ {
+ "cards": {
+ "cardPadding": null,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateOranges",
+ "exponent": 0.5,
+ "mode": "spectrum"
+ },
+ "dataFormat": "tsbuckets",
+ "datasource": "${DS_MACBOOK-INFLUX}",
+ "description": "Latency histogram for the synchronous writes of a ZFS pool",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 5,
+ "x": 0,
+ "y": 20
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 14,
+ "legend": {
+ "show": false
+ },
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "alias": "$tag_le",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "le"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "zpool_latency",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "sync_write"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "last"
+ },
+ {
+ "params": [
+ "1s"
+ ],
+ "type": "non_negative_derivative"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "host",
+ "operator": "=~",
+ "value": "/^$hostname$/"
+ },
+ {
+ "condition": "AND",
+ "key": "name",
+ "operator": "=~",
+ "value": "/^$poolname$/"
+ }
+ ]
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Sync Write Queue",
+ "tooltip": {
+ "show": true,
+ "showHistogram": true
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": 0,
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
+ },
+ {
+ "cards": {
+ "cardPadding": null,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateOranges",
+ "exponent": 0.5,
+ "mode": "spectrum"
+ },
+ "dataFormat": "tsbuckets",
+ "datasource": "${DS_MACBOOK-INFLUX}",
+ "description": "Latency histogram for the asynchronous writes of a ZFS pool",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 5,
+ "x": 5,
+ "y": 20
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 15,
+ "legend": {
+ "show": false
+ },
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "alias": "$tag_le",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "le"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "zpool_latency",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "async_write"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "last"
+ },
+ {
+ "params": [
+ "1s"
+ ],
+ "type": "non_negative_derivative"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "host",
+ "operator": "=~",
+ "value": "/^$hostname$/"
+ },
+ {
+ "condition": "AND",
+ "key": "name",
+ "operator": "=~",
+ "value": "/^$poolname$/"
+ }
+ ]
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Async Write Queue",
+ "tooltip": {
+ "show": true,
+ "showHistogram": true
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": 0,
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
+ },
+ {
+ "cards": {
+ "cardPadding": null,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateOranges",
+ "exponent": 0.5,
+ "mode": "spectrum"
+ },
+ "dataFormat": "tsbuckets",
+ "datasource": "${DS_MACBOOK-INFLUX}",
+ "description": "Latency histogram for the trim or unmap operations of a ZFS pool",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 5,
+ "x": 10,
+ "y": 20
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 16,
+ "legend": {
+ "show": false
+ },
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "alias": "$tag_le",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "le"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "zpool_latency",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "trim"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "last"
+ },
+ {
+ "params": [
+ "1s"
+ ],
+ "type": "non_negative_derivative"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "host",
+ "operator": "=~",
+ "value": "/^$hostname$/"
+ },
+ {
+ "condition": "AND",
+ "key": "name",
+ "operator": "=~",
+ "value": "/^$poolname$/"
+ }
+ ]
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Trim Write Queue",
+ "tooltip": {
+ "show": true,
+ "showHistogram": true
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": 0,
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
+ },
+ {
+ "cards": {
+ "cardPadding": null,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateOranges",
+ "exponent": 0.5,
+ "mode": "spectrum"
+ },
+ "dataFormat": "tsbuckets",
+ "datasource": "${DS_MACBOOK-INFLUX}",
+ "description": "Latency histogram for the disk write operations of a ZFS pool",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 9,
+ "x": 15,
+ "y": 20
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 17,
+ "legend": {
+ "show": false
+ },
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "alias": "$tag_le",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "le"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "zpool_latency",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "disk_write"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "last"
+ },
+ {
+ "params": [
+ "1s"
+ ],
+ "type": "non_negative_derivative"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "host",
+ "operator": "=~",
+ "value": "/^$hostname$/"
+ },
+ {
+ "condition": "AND",
+ "key": "name",
+ "operator": "=~",
+ "value": "/^$poolname$/"
+ }
+ ]
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Disk Write Queue",
+ "tooltip": {
+ "show": true,
+ "showHistogram": true
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": 0,
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
+ },
+ {
+ "collapsed": false,
+ "datasource": "${DS_MACBOOK-INFLUX}",
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 28
+ },
+ "id": 19,
+ "panels": [],
+ "title": "About",
+ "type": "row"
+ },
+ {
+ "content": "I/O requests that are satisfied by accessing pool devices are managed by the ZIO scheduler.\nThe total latency is measured from the start of the I/O to completion by the disk.\nLatency through each queue is shown prior to its submission to the disk queue.\n\nThis view is useful for observing the effects of tuning the ZIO scheduler min and max values\n(see zfs-module-parameters(5) and [ZFS on Linux Module Parameters](https://openzfs.github.io/openzfs-docs/Performance%20and%20tuning/ZFS%20on%20Linux%20Module%20Parameters.html)):\n+ *zfs_vdev_max_active* controls the ZIO scheduler's disk queue depth (do not confuse with the block device's nr_requests)\n+ *zfs_vdev_sync_read_min_active* and *zfs_vdev_sync_read_max_active* control the synchronous queue for reads: most reads are sync\n+ *zfs_vdev_sync_write_min_active* and *zfs_vdev_sync_write_max_active* control the synchronous queue for writes: \nusually metadata or user data depending on the \"sync\" property setting or I/Os that are requested to be flushed\n+ *zfs_vdev_async_read_min_active* and *zfs_vdev_async_read_max_active* control the asynchronous queue for reads: usually prefetches\n+ *zfs_vdev_async_write_min_active* and *zfs_vdev_async_write_max_active* control the asynchronous queue for writes: \nusually the bulk of all writes at transaction group (txg) commit\n+ *zfs_vdev_scrub_min_active* and *zfs_vdev_scrub_max_active* controls the scan reads: usually scrub or resilver\n\n",
+ "datasource": "${DS_MACBOOK-INFLUX}",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 15,
+ "w": 16,
+ "x": 0,
+ "y": 29
+ },
+ "id": 21,
+ "mode": "markdown",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "About ZFS Pool All Queues Read/Write Latency Histograms",
+ "type": "text"
+ },
+ {
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "composites": [],
+ "content": "graph LR\nIO((I/O request)) --> SR(sync read queue)\nIO --> SW(sync write queue)\nIO --> AR(async read queue)\nIO --> AW(async write queue)\nIO --> SCRUB(scrub queue)\nIO --> TRIM(trim queue)\nSR --> DISKQ(disk queue)\nSW --> DISKQ\nAR --> DISKQ\nAW --> DISKQ\nSCRUB --> DISKQ\nTRIM --> DISKQ\nDISKQ --> DISK((disk))\n",
+ "datasource": "${DS_MACBOOK-INFLUX}",
+ "decimals": 2,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "none",
+ "graphId": "diagram_23",
+ "gridPos": {
+ "h": 15,
+ "w": 7,
+ "x": 16,
+ "y": 29
+ },
+ "id": 23,
+ "init": {
+ "arrowMarkerAbsolute": true,
+ "cloneCssStyles": true,
+ "flowchart": {
+ "htmlLabels": true,
+ "useMaxWidth": true
+ },
+ "gantt": {
+ "barGap": 4,
+ "barHeight": 20,
+ "fontFamily": "\"Open-Sans\", \"sans-serif\"",
+ "fontSize": 11,
+ "gridLineStartPadding": 35,
+ "leftPadding": 75,
+ "numberSectionStyles": 3,
+ "titleTopMargin": 25,
+ "topPadding": 50
+ },
+ "logLevel": 3,
+ "securityLevel": "loose",
+ "sequence": {
+ "actorMargin": 50,
+ "bottomMarginAdj": 1,
+ "boxMargin": 10,
+ "boxTextMargin": 5,
+ "diagramMarginX": 50,
+ "diagramMarginY": 10,
+ "height": 65,
+ "messageMargin": 35,
+ "mirrorActors": true,
+ "noteMargin": 10,
+ "useMaxWidth": true,
+ "width": 150
+ },
+ "startOnLoad": false,
+ "theme": "dark"
+ },
+ "legend": {
+ "avg": true,
+ "current": true,
+ "gradient": {
+ "enabled": true,
+ "show": true
+ },
+ "max": true,
+ "min": true,
+ "show": false,
+ "total": true
+ },
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "$$hashKey": "object:155",
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "$$hashKey": "object:156",
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "maxWidth": false,
+ "mermaidServiceUrl": "",
+ "metricCharacterReplacements": [],
+ "moddedSeriesVal": 0,
+ "mode": "content",
+ "nullPointMode": "connected",
+ "seriesOverrides": [],
+ "style": "",
+ "styleValues": {},
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "hide": true,
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "themes": [
+ "default",
+ "dark",
+ "forest",
+ "neutral"
+ ],
+ "thresholds": "0,10",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Panel Title",
+ "type": "jdbranham-diagram-panel",
+ "valueMaps": [
+ {
+ "$$hashKey": "object:151",
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg",
+ "valueOptions": [
+ "avg",
+ "min",
+ "max",
+ "total",
+ "current"
+ ]
+ }
+ ],
+ "refresh": false,
+ "schemaVersion": 22,
+ "style": "dark",
+ "tags": [
+ "ZFS",
+ "Latency",
+ "Histogram"
+ ],
+ "templating": {
+ "list": [
+ {
+ "allValue": null,
+ "current": {},
+ "datasource": "${DS_MACBOOK-INFLUX}",
+ "definition": "show tag values from \"zpool_latency\" with key = \"host\"",
+ "hide": 0,
+ "includeAll": false,
+ "index": -1,
+ "label": null,
+ "multi": false,
+ "name": "hostname",
+ "options": [],
+ "query": "show tag values from \"zpool_latency\" with key = \"host\"",
+ "refresh": 1,
+ "regex": "/([-a-zA-Z-0-9]+)/",
+ "skipUrlSync": false,
+ "sort": 5,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {},
+ "datasource": "${DS_MACBOOK-INFLUX}",
+ "definition": "show tag values from \"zpool_latency\" with key = \"name\" where \"host\" =~ /^$hostname/",
+ "hide": 0,
+ "includeAll": false,
+ "index": -1,
+ "label": null,
+ "multi": false,
+ "name": "poolname",
+ "options": [],
+ "query": "show tag values from \"zpool_latency\" with key = \"name\" where \"host\" =~ /^$hostname/",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 5,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "2020-05-25T21:34:30.137Z",
+ "to": "2020-05-25T21:39:54.445Z"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ]
+ },
+ "timezone": "",
+ "title": "ZFS Pool Latency Heatmaps Influxdb",
+ "uid": "TbB4-DkGz",
+ "variables": {
+ "list": []
+ },
+ "version": 2
+} \ No newline at end of file
diff --git a/sys/contrib/openzfs/cmd/zpool_influxdb/telegraf.d/README.md b/sys/contrib/openzfs/cmd/zpool_influxdb/telegraf.d/README.md
new file mode 100644
index 000000000000..74f411a15d34
--- /dev/null
+++ b/sys/contrib/openzfs/cmd/zpool_influxdb/telegraf.d/README.md
@@ -0,0 +1,7 @@
+This directory contains sample telegraf configurations for
+adding `zpool_influxdb` as an input plugin. Depending on your
+telegraf configuration, the installation can be as simple as
+copying one of these to the `/etc/telegraf/telegraf.d` directory
+and restarting telegraf with `systemctl restart telegraf`.
+
+See the telegraf docs for more information on input plugins.
diff --git a/sys/contrib/openzfs/cmd/zpool_influxdb/telegraf.d/exec_zpool_influxdb.conf b/sys/contrib/openzfs/cmd/zpool_influxdb/telegraf.d/exec_zpool_influxdb.conf
new file mode 100644
index 000000000000..a2efa61892ff
--- /dev/null
+++ b/sys/contrib/openzfs/cmd/zpool_influxdb/telegraf.d/exec_zpool_influxdb.conf
@@ -0,0 +1,15 @@
+# # Read metrics from zpool_influxdb
+[[inputs.exec]]
+# ## default installation location for zpool_influxdb command
+ commands = ["/usr/local/libexec/zfs/zpool_influxdb"]
+# ## Timeout for each command to complete.
+# timeout = "5s"
+#
+# ## measurement name suffix (for separating different commands)
+# name_suffix = "_mycollector"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+ data_format = "influx"
diff --git a/sys/contrib/openzfs/cmd/zpool_influxdb/telegraf.d/execd_zpool_influxdb.conf b/sys/contrib/openzfs/cmd/zpool_influxdb/telegraf.d/execd_zpool_influxdb.conf
new file mode 100644
index 000000000000..90737b8cb798
--- /dev/null
+++ b/sys/contrib/openzfs/cmd/zpool_influxdb/telegraf.d/execd_zpool_influxdb.conf
@@ -0,0 +1,23 @@
+# # Read metrics from zpool_influxdb
+[[inputs.execd]]
+# ## default installation location for zpool_influxdb command
+ command = ["/usr/local/libexec/zfs/zpool_influxdb", "--execd"]
+
+ ## Define how the process is signaled on each collection interval.
+ ## Valid values are:
+ ## "none" : Do not signal anything. (Recommended for service inputs)
+ ## The process must output metrics by itself.
+ ## "STDIN" : Send a newline on STDIN. (Recommended for gather inputs)
+ ## "SIGHUP" : Send a HUP signal. Not available on Windows. (not recommended)
+ ## "SIGUSR1" : Send a USR1 signal. Not available on Windows.
+ ## "SIGUSR2" : Send a USR2 signal. Not available on Windows.
+ signal = "STDIN"
+
+ ## Delay before the process is restarted after an unexpected termination
+ restart_delay = "10s"
+
+ ## Data format to consume.
+ ## Each data format has its own unique set of configuration options, read
+ ## more about them here:
+ ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+ data_format = "influx"
diff --git a/sys/contrib/openzfs/cmd/zpool_influxdb/zpool_influxdb.c b/sys/contrib/openzfs/cmd/zpool_influxdb/zpool_influxdb.c
new file mode 100644
index 000000000000..71ffcb25381a
--- /dev/null
+++ b/sys/contrib/openzfs/cmd/zpool_influxdb/zpool_influxdb.c
@@ -0,0 +1,843 @@
+/*
+ * Gather top-level ZFS pool and resilver/scan statistics and print using
+ * influxdb line protocol
+ * usage: [options] [pool_name]
+ * where options are:
+ * --execd, -e run in telegraf execd input plugin mode; a [CR] on
+ * stdin causes a sample to be printed, and the program then
+ * waits for the next [CR]
+ * --no-histograms, -n don't print histogram data (reduces cardinality
+ * if you don't care about histograms)
+ * --sum-histogram-buckets, -s sum histogram bucket values
+ *
+ * To integrate into telegraf use one of:
+ * 1. the `inputs.execd` plugin with the `--execd` option
+ * 2. the `inputs.exec` plugin to simply run with no options
+ *
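+ * Example output line, in influxdb line protocol (illustrative and
+ * abbreviated; tags, field values, and the timestamp will vary):
+ *   zpool_stats,name=tank,state=ONLINE,vdev=root alloc=1234u,free=5678u,... 1590000000000000000
+ *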
+ * NOTE: libzfs is an unstable interface. YMMV.
+ *
+ * The design goals of this software include:
+ * + be as lightweight as possible
+ * + reduce the number of external dependencies as far as possible, hence
+ * there is no dependency on a client library for managing the metric
+ * collection -- info is printed, KISS
+ * + broken pools or kernel bugs can cause this process to hang in an
+ * unkillable state. For this reason, it is best to keep the damage limited
+ * to a small process like zpool_influxdb rather than a larger collector.
+ *
+ * Copyright 2018-2020 Richard Elling
+ *
+ * This software is dual-licensed MIT and CDDL.
+ *
+ * The MIT License (MIT)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License Version 1.0 (CDDL-1.0).
+ * You can obtain a copy of the license from the top-level file
+ * "OPENSOLARIS.LICENSE" or at <http://opensource.org/licenses/CDDL-1.0>.
+ * You may not use this file except in compliance with the license.
+ *
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * CDDL HEADER END
+ */
+#include <string.h>
+#include <getopt.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <libzfs_impl.h>
+
+#define POOL_MEASUREMENT "zpool_stats"
+#define SCAN_MEASUREMENT "zpool_scan_stats"
+#define VDEV_MEASUREMENT "zpool_vdev_stats"
+#define POOL_LATENCY_MEASUREMENT "zpool_latency"
+#define POOL_QUEUE_MEASUREMENT "zpool_vdev_queue"
+#define MIN_LAT_INDEX 10 /* minimum latency index 10 = 1024ns */
+#define POOL_IO_SIZE_MEASUREMENT "zpool_io_size"
+#define MIN_SIZE_INDEX 9 /* minimum size index 9 = 512 bytes */
+
+/* global options */
+int execd_mode = 0;
+int no_histograms = 0;
+int sum_histogram_buckets = 0;
+char metric_data_type = 'u';
+uint64_t metric_value_mask = UINT64_MAX;
+uint64_t timestamp = 0;
+int complained_about_sync = 0;
+char *tags = "";
+
+typedef int (*stat_printer_f)(nvlist_t *, const char *, const char *);
+
+/*
+ * influxdb line protocol rules for escaping are important because the
+ * zpool name can include characters that need to be escaped
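+ * (for example, a pool named "my pool" is emitted as the tag value "my\ pool")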
+ *
+ * caller is responsible for freeing result
+ */
+static char *
+escape_string(char *s)
+{
+ char *c, *d;
+ char *t = (char *)malloc(ZFS_MAX_DATASET_NAME_LEN * 2);
+ if (t == NULL) {
+ fprintf(stderr, "error: cannot allocate memory\n");
+ exit(1);
+ }
+
+ for (c = s, d = t; *c != '\0'; c++, d++) {
+ switch (*c) {
+ case ' ':
+ case ',':
+ case '=':
+ case '\\':
+ *d++ = '\\';
+ default:
+ *d = *c;
+ }
+ }
+ *d = '\0';
+ return (t);
+}
+
+/*
+ * print key=value where value is a uint64_t
+ */
+static void
+print_kv(char *key, uint64_t value)
+{
+ printf("%s=%llu%c", key,
+ (u_longlong_t)value & metric_value_mask, metric_data_type);
+}
+
+/*
+ * print_scan_status() prints the details as often seen in the "zpool status"
+ * output. However, unlike the zpool command, which is intended for humans,
+ * this output is suitable for long-term tracking in influxdb.
+ * TODO: update to include issued scan data
+ */
+static int
+print_scan_status(nvlist_t *nvroot, const char *pool_name)
+{
+ uint_t c;
+ int64_t elapsed;
+ uint64_t examined, pass_exam, paused_time, paused_ts, rate;
+ uint64_t remaining_time;
+ pool_scan_stat_t *ps = NULL;
+ double pct_done;
+ char *state[DSS_NUM_STATES] = {
+ "none", "scanning", "finished", "canceled"};
+ char *func;
+
+ (void) nvlist_lookup_uint64_array(nvroot,
+ ZPOOL_CONFIG_SCAN_STATS,
+ (uint64_t **)&ps, &c);
+
+ /*
+ * ignore if there are no stats
+ */
+ if (ps == NULL)
+ return (0);
+
+ /*
+ * return error if state is bogus
+ */
+ if (ps->pss_state >= DSS_NUM_STATES ||
+ ps->pss_func >= POOL_SCAN_FUNCS) {
+ if (complained_about_sync % 1000 == 0) {
+ fprintf(stderr, "error: cannot decode scan stats: "
+ "ZFS is out of sync with compiled zpool_influxdb\n");
+ complained_about_sync++;
+ }
+ return (1);
+ }
+
+ switch (ps->pss_func) {
+ case POOL_SCAN_NONE:
+ func = "none_requested";
+ break;
+ case POOL_SCAN_SCRUB:
+ func = "scrub";
+ break;
+ case POOL_SCAN_RESILVER:
+ func = "resilver";
+ break;
+#ifdef POOL_SCAN_REBUILD
+ case POOL_SCAN_REBUILD:
+ func = "rebuild";
+ break;
+#endif
+ default:
+ func = "scan";
+ }
+
+ /* overall progress */
+ examined = ps->pss_examined ? ps->pss_examined : 1;
+ pct_done = 0.0;
+ if (ps->pss_to_examine > 0)
+ pct_done = 100.0 * examined / ps->pss_to_examine;
+
+#ifdef EZFS_SCRUB_PAUSED
+ paused_ts = ps->pss_pass_scrub_pause;
+ paused_time = ps->pss_pass_scrub_spent_paused;
+#else
+ paused_ts = 0;
+ paused_time = 0;
+#endif
+
+ /* calculations for this pass */
+ if (ps->pss_state == DSS_SCANNING) {
+ elapsed = (int64_t)time(NULL) - (int64_t)ps->pss_pass_start -
+ (int64_t)paused_time;
+ elapsed = (elapsed > 0) ? elapsed : 1;
+ pass_exam = ps->pss_pass_exam ? ps->pss_pass_exam : 1;
+ rate = pass_exam / elapsed;
+ rate = (rate > 0) ? rate : 1;
+ remaining_time = (ps->pss_to_examine - examined) / rate;
+ } else {
+ elapsed =
+ (int64_t)ps->pss_end_time - (int64_t)ps->pss_pass_start -
+ (int64_t)paused_time;
+ elapsed = (elapsed > 0) ? elapsed : 1;
+ pass_exam = ps->pss_pass_exam ? ps->pss_pass_exam : 1;
+ rate = pass_exam / elapsed;
+ remaining_time = 0;
+ }
+ rate = rate ? rate : 1;
+
+ /* influxdb line protocol format: "tags metrics timestamp" */
+ printf("%s%s,function=%s,name=%s,state=%s ",
+ SCAN_MEASUREMENT, tags, func, pool_name, state[ps->pss_state]);
+ print_kv("end_ts", ps->pss_end_time);
+ print_kv(",errors", ps->pss_errors);
+ print_kv(",examined", examined);
+ print_kv(",issued", ps->pss_issued);
+ print_kv(",pass_examined", pass_exam);
+ print_kv(",pass_issued", ps->pss_pass_issued);
+ print_kv(",paused_ts", paused_ts);
+ print_kv(",paused_t", paused_time);
+ printf(",pct_done=%.2f", pct_done);
+ print_kv(",processed", ps->pss_processed);
+ print_kv(",rate", rate);
+ print_kv(",remaining_t", remaining_time);
+ print_kv(",start_ts", ps->pss_start_time);
+ print_kv(",to_examine", ps->pss_to_examine);
+ print_kv(",to_process", ps->pss_to_process);
+ printf(" %llu\n", (u_longlong_t)timestamp);
+ return (0);
+}
+
+/*
+ * get a vdev name that corresponds to the top-level vdev names
+ * printed by `zpool status`
+ */
+static char *
+get_vdev_name(nvlist_t *nvroot, const char *parent_name)
+{
+ static char vdev_name[256];
+ char *vdev_type = NULL;
+ uint64_t vdev_id = 0;
+
+ if (nvlist_lookup_string(nvroot, ZPOOL_CONFIG_TYPE,
+ &vdev_type) != 0) {
+ vdev_type = "unknown";
+ }
+ if (nvlist_lookup_uint64(
+ nvroot, ZPOOL_CONFIG_ID, &vdev_id) != 0) {
+ vdev_id = UINT64_MAX;
+ }
+ if (parent_name == NULL) {
+ (void) snprintf(vdev_name, sizeof (vdev_name), "%s",
+ vdev_type);
+ } else {
+ (void) snprintf(vdev_name, sizeof (vdev_name),
+ "%s/%s-%llu",
+ parent_name, vdev_type, (u_longlong_t)vdev_id);
+ }
+ return (vdev_name);
+}
+
+/*
+ * get a string suitable for an influxdb tag that describes this vdev
+ *
+ * By default only the vdev hierarchical name is shown, separated by '/'
+ * If the vdev has an associated path, which is typical of leaf vdevs,
+ * then the path is added.
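+ * For example, a leaf disk under a raidz vdev might be described as
+ * "path=/dev/sda1,vdev=root/raidz-0/disk-2" (illustrative).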
+ * It would be nice to have the devid instead of the path, but under
+ * Linux we cannot be sure a devid will exist and we'd rather have
+ * something than nothing, so we'll use path instead.
+ */
+static char *
+get_vdev_desc(nvlist_t *nvroot, const char *parent_name)
+{
+ static char vdev_desc[2 * MAXPATHLEN];
+ char *vdev_type = NULL;
+ uint64_t vdev_id = 0;
+ char vdev_value[MAXPATHLEN];
+ char *vdev_path = NULL;
+ char *s, *t;
+
+ if (nvlist_lookup_string(nvroot, ZPOOL_CONFIG_TYPE, &vdev_type) != 0) {
+ vdev_type = "unknown";
+ }
+ if (nvlist_lookup_uint64(nvroot, ZPOOL_CONFIG_ID, &vdev_id) != 0) {
+ vdev_id = UINT64_MAX;
+ }
+ if (nvlist_lookup_string(
+ nvroot, ZPOOL_CONFIG_PATH, &vdev_path) != 0) {
+ vdev_path = NULL;
+ }
+
+ if (parent_name == NULL) {
+ s = escape_string(vdev_type);
+ (void) snprintf(vdev_value, sizeof (vdev_value), "vdev=%s", s);
+ free(s);
+ } else {
+ s = escape_string((char *)parent_name);
+ t = escape_string(vdev_type);
+ (void) snprintf(vdev_value, sizeof (vdev_value),
+ "vdev=%s/%s-%llu", s, t, (u_longlong_t)vdev_id);
+ free(s);
+ free(t);
+ }
+ if (vdev_path == NULL) {
+ (void) snprintf(vdev_desc, sizeof (vdev_desc), "%s",
+ vdev_value);
+ } else {
+ s = escape_string(vdev_path);
+ (void) snprintf(vdev_desc, sizeof (vdev_desc), "path=%s,%s",
+ s, vdev_value);
+ free(s);
+ }
+ return (vdev_desc);
+}
+
+/*
+ * vdev summary stats are a combination of the data shown by
+ * `zpool status` and `zpool list -v`
+ */
+static int
+print_summary_stats(nvlist_t *nvroot, const char *pool_name,
+ const char *parent_name)
+{
+ uint_t c;
+ vdev_stat_t *vs;
+ char *vdev_desc = NULL;
+ vdev_desc = get_vdev_desc(nvroot, parent_name);
+ if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
+ (uint64_t **)&vs, &c) != 0) {
+ return (1);
+ }
+ printf("%s%s,name=%s,state=%s,%s ", POOL_MEASUREMENT, tags,
+ pool_name, zpool_state_to_name((vdev_state_t)vs->vs_state,
+ (vdev_aux_t)vs->vs_aux), vdev_desc);
+ print_kv("alloc", vs->vs_alloc);
+ print_kv(",free", vs->vs_space - vs->vs_alloc);
+ print_kv(",size", vs->vs_space);
+ print_kv(",read_bytes", vs->vs_bytes[ZIO_TYPE_READ]);
+ print_kv(",read_errors", vs->vs_read_errors);
+ print_kv(",read_ops", vs->vs_ops[ZIO_TYPE_READ]);
+ print_kv(",write_bytes", vs->vs_bytes[ZIO_TYPE_WRITE]);
+ print_kv(",write_errors", vs->vs_write_errors);
+ print_kv(",write_ops", vs->vs_ops[ZIO_TYPE_WRITE]);
+ print_kv(",checksum_errors", vs->vs_checksum_errors);
+ print_kv(",fragmentation", vs->vs_fragmentation);
+ printf(" %llu\n", (u_longlong_t)timestamp);
+ return (0);
+}
+
+/*
+ * vdev latency stats are histograms stored as nvlist arrays of uint64.
+ * Latency stats include the ZIO scheduler classes plus lower-level
+ * vdev latencies.
+ *
+ * In many cases, the top-level "root" view obscures the underlying
+ * top-level vdev operations. For example, if a pool has a log, special,
+ * or cache device, then each can behave very differently. It is useful
+ * to see how each is responding.
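+ *
+ * Each histogram bucket is emitted with an "le" tag holding the bucket's
+ * upper bound in seconds (the tag the sample Grafana dashboard groups by).
+ * Illustrative, abbreviated output line:
+ *   zpool_latency,le=0.000131,name=tank,vdev=root total_read=12u,... 1590000000000000000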
+ */
+static int
+print_vdev_latency_stats(nvlist_t *nvroot, const char *pool_name,
+ const char *parent_name)
+{
+ uint_t c, end = 0;
+ nvlist_t *nv_ex;
+ char *vdev_desc = NULL;
+
+ /* short_names become part of the metric name and are influxdb-ready */
+ struct lat_lookup {
+ char *name;
+ char *short_name;
+ uint64_t sum;
+ uint64_t *array;
+ };
+ struct lat_lookup lat_type[] = {
+ {ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO, "total_read", 0},
+ {ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO, "total_write", 0},
+ {ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO, "disk_read", 0},
+ {ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO, "disk_write", 0},
+ {ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO, "sync_read", 0},
+ {ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO, "sync_write", 0},
+ {ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO, "async_read", 0},
+ {ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO, "async_write", 0},
+ {ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO, "scrub", 0},
+#ifdef ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO
+ {ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO, "trim", 0},
+#endif
+ {NULL, NULL}
+ };
+
+ if (nvlist_lookup_nvlist(nvroot,
+ ZPOOL_CONFIG_VDEV_STATS_EX, &nv_ex) != 0) {
+ return (6);
+ }
+
+ vdev_desc = get_vdev_desc(nvroot, parent_name);
+
+ for (int i = 0; lat_type[i].name; i++) {
+ if (nvlist_lookup_uint64_array(nv_ex,
+ lat_type[i].name, &lat_type[i].array, &c) != 0) {
+ fprintf(stderr, "error: can't get %s\n",
+ lat_type[i].name);
+ return (3);
+ }
+ /* end is the last bucket index; all of the arrays are the same size */
+ end = c - 1;
+ }
+
+ for (int bucket = 0; bucket <= end; bucket++) {
+ if (bucket < MIN_LAT_INDEX) {
+ /* don't print, but collect the sum */
+ for (int i = 0; lat_type[i].name; i++) {
+ lat_type[i].sum += lat_type[i].array[bucket];
+ }
+ continue;
+ }
+ if (bucket < end) {
+ printf("%s%s,le=%0.6f,name=%s,%s ",
+ POOL_LATENCY_MEASUREMENT, tags,
+ (float)(1ULL << bucket) * 1e-9,
+ pool_name, vdev_desc);
+ } else {
+ printf("%s%s,le=+Inf,name=%s,%s ",
+ POOL_LATENCY_MEASUREMENT, tags, pool_name,
+ vdev_desc);
+ }
+ for (int i = 0; lat_type[i].name; i++) {
+ if (bucket <= MIN_LAT_INDEX || sum_histogram_buckets) {
+ lat_type[i].sum += lat_type[i].array[bucket];
+ } else {
+ lat_type[i].sum = lat_type[i].array[bucket];
+ }
+ print_kv(lat_type[i].short_name, lat_type[i].sum);
+ if (lat_type[i + 1].name != NULL) {
+ printf(",");
+ }
+ }
+ printf(" %llu\n", (u_longlong_t)timestamp);
+ }
+ return (0);
+}
+
+/*
+ * vdev request size stats are histograms stored as nvlist arrays of uint64.
+ * Request size stats include the ZIO scheduler classes plus lower-level
+ * vdev sizes. Both independent (ind) and aggregated (agg) sizes are reported.
+ *
+ * In many cases, the top-level "root" view obscures the underlying
+ * top-level vdev operations. For example, if a pool has a log, special,
+ * or cache device, then each can behave very differently. It is useful
+ * to see how each is responding.
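+ *
+ * Here the "le" tag is the bucket's upper bound in bytes. Illustrative,
+ * abbreviated output line:
+ *   zpool_io_size,le=4096,name=tank,vdev=root sync_read_ind=2u,... 1590000000000000000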
+ */
+static int
+print_vdev_size_stats(nvlist_t *nvroot, const char *pool_name,
+ const char *parent_name)
+{
+ uint_t c, end = 0;
+ nvlist_t *nv_ex;
+ char *vdev_desc = NULL;
+
+ /* short_names become the field name */
+ struct size_lookup {
+ char *name;
+ char *short_name;
+ uint64_t sum;
+ uint64_t *array;
+ };
+ struct size_lookup size_type[] = {
+ {ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO, "sync_read_ind"},
+ {ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO, "sync_write_ind"},
+ {ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO, "async_read_ind"},
+ {ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO, "async_write_ind"},
+ {ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO, "scrub_read_ind"},
+ {ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO, "sync_read_agg"},
+ {ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO, "sync_write_agg"},
+ {ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO, "async_read_agg"},
+ {ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO, "async_write_agg"},
+ {ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO, "scrub_read_agg"},
+#ifdef ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO
+ {ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO, "trim_write_ind"},
+ {ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO, "trim_write_agg"},
+#endif
+ {NULL, NULL}
+ };
+
+ if (nvlist_lookup_nvlist(nvroot,
+ ZPOOL_CONFIG_VDEV_STATS_EX, &nv_ex) != 0) {
+ return (6);
+ }
+
+ vdev_desc = get_vdev_desc(nvroot, parent_name);
+
+ for (int i = 0; size_type[i].name; i++) {
+ if (nvlist_lookup_uint64_array(nv_ex, size_type[i].name,
+ &size_type[i].array, &c) != 0) {
+ fprintf(stderr, "error: can't get %s\n",
+ size_type[i].name);
+ return (3);
+ }
+ /* end is the last bucket index; all of the arrays are the same size */
+ end = c - 1;
+ }
+
+ for (int bucket = 0; bucket <= end; bucket++) {
+ if (bucket < MIN_SIZE_INDEX) {
+ /* don't print, but collect the sum */
+ for (int i = 0; size_type[i].name; i++) {
+ size_type[i].sum += size_type[i].array[bucket];
+ }
+ continue;
+ }
+
+ if (bucket < end) {
+ printf("%s%s,le=%llu,name=%s,%s ",
+ POOL_IO_SIZE_MEASUREMENT, tags, 1ULL << bucket,
+ pool_name, vdev_desc);
+ } else {
+ printf("%s%s,le=+Inf,name=%s,%s ",
+ POOL_IO_SIZE_MEASUREMENT, tags, pool_name,
+ vdev_desc);
+ }
+ for (int i = 0; size_type[i].name; i++) {
+ if (bucket <= MIN_SIZE_INDEX || sum_histogram_buckets) {
+ size_type[i].sum += size_type[i].array[bucket];
+ } else {
+ size_type[i].sum = size_type[i].array[bucket];
+ }
+ print_kv(size_type[i].short_name, size_type[i].sum);
+ if (size_type[i + 1].name != NULL) {
+ printf(",");
+ }
+ }
+ printf(" %llu\n", (u_longlong_t)timestamp);
+ }
+ return (0);
+}
+
+/*
+ * ZIO scheduler queue stats are stored as gauges. This is unfortunate
+ * because the values can change very rapidly and any point-in-time
+ * value will quickly be obsoleted. It is also not easy to downsample.
+ * Thus only the top-level queue stats might be beneficial... maybe.
+ */
+static int
+print_queue_stats(nvlist_t *nvroot, const char *pool_name,
+ const char *parent_name)
+{
+ nvlist_t *nv_ex;
+ uint64_t value;
+
+ /* short_names are used for the field name */
+ struct queue_lookup {
+ char *name;
+ char *short_name;
+ };
+ struct queue_lookup queue_type[] = {
+ {ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE, "sync_r_active"},
+ {ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE, "sync_w_active"},
+ {ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE, "async_r_active"},
+ {ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE, "async_w_active"},
+ {ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE, "async_scrub_active"},
+ {ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE, "sync_r_pend"},
+ {ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE, "sync_w_pend"},
+ {ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE, "async_r_pend"},
+ {ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE, "async_w_pend"},
+ {ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE, "async_scrub_pend"},
+ {NULL, NULL}
+ };
+
+ if (nvlist_lookup_nvlist(nvroot,
+ ZPOOL_CONFIG_VDEV_STATS_EX, &nv_ex) != 0) {
+ return (6);
+ }
+
+ printf("%s%s,name=%s,%s ", POOL_QUEUE_MEASUREMENT, tags, pool_name,
+ get_vdev_desc(nvroot, parent_name));
+ for (int i = 0; queue_type[i].name; i++) {
+ if (nvlist_lookup_uint64(nv_ex,
+ queue_type[i].name, &value) != 0) {
+ fprintf(stderr, "error: can't get %s\n",
+ queue_type[i].name);
+ return (3);
+ }
+ print_kv(queue_type[i].short_name, value);
+ if (queue_type[i + 1].name != NULL) {
+ printf(",");
+ }
+ }
+ printf(" %llu\n", (u_longlong_t)timestamp);
+ return (0);
+}
+
+/*
+ * top-level vdev stats are at the pool level
+ */
+static int
+print_top_level_vdev_stats(nvlist_t *nvroot, const char *pool_name)
+{
+ nvlist_t *nv_ex;
+ uint64_t value;
+
+ /* short_names become part of the metric name */
+ struct queue_lookup {
+ char *name;
+ char *short_name;
+ };
+ struct queue_lookup queue_type[] = {
+ {ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE, "sync_r_active_queue"},
+ {ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE, "sync_w_active_queue"},
+ {ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE, "async_r_active_queue"},
+ {ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE, "async_w_active_queue"},
+ {ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE, "async_scrub_active_queue"},
+ {ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE, "sync_r_pend_queue"},
+ {ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE, "sync_w_pend_queue"},
+ {ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE, "async_r_pend_queue"},
+ {ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE, "async_w_pend_queue"},
+ {ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE, "async_scrub_pend_queue"},
+ {NULL, NULL}
+ };
+
+ if (nvlist_lookup_nvlist(nvroot,
+ ZPOOL_CONFIG_VDEV_STATS_EX, &nv_ex) != 0) {
+ return (6);
+ }
+
+ printf("%s%s,name=%s,vdev=root ", VDEV_MEASUREMENT, tags,
+ pool_name);
+ for (int i = 0; queue_type[i].name; i++) {
+ if (nvlist_lookup_uint64(nv_ex,
+ queue_type[i].name, &value) != 0) {
+ fprintf(stderr, "error: can't get %s\n",
+ queue_type[i].name);
+ return (3);
+ }
+ if (i > 0)
+ printf(",");
+ print_kv(queue_type[i].short_name, value);
+ }
+
+ printf(" %llu\n", (u_longlong_t)timestamp);
+ return (0);
+}
+
+/*
+ * recursive stats printer
+ */
+static int
+print_recursive_stats(stat_printer_f func, nvlist_t *nvroot,
+ const char *pool_name, const char *parent_name, int descend)
+{
+ uint_t c, children;
+ nvlist_t **child;
+ char vdev_name[256];
+ int err;
+
+ err = func(nvroot, pool_name, parent_name);
+ if (err)
+ return (err);
+
+ if (descend && nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
+ &child, &children) == 0) {
+ (void) strncpy(vdev_name, get_vdev_name(nvroot, parent_name),
+ sizeof (vdev_name));
+ vdev_name[sizeof (vdev_name) - 1] = '\0';
+
+ for (c = 0; c < children; c++) {
+ print_recursive_stats(func, child[c], pool_name,
+ vdev_name, descend);
+ }
+ }
+ return (0);
+}
+
+/*
+ * call-back to print the stats from the pool config
+ *
+ * Note: if the pool is broken, this can hang indefinitely and perhaps in an
+ * unkillable state.
+ */
+static int
+print_stats(zpool_handle_t *zhp, void *data)
+{
+ uint_t c;
+ int err;
+ boolean_t missing;
+ nvlist_t *config, *nvroot;
+ vdev_stat_t *vs;
+ struct timespec tv;
+ char *pool_name;
+
+ /* if not this pool return quickly */
+ if (data &&
+ strncmp(data, zhp->zpool_name, ZFS_MAX_DATASET_NAME_LEN) != 0) {
+ zpool_close(zhp);
+ return (0);
+ }
+
+ if (zpool_refresh_stats(zhp, &missing) != 0) {
+ zpool_close(zhp);
+ return (1);
+ }
+
+ config = zpool_get_config(zhp, NULL);
+ if (clock_gettime(CLOCK_REALTIME, &tv) != 0)
+ timestamp = (uint64_t)time(NULL) * 1000000000;
+ else
+ timestamp =
+ ((uint64_t)tv.tv_sec * 1000000000) + (uint64_t)tv.tv_nsec;
+
+ if (nvlist_lookup_nvlist(
+ config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) != 0) {
+ zpool_close(zhp);
+ return (2);
+ }
+ if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
+ (uint64_t **)&vs, &c) != 0) {
+ zpool_close(zhp);
+ return (3);
+ }
+
+ pool_name = escape_string(zhp->zpool_name);
+ err = print_recursive_stats(print_summary_stats, nvroot,
+ pool_name, NULL, 1);
+ /* if any of these return an error, skip the rest */
+ if (err == 0)
+ err = print_top_level_vdev_stats(nvroot, pool_name);
+
+ if (no_histograms == 0) {
+ if (err == 0)
+ err = print_recursive_stats(print_vdev_latency_stats, nvroot,
+ pool_name, NULL, 1);
+ if (err == 0)
+ err = print_recursive_stats(print_vdev_size_stats, nvroot,
+ pool_name, NULL, 1);
+ if (err == 0)
+ err = print_recursive_stats(print_queue_stats, nvroot,
+ pool_name, NULL, 0);
+ }
+ if (err == 0)
+ err = print_scan_status(nvroot, pool_name);
+
+ free(pool_name);
+ zpool_close(zhp);
+ return (err);
+}
+
+static void
+usage(char *name)
+{
+ fprintf(stderr, "usage: %s [--execd] [--no-histograms] "
+ "[--sum-histogram-buckets] [--signed-int] "
+ "[--tags key=value[,...]] [poolname]\n", name);
+ exit(EXIT_FAILURE);
+}
+
+int
+main(int argc, char *argv[])
+{
+ int opt;
+ int ret = 8;
+ char *line = NULL;
+ size_t len = 0, tagslen = 0;
+ struct option long_options[] = {
+ {"execd", no_argument, NULL, 'e'},
+ {"help", no_argument, NULL, 'h'},
+ {"no-histograms", no_argument, NULL, 'n'},
+ {"signed-int", no_argument, NULL, 'i'},
+ {"sum-histogram-buckets", no_argument, NULL, 's'},
+ {"tags", required_argument, NULL, 't'},
+ {0, 0, 0, 0}
+ };
+ while ((opt = getopt_long(
+ argc, argv, "ehinst:", long_options, NULL)) != -1) {
+ switch (opt) {
+ case 'e':
+ execd_mode = 1;
+ break;
+ case 'i':
+ metric_data_type = 'i';
+ metric_value_mask = INT64_MAX;
+ break;
+ case 'n':
+ no_histograms = 1;
+ break;
+ case 's':
+ sum_histogram_buckets = 1;
+ break;
+ case 't':
+ tagslen = strlen(optarg) + 2;
+ tags = calloc(tagslen, 1);
+ if (tags == NULL) {
+ fprintf(stderr,
+ "error: cannot allocate memory "
+ "for tags\n");
+ exit(1);
+ }
+ (void) snprintf(tags, tagslen, ",%s", optarg);
+ break;
+ default:
+ usage(argv[0]);
+ }
+ }
+
+ libzfs_handle_t *g_zfs;
+ if ((g_zfs = libzfs_init()) == NULL) {
+ fprintf(stderr,
+ "error: cannot initialize libzfs. "
+ "Is the zfs module loaded or zrepl running?\n");
+ exit(EXIT_FAILURE);
+ }
+ if (execd_mode == 0) {
+ ret = zpool_iter(g_zfs, print_stats, argv[optind]);
+ return (ret);
+ }
+ while (getline(&line, &len, stdin) != -1) {
+ ret = zpool_iter(g_zfs, print_stats, argv[optind]);
+ fflush(stdout);
+ }
+ return (ret);
+}
diff --git a/sys/contrib/openzfs/cmd/zstream/zstream_redup.c b/sys/contrib/openzfs/cmd/zstream/zstream_redup.c
index 379025ce59e5..41f1068e3dfc 100644
--- a/sys/contrib/openzfs/cmd/zstream/zstream_redup.c
+++ b/sys/contrib/openzfs/cmd/zstream/zstream_redup.c
@@ -421,7 +421,7 @@ int
zstream_do_redup(int argc, char *argv[])
{
boolean_t verbose = B_FALSE;
- char c;
+ int c;
while ((c = getopt(argc, argv, "v")) != -1) {
switch (c) {
diff --git a/sys/contrib/openzfs/cmd/ztest/ztest.c b/sys/contrib/openzfs/cmd/ztest/ztest.c
index 31205a5bf8cf..f66772fa7285 100644
--- a/sys/contrib/openzfs/cmd/ztest/ztest.c
+++ b/sys/contrib/openzfs/cmd/ztest/ztest.c
@@ -104,6 +104,7 @@
#include <sys/zio.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
+#include <sys/vdev_draid.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_file.h>
#include <sys/vdev_initialize.h>
@@ -167,8 +168,11 @@ typedef struct ztest_shared_opts {
size_t zo_vdev_size;
int zo_ashift;
int zo_mirrors;
- int zo_raidz;
- int zo_raidz_parity;
+ int zo_raid_children;
+ int zo_raid_parity;
+ char zo_raid_type[8];
+ int zo_draid_data;
+ int zo_draid_spares;
int zo_datasets;
int zo_threads;
uint64_t zo_passtime;
@@ -191,9 +195,12 @@ static const ztest_shared_opts_t ztest_opts_defaults = {
.zo_vdevs = 5,
.zo_ashift = SPA_MINBLOCKSHIFT,
.zo_mirrors = 2,
- .zo_raidz = 4,
- .zo_raidz_parity = 1,
+ .zo_raid_children = 4,
+ .zo_raid_parity = 1,
+ .zo_raid_type = VDEV_TYPE_RAIDZ,
.zo_vdev_size = SPA_MINDEVSIZE * 4, /* 256m default size */
+ .zo_draid_data = 4, /* data drives */
+ .zo_draid_spares = 1, /* distributed spares */
.zo_datasets = 7,
.zo_threads = 23,
.zo_passtime = 60, /* 60 seconds */
@@ -232,7 +239,7 @@ static ztest_shared_ds_t *ztest_shared_ds;
#define BT_MAGIC 0x123456789abcdefULL
#define MAXFAULTS(zs) \
- (MAX((zs)->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1)
+ (MAX((zs)->zs_mirrors, 1) * (ztest_opts.zo_raid_parity + 1) - 1)
enum ztest_io_type {
ZTEST_IO_WRITE_TAG,
@@ -689,8 +696,11 @@ usage(boolean_t requested)
"\t[-s size_of_each_vdev (default: %s)]\n"
"\t[-a alignment_shift (default: %d)] use 0 for random\n"
"\t[-m mirror_copies (default: %d)]\n"
- "\t[-r raidz_disks (default: %d)]\n"
- "\t[-R raidz_parity (default: %d)]\n"
+ "\t[-r raidz_disks / draid_disks (default: %d)]\n"
+ "\t[-R raid_parity (default: %d)]\n"
+ "\t[-K raid_kind (default: random)] raidz|draid|random\n"
+ "\t[-D draid_data (default: %d)] in config\n"
+ "\t[-S draid_spares (default: %d)]\n"
"\t[-d datasets (default: %d)]\n"
"\t[-t threads (default: %d)]\n"
"\t[-g gang_block_threshold (default: %s)]\n"
@@ -716,8 +726,10 @@ usage(boolean_t requested)
nice_vdev_size, /* -s */
zo->zo_ashift, /* -a */
zo->zo_mirrors, /* -m */
- zo->zo_raidz, /* -r */
- zo->zo_raidz_parity, /* -R */
+ zo->zo_raid_children, /* -r */
+ zo->zo_raid_parity, /* -R */
+ zo->zo_draid_data, /* -D */
+ zo->zo_draid_spares, /* -S */
zo->zo_datasets, /* -d */
zo->zo_threads, /* -t */
nice_force_ganging, /* -g */
@@ -731,6 +743,21 @@ usage(boolean_t requested)
exit(requested ? 0 : 1);
}
+static uint64_t
+ztest_random(uint64_t range)
+{
+ uint64_t r;
+
+ ASSERT3S(ztest_fd_rand, >=, 0);
+
+ if (range == 0)
+ return (0);
+
+ if (read(ztest_fd_rand, &r, sizeof (r)) != sizeof (r))
+ fatal(1, "short read from /dev/urandom");
+
+ return (r % range);
+}
static void
ztest_parse_name_value(const char *input, ztest_shared_opts_t *zo)
@@ -780,11 +807,12 @@ process_options(int argc, char **argv)
int opt;
uint64_t value;
char altdir[MAXNAMELEN] = { 0 };
+ char raid_kind[8] = { "random" };
bcopy(&ztest_opts_defaults, zo, sizeof (*zo));
while ((opt = getopt(argc, argv,
- "v:s:a:m:r:R:d:t:g:i:k:p:f:MVET:P:hF:B:C:o:G")) != EOF) {
+ "v:s:a:m:r:R:K:D:S:d:t:g:i:k:p:f:MVET:P:hF:B:C:o:G")) != EOF) {
value = 0;
switch (opt) {
case 'v':
@@ -793,6 +821,8 @@ process_options(int argc, char **argv)
case 'm':
case 'r':
case 'R':
+ case 'D':
+ case 'S':
case 'd':
case 't':
case 'g':
@@ -817,10 +847,19 @@ process_options(int argc, char **argv)
zo->zo_mirrors = value;
break;
case 'r':
- zo->zo_raidz = MAX(1, value);
+ zo->zo_raid_children = MAX(1, value);
break;
case 'R':
- zo->zo_raidz_parity = MIN(MAX(value, 1), 3);
+ zo->zo_raid_parity = MIN(MAX(value, 1), 3);
+ break;
+ case 'K':
+ (void) strlcpy(raid_kind, optarg, sizeof (raid_kind));
+ break;
+ case 'D':
+ zo->zo_draid_data = MAX(1, value);
+ break;
+ case 'S':
+ zo->zo_draid_spares = MAX(1, value);
break;
case 'd':
zo->zo_datasets = MAX(1, value);
@@ -895,7 +934,54 @@ process_options(int argc, char **argv)
}
}
- zo->zo_raidz_parity = MIN(zo->zo_raidz_parity, zo->zo_raidz - 1);
+ /* When raid choice is 'random' add a draid pool 50% of the time */
+ if (strcmp(raid_kind, "random") == 0) {
+ (void) strlcpy(raid_kind, (ztest_random(2) == 0) ?
+ "draid" : "raidz", sizeof (raid_kind));
+
+ if (ztest_opts.zo_verbose >= 3)
+ (void) printf("choosing RAID type '%s'\n", raid_kind);
+ }
+
+ if (strcmp(raid_kind, "draid") == 0) {
+ uint64_t min_devsize;
+
+ /* With fewer disks use 256M, otherwise 128M is OK */
+ min_devsize = (ztest_opts.zo_raid_children < 16) ?
+ (256ULL << 20) : (128ULL << 20);
+
+ /* No top-level mirrors with dRAID for now */
+ zo->zo_mirrors = 0;
+
+ /* Use more appropriate defaults for dRAID */
+ if (zo->zo_vdevs == ztest_opts_defaults.zo_vdevs)
+ zo->zo_vdevs = 1;
+ if (zo->zo_raid_children ==
+ ztest_opts_defaults.zo_raid_children)
+ zo->zo_raid_children = 16;
+ if (zo->zo_ashift < 12)
+ zo->zo_ashift = 12;
+ if (zo->zo_vdev_size < min_devsize)
+ zo->zo_vdev_size = min_devsize;
+
+ if (zo->zo_draid_data + zo->zo_raid_parity >
+ zo->zo_raid_children - zo->zo_draid_spares) {
+ (void) fprintf(stderr, "error: too few draid "
+ "children (%d) for stripe width (%d)\n",
+ zo->zo_raid_children,
+ zo->zo_draid_data + zo->zo_raid_parity);
+ usage(B_FALSE);
+ }
+
+ (void) strlcpy(zo->zo_raid_type, VDEV_TYPE_DRAID,
+ sizeof (zo->zo_raid_type));
+
+ } else /* using raidz */ {
+ ASSERT0(strcmp(raid_kind, "raidz"));
+
+ zo->zo_raid_parity = MIN(zo->zo_raid_parity,
+ zo->zo_raid_children - 1);
+ }
zo->zo_vdevtime =
(zo->zo_vdevs > 0 ? zo->zo_time * NANOSEC / zo->zo_vdevs :
@@ -966,22 +1052,6 @@ ztest_kill(ztest_shared_t *zs)
(void) kill(getpid(), SIGKILL);
}
-static uint64_t
-ztest_random(uint64_t range)
-{
- uint64_t r;
-
- ASSERT3S(ztest_fd_rand, >=, 0);
-
- if (range == 0)
- return (0);
-
- if (read(ztest_fd_rand, &r, sizeof (r)) != sizeof (r))
- fatal(1, "short read from /dev/urandom");
-
- return (r % range);
-}
-
/* ARGSUSED */
static void
ztest_record_enospc(const char *s)
@@ -997,12 +1067,27 @@ ztest_get_ashift(void)
return (ztest_opts.zo_ashift);
}
+static boolean_t
+ztest_is_draid_spare(const char *name)
+{
+ uint64_t spare_id = 0, parity = 0, vdev_id = 0;
+
+ if (sscanf(name, VDEV_TYPE_DRAID "%llu-%llu-%llu",
+ (u_longlong_t *)&parity, (u_longlong_t *)&vdev_id,
+ (u_longlong_t *)&spare_id) == 3) {
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
static nvlist_t *
make_vdev_file(char *path, char *aux, char *pool, size_t size, uint64_t ashift)
{
char *pathbuf;
uint64_t vdev;
nvlist_t *file;
+ boolean_t draid_spare = B_FALSE;
pathbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
@@ -1024,9 +1109,11 @@ make_vdev_file(char *path, char *aux, char *pool, size_t size, uint64_t ashift)
ztest_dev_template, ztest_opts.zo_dir,
pool == NULL ? ztest_opts.zo_pool : pool, vdev);
}
+ } else {
+ draid_spare = ztest_is_draid_spare(path);
}
- if (size != 0) {
+ if (size != 0 && !draid_spare) {
int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666);
if (fd == -1)
fatal(1, "can't open %s", path);
@@ -1035,20 +1122,21 @@ make_vdev_file(char *path, char *aux, char *pool, size_t size, uint64_t ashift)
(void) close(fd);
}
- VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0);
- VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0);
- VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0);
- VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0);
+ VERIFY0(nvlist_alloc(&file, NV_UNIQUE_NAME, 0));
+ VERIFY0(nvlist_add_string(file, ZPOOL_CONFIG_TYPE,
+ draid_spare ? VDEV_TYPE_DRAID_SPARE : VDEV_TYPE_FILE));
+ VERIFY0(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path));
+ VERIFY0(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift));
umem_free(pathbuf, MAXPATHLEN);
return (file);
}
static nvlist_t *
-make_vdev_raidz(char *path, char *aux, char *pool, size_t size,
+make_vdev_raid(char *path, char *aux, char *pool, size_t size,
uint64_t ashift, int r)
{
- nvlist_t *raidz, **child;
+ nvlist_t *raid, **child;
int c;
if (r < 2)
@@ -1058,20 +1146,41 @@ make_vdev_raidz(char *path, char *aux, char *pool, size_t size,
for (c = 0; c < r; c++)
child[c] = make_vdev_file(path, aux, pool, size, ashift);
- VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0);
- VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE,
- VDEV_TYPE_RAIDZ) == 0);
- VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY,
- ztest_opts.zo_raidz_parity) == 0);
- VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN,
- child, r) == 0);
+ VERIFY0(nvlist_alloc(&raid, NV_UNIQUE_NAME, 0));
+ VERIFY0(nvlist_add_string(raid, ZPOOL_CONFIG_TYPE,
+ ztest_opts.zo_raid_type));
+ VERIFY0(nvlist_add_uint64(raid, ZPOOL_CONFIG_NPARITY,
+ ztest_opts.zo_raid_parity));
+ VERIFY0(nvlist_add_nvlist_array(raid, ZPOOL_CONFIG_CHILDREN,
+ child, r));
+
+ if (strcmp(ztest_opts.zo_raid_type, VDEV_TYPE_DRAID) == 0) {
+ uint64_t ndata = ztest_opts.zo_draid_data;
+ uint64_t nparity = ztest_opts.zo_raid_parity;
+ uint64_t nspares = ztest_opts.zo_draid_spares;
+ uint64_t children = ztest_opts.zo_raid_children;
+ uint64_t ngroups = 1;
+
+ /*
+ * Calculate the minimum number of groups required to fill a
+ * slice. This is the LCM of the stripe width (data + parity)
+ * and the number of data drives (children - spares).
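+ * For example, with 16 children, 1 distributed spare, 4 data, and
+ * 1 parity there are 15 data drives and a stripe width of 5, so
+ * ngroups = 3.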
+ */
+ while (ngroups * (ndata + nparity) % (children - nspares) != 0)
+ ngroups++;
+
+ /* Store the basic dRAID configuration. */
+ fnvlist_add_uint64(raid, ZPOOL_CONFIG_DRAID_NDATA, ndata);
+ fnvlist_add_uint64(raid, ZPOOL_CONFIG_DRAID_NSPARES, nspares);
+ fnvlist_add_uint64(raid, ZPOOL_CONFIG_DRAID_NGROUPS, ngroups);
+ }
for (c = 0; c < r; c++)
nvlist_free(child[c]);
umem_free(child, r * sizeof (nvlist_t *));
- return (raidz);
+ return (raid);
}
static nvlist_t *
@@ -1082,12 +1191,12 @@ make_vdev_mirror(char *path, char *aux, char *pool, size_t size,
int c;
if (m < 1)
- return (make_vdev_raidz(path, aux, pool, size, ashift, r));
+ return (make_vdev_raid(path, aux, pool, size, ashift, r));
child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL);
for (c = 0; c < m; c++)
- child[c] = make_vdev_raidz(path, aux, pool, size, ashift, r);
+ child[c] = make_vdev_raid(path, aux, pool, size, ashift, r);
VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0);
VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE,
@@ -1332,7 +1441,11 @@ ztest_dmu_objset_own(const char *name, dmu_objset_type_t type,
VERIFY0(dsl_crypto_params_create_nvlist(DCP_CMD_NONE, NULL,
crypto_args, &dcp));
err = spa_keystore_load_wkey(ddname, dcp, B_FALSE);
- dsl_crypto_params_free(dcp, B_FALSE);
+ /*
+ * Note: if there was an error loading, the wkey was not
+ * consumed, and needs to be freed.
+ */
+ dsl_crypto_params_free(dcp, (err != 0));
fnvlist_free(crypto_args);
if (err == EINVAL) {
@@ -2809,6 +2922,10 @@ ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id)
if (ztest_opts.zo_mmp_test)
return;
+ /* dRAID added after feature flags, skip upgrade test. */
+ if (strcmp(ztest_opts.zo_raid_type, VDEV_TYPE_DRAID) == 0)
+ return;
+
mutex_enter(&ztest_vdev_lock);
name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool);
@@ -2818,13 +2935,13 @@ ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id)
(void) spa_destroy(name);
nvroot = make_vdev_root(NULL, NULL, name, ztest_opts.zo_vdev_size, 0,
- NULL, ztest_opts.zo_raidz, ztest_opts.zo_mirrors, 1);
+ NULL, ztest_opts.zo_raid_children, ztest_opts.zo_mirrors, 1);
/*
* If we're configuring a RAIDZ device then make sure that the
* initial version is capable of supporting that feature.
*/
- switch (ztest_opts.zo_raidz_parity) {
+ switch (ztest_opts.zo_raid_parity) {
case 0:
case 1:
initial_version = SPA_VERSION_INITIAL;
@@ -2970,7 +3087,8 @@ ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
return;
mutex_enter(&ztest_vdev_lock);
- leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz;
+ leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) *
+ ztest_opts.zo_raid_children;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
@@ -2985,7 +3103,7 @@ ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
/*
* find the first real slog in log allocation class
*/
- mg = spa_log_class(spa)->mc_rotor;
+ mg = spa_log_class(spa)->mc_allocator[0].mca_rotor;
while (!mg->mg_vd->vdev_islog)
mg = mg->mg_next;
@@ -3024,7 +3142,8 @@ ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
*/
nvroot = make_vdev_root(NULL, NULL, NULL,
ztest_opts.zo_vdev_size, 0, (ztest_random(4) == 0) ?
- "log" : NULL, ztest_opts.zo_raidz, zs->zs_mirrors, 1);
+ "log" : NULL, ztest_opts.zo_raid_children, zs->zs_mirrors,
+ 1);
error = spa_vdev_add(spa, nvroot);
nvlist_free(nvroot);
@@ -3078,14 +3197,15 @@ ztest_vdev_class_add(ztest_ds_t *zd, uint64_t id)
return;
}
- leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz;
+ leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) *
+ ztest_opts.zo_raid_children;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
ztest_shared->zs_vdev_next_leaf = spa_num_top_vdevs(spa) * leaves;
spa_config_exit(spa, SCL_VDEV, FTAG);
nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0,
- class, ztest_opts.zo_raidz, zs->zs_mirrors, 1);
+ class, ztest_opts.zo_raid_children, zs->zs_mirrors, 1);
error = spa_vdev_add(spa, nvroot);
nvlist_free(nvroot);
@@ -3134,7 +3254,7 @@ ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id)
char *aux;
char *path;
uint64_t guid = 0;
- int error;
+ int error, ignore_err = 0;
if (ztest_opts.zo_mmp_test)
return;
@@ -3157,7 +3277,13 @@ ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id)
/*
* Pick a random device to remove.
*/
- guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid;
+ vdev_t *svd = sav->sav_vdevs[ztest_random(sav->sav_count)];
+
+ /* dRAID spares cannot be removed; try anyway to see ENOTSUP */
+ if (strstr(svd->vdev_path, VDEV_TYPE_DRAID) != NULL)
+ ignore_err = ENOTSUP;
+
+ guid = svd->vdev_guid;
} else {
/*
* Find an unused device we can add.
@@ -3214,7 +3340,9 @@ ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id)
case ZFS_ERR_DISCARDING_CHECKPOINT:
break;
default:
- fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);
+ if (error != ignore_err)
+ fatal(0, "spa_vdev_remove(%llu) = %d", guid,
+ error);
}
}
@@ -3243,7 +3371,7 @@ ztest_split_pool(ztest_ds_t *zd, uint64_t id)
mutex_enter(&ztest_vdev_lock);
/* ensure we have a usable config; mirrors of raidz aren't supported */
- if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) {
+ if (zs->zs_mirrors < 3 || ztest_opts.zo_raid_children > 1) {
mutex_exit(&ztest_vdev_lock);
return;
}
@@ -3343,6 +3471,7 @@ ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
int replacing;
int oldvd_has_siblings = B_FALSE;
int newvd_is_spare = B_FALSE;
+ int newvd_is_dspare = B_FALSE;
int oldvd_is_log;
int error, expected_error;
@@ -3353,7 +3482,7 @@ ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
newpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
mutex_enter(&ztest_vdev_lock);
- leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
+ leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raid_children;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
@@ -3365,8 +3494,7 @@ ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
*/
if (ztest_device_removal_active) {
spa_config_exit(spa, SCL_ALL, FTAG);
- mutex_exit(&ztest_vdev_lock);
- return;
+ goto out;
}
/*
@@ -3393,14 +3521,17 @@ ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
if (zs->zs_mirrors >= 1) {
ASSERT(oldvd->vdev_ops == &vdev_mirror_ops);
ASSERT(oldvd->vdev_children >= zs->zs_mirrors);
- oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raidz];
+ oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raid_children];
}
/* pick a child out of the raidz group */
- if (ztest_opts.zo_raidz > 1) {
- ASSERT(oldvd->vdev_ops == &vdev_raidz_ops);
- ASSERT(oldvd->vdev_children == ztest_opts.zo_raidz);
- oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raidz];
+ if (ztest_opts.zo_raid_children > 1) {
+ if (strcmp(oldvd->vdev_ops->vdev_op_type, "raidz") == 0)
+ ASSERT(oldvd->vdev_ops == &vdev_raidz_ops);
+ else
+ ASSERT(oldvd->vdev_ops == &vdev_draid_ops);
+ ASSERT(oldvd->vdev_children == ztest_opts.zo_raid_children);
+ oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raid_children];
}
/*
@@ -3447,6 +3578,10 @@ ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
if (sav->sav_count != 0 && ztest_random(3) == 0) {
newvd = sav->sav_vdevs[ztest_random(sav->sav_count)];
newvd_is_spare = B_TRUE;
+
+ if (newvd->vdev_ops == &vdev_draid_spare_ops)
+ newvd_is_dspare = B_TRUE;
+
(void) strcpy(newpath, newvd->vdev_path);
} else {
(void) snprintf(newpath, MAXPATHLEN, ztest_dev_template,
@@ -3480,6 +3615,9 @@ ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
* If newvd is already part of the pool, it should fail with EBUSY.
*
* If newvd is too small, it should fail with EOVERFLOW.
+ *
+ * If newvd is a distributed spare and it's being attached to a
+ * dRAID which is not its parent, it should fail with ENOTSUP.
*/
if (pvd->vdev_ops != &vdev_mirror_ops &&
pvd->vdev_ops != &vdev_root_ops && (!replacing ||
@@ -3492,10 +3630,12 @@ ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
expected_error = replacing ? 0 : EBUSY;
else if (vdev_lookup_by_path(rvd, newpath) != NULL)
expected_error = EBUSY;
- else if (newsize < oldsize)
+ else if (!newvd_is_dspare && newsize < oldsize)
expected_error = EOVERFLOW;
else if (ashift > oldvd->vdev_top->vdev_ashift)
expected_error = EDOM;
+ else if (newvd_is_dspare && pvd != vdev_draid_spare_get_parent(newvd))
+ expected_error = ENOTSUP;
else
expected_error = 0;
@@ -4880,13 +5020,13 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
- VERIFY(0 == dmu_read(os, packobj, packoff,
+ VERIFY0(dmu_read(os, packobj, packoff,
packsize, packcheck, DMU_READ_PREFETCH));
- VERIFY(0 == dmu_read(os, bigobj, bigoff,
+ VERIFY0(dmu_read(os, bigobj, bigoff,
bigsize, bigcheck, DMU_READ_PREFETCH));
- ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
- ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
+ ASSERT0(bcmp(packbuf, packcheck, packsize));
+ ASSERT0(bcmp(bigbuf, bigcheck, bigsize));
umem_free(packcheck, packsize);
umem_free(bigcheck, bigsize);
@@ -5761,7 +5901,7 @@ ztest_fault_inject(ztest_ds_t *zd, uint64_t id)
}
maxfaults = MAXFAULTS(zs);
- leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
+ leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raid_children;
mirror_save = zs->zs_mirrors;
mutex_exit(&ztest_vdev_lock);
@@ -6011,7 +6151,7 @@ out:
/*
* By design ztest will never inject uncorrectable damage in to the pool.
* Issue a scrub, wait for it to complete, and verify there is never any
- * any persistent damage.
+ * persistent damage.
*
* Only after a full scrub has been completed is it safe to start injecting
* data corruption. See the comment in zfs_fault_inject().
@@ -7016,6 +7156,7 @@ ztest_import_impl(ztest_shared_t *zs)
VERIFY0(zpool_find_config(NULL, ztest_opts.zo_pool, &cfg, &args,
&libzpool_config_ops));
VERIFY0(spa_import(ztest_opts.zo_pool, cfg, NULL, flags));
+ fnvlist_free(cfg);
}
/*
@@ -7347,7 +7488,7 @@ ztest_init(ztest_shared_t *zs)
zs->zs_splits = 0;
zs->zs_mirrors = ztest_opts.zo_mirrors;
nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0,
- NULL, ztest_opts.zo_raidz, zs->zs_mirrors, 1);
+ NULL, ztest_opts.zo_raid_children, zs->zs_mirrors, 1);
props = make_random_props();
/*
@@ -7683,10 +7824,12 @@ main(int argc, char **argv)
if (ztest_opts.zo_verbose >= 1) {
(void) printf("%llu vdevs, %d datasets, %d threads,"
- " %llu seconds...\n",
+ "%d %s disks, %llu seconds...\n\n",
(u_longlong_t)ztest_opts.zo_vdevs,
ztest_opts.zo_datasets,
ztest_opts.zo_threads,
+ ztest_opts.zo_raid_children,
+ ztest_opts.zo_raid_type,
(u_longlong_t)ztest_opts.zo_time);
}
diff --git a/sys/contrib/openzfs/config/Abigail.am b/sys/contrib/openzfs/config/Abigail.am
new file mode 100644
index 000000000000..599f611942b0
--- /dev/null
+++ b/sys/contrib/openzfs/config/Abigail.am
@@ -0,0 +1,29 @@
+#
+# When performing an ABI check the following options are applied:
+#
+# --no-unreferenced-symbols: Exclude symbols which are not referenced by
+# any debug information. Without this _init() and _fini() are incorrectly
+# reported on CentOS7 for libuutil.so.
+#
+# --headers-dir1: Limit ABI checks to public OpenZFS headers, otherwise
+# changes in public system headers are also reported.
+#
+# --suppressions: Honor a suppressions file for each library to provide
+# a mechanism for suppressing harmless warnings.
+#
+
+PHONY += checkabi storeabi
+
+checkabi:
+ for lib in $(lib_LTLIBRARIES) ; do \
+ abidiff --no-unreferenced-symbols \
+ --headers-dir1 ../../include \
+ --suppressions $${lib%.la}.suppr \
+ $${lib%.la}.abi .libs/$${lib%.la}.so ; \
+ done
+
+storeabi:
+ cd .libs ; \
+ for lib in $(lib_LTLIBRARIES) ; do \
+ abidw $${lib%.la}.so > ../$${lib%.la}.abi ; \
+ done
diff --git a/sys/contrib/openzfs/config/always-python.m4 b/sys/contrib/openzfs/config/always-python.m4
index c01e631a8f4f..76b06fcd8488 100644
--- a/sys/contrib/openzfs/config/always-python.m4
+++ b/sys/contrib/openzfs/config/always-python.m4
@@ -7,7 +7,7 @@ dnl # set the PYTHON environment variable accordingly.
dnl #
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_PYTHON], [
AC_ARG_WITH([python],
- AC_HELP_STRING([--with-python[=VERSION]],
+ AS_HELP_STRING([--with-python[=VERSION]],
[default system python version @<:@default=check@:>@]),
[with_python=$withval],
[with_python=check])
diff --git a/sys/contrib/openzfs/config/always-pyzfs.m4 b/sys/contrib/openzfs/config/always-pyzfs.m4
index f620a8f9a18b..76e07b593df2 100644
--- a/sys/contrib/openzfs/config/always-pyzfs.m4
+++ b/sys/contrib/openzfs/config/always-pyzfs.m4
@@ -22,7 +22,7 @@ dnl # Determines if pyzfs can be built, requires Python 2.7 or later.
dnl #
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_PYZFS], [
AC_ARG_ENABLE([pyzfs],
- AC_HELP_STRING([--enable-pyzfs],
+ AS_HELP_STRING([--enable-pyzfs],
[install libzfs_core python bindings @<:@default=check@:>@]),
[enable_pyzfs=$enableval],
[enable_pyzfs=check])
diff --git a/sys/contrib/openzfs/config/always-sed.m4 b/sys/contrib/openzfs/config/always-sed.m4
index 19633e118aed..3d7ae285ba1b 100644
--- a/sys/contrib/openzfs/config/always-sed.m4
+++ b/sys/contrib/openzfs/config/always-sed.m4
@@ -4,7 +4,7 @@ dnl #
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_SED], [
AC_REQUIRE([AC_PROG_SED])dnl
AC_CACHE_CHECK([for sed --in-place], [ac_cv_inplace], [
- tmpfile=$(mktemp conftest.XXX)
+ tmpfile=$(mktemp conftest.XXXXXX)
echo foo >$tmpfile
AS_IF([$SED --in-place 's#foo#bar#' $tmpfile 2>/dev/null],
[ac_cv_inplace="--in-place"],
diff --git a/sys/contrib/openzfs/config/deb.am b/sys/contrib/openzfs/config/deb.am
index 79063e407fe3..639a46efddbf 100644
--- a/sys/contrib/openzfs/config/deb.am
+++ b/sys/contrib/openzfs/config/deb.am
@@ -41,11 +41,11 @@ deb-utils: deb-local rpm-utils-initramfs
arch=`$(RPM) -qp $${name}-$${version}.src.rpm --qf %{arch} | tail -1`; \
debarch=`$(DPKG) --print-architecture`; \
pkg1=$${name}-$${version}.$${arch}.rpm; \
- pkg2=libnvpair1-$${version}.$${arch}.rpm; \
- pkg3=libuutil1-$${version}.$${arch}.rpm; \
- pkg4=libzfs2-$${version}.$${arch}.rpm; \
- pkg5=libzpool2-$${version}.$${arch}.rpm; \
- pkg6=libzfs2-devel-$${version}.$${arch}.rpm; \
+ pkg2=libnvpair3-$${version}.$${arch}.rpm; \
+ pkg3=libuutil3-$${version}.$${arch}.rpm; \
+ pkg4=libzfs4-$${version}.$${arch}.rpm; \
+ pkg5=libzpool4-$${version}.$${arch}.rpm; \
+ pkg6=libzfs4-devel-$${version}.$${arch}.rpm; \
pkg7=$${name}-test-$${version}.$${arch}.rpm; \
pkg8=$${name}-dracut-$${version}.noarch.rpm; \
pkg9=$${name}-initramfs-$${version}.$${arch}.rpm; \
@@ -53,10 +53,10 @@ deb-utils: deb-local rpm-utils-initramfs
## Arguments need to be passed to dh_shlibdeps. Alien provides no mechanism
## to do this, so we install a shim onto the path which calls the real
## dh_shlibdeps with the required arguments.
- path_prepend=`mktemp -d /tmp/intercept.XXX`; \
+ path_prepend=`mktemp -d /tmp/intercept.XXXXXX`; \
echo "#$(SHELL)" > $${path_prepend}/dh_shlibdeps; \
echo "`which dh_shlibdeps` -- \
- -xlibuutil1linux -xlibnvpair1linux -xlibzfs2linux -xlibzpool2linux" \
+ -xlibuutil3linux -xlibnvpair3linux -xlibzfs4linux -xlibzpool4linux" \
>> $${path_prepend}/dh_shlibdeps; \
## These -x arguments are passed to dpkg-shlibdeps, which exclude the
## Debianized packages from the auto-generated dependencies of the new debs,
diff --git a/sys/contrib/openzfs/config/kernel-acl.m4 b/sys/contrib/openzfs/config/kernel-acl.m4
index 0f1c24656730..e02ce665323f 100644
--- a/sys/contrib/openzfs/config/kernel-acl.m4
+++ b/sys/contrib/openzfs/config/kernel-acl.m4
@@ -11,7 +11,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_POSIX_ACL_RELEASE], [
], [
struct posix_acl *tmp = posix_acl_alloc(1, 0);
posix_acl_release(tmp);
- ], [], [$ZFS_META_LICENSE])
+ ], [], [ZFS_META_LICENSE])
])
AC_DEFUN([ZFS_AC_KERNEL_POSIX_ACL_RELEASE], [
@@ -50,7 +50,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_SET_CACHED_ACL_USABLE], [
struct posix_acl *acl = posix_acl_alloc(1, 0);
set_cached_acl(ip, ACL_TYPE_ACCESS, acl);
forget_cached_acl(ip, ACL_TYPE_ACCESS);
- ], [], [$ZFS_META_LICENSE])
+ ], [], [ZFS_META_LICENSE])
])
AC_DEFUN([ZFS_AC_KERNEL_SET_CACHED_ACL_USABLE], [
diff --git a/sys/contrib/openzfs/config/kernel-bio.m4 b/sys/contrib/openzfs/config/kernel-bio.m4
index 534282780d3e..0c533531dceb 100644
--- a/sys/contrib/openzfs/config/kernel-bio.m4
+++ b/sys/contrib/openzfs/config/kernel-bio.m4
@@ -188,7 +188,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_BIO_SET_DEV], [
struct block_device *bdev = NULL;
struct bio *bio = NULL;
bio_set_dev(bio, bdev);
- ], [], [$ZFS_META_LICENSE])
+ ], [], [ZFS_META_LICENSE])
])
AC_DEFUN([ZFS_AC_KERNEL_BIO_SET_DEV], [
@@ -347,7 +347,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKG_TRYGET], [
struct blkcg_gq blkg __attribute__ ((unused)) = {};
bool rc __attribute__ ((unused));
rc = blkg_tryget(&blkg);
- ], [], [$ZFS_META_LICENSE])
+ ], [], [ZFS_META_LICENSE])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKG_TRYGET], [
diff --git a/sys/contrib/openzfs/config/kernel-blk-queue.m4 b/sys/contrib/openzfs/config/kernel-blk-queue.m4
index 382ebefd34a3..ff2da92e9ee1 100644
--- a/sys/contrib/openzfs/config/kernel-blk-queue.m4
+++ b/sys/contrib/openzfs/config/kernel-blk-queue.m4
@@ -179,7 +179,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_FLUSH], [
], [
struct request_queue *q = NULL;
(void) blk_queue_flush(q, REQ_FLUSH);
- ], [$NO_UNUSED_BUT_SET_VARIABLE], [$ZFS_META_LICENSE])
+ ], [$NO_UNUSED_BUT_SET_VARIABLE], [ZFS_META_LICENSE])
ZFS_LINUX_TEST_SRC([blk_queue_write_cache], [
#include <linux/kernel.h>
@@ -187,7 +187,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_FLUSH], [
], [
struct request_queue *q = NULL;
blk_queue_write_cache(q, true, true);
- ], [$NO_UNUSED_BUT_SET_VARIABLE], [$ZFS_META_LICENSE])
+ ], [$NO_UNUSED_BUT_SET_VARIABLE], [ZFS_META_LICENSE])
])
AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_FLUSH], [
diff --git a/sys/contrib/openzfs/config/kernel-blkdev.m4 b/sys/contrib/openzfs/config/kernel-blkdev.m4
index 2644555f5524..4b80d4dd29a5 100644
--- a/sys/contrib/openzfs/config/kernel-blkdev.m4
+++ b/sys/contrib/openzfs/config/kernel-blkdev.m4
@@ -78,6 +78,59 @@ AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_REREAD_PART], [
])
dnl #
+dnl # check_disk_change() was removed in 5.10
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_CHECK_DISK_CHANGE], [
+ ZFS_LINUX_TEST_SRC([check_disk_change], [
+ #include <linux/fs.h>
+ #include <linux/blkdev.h>
+ ], [
+ struct block_device *bdev = NULL;
+ bool error;
+
+ error = check_disk_change(bdev);
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_CHECK_DISK_CHANGE], [
+ AC_MSG_CHECKING([whether check_disk_change() exists])
+ ZFS_LINUX_TEST_RESULT([check_disk_change], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_CHECK_DISK_CHANGE, 1,
+ [check_disk_change() exists])
+ ], [
+ AC_MSG_RESULT(no)
+ ])
+])
+
+dnl #
+dnl # 5.10 API, check_disk_change() is removed, in favor of
+dnl # bdev_check_media_change(), which doesn't force revalidation
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_CHECK_MEDIA_CHANGE], [
+ ZFS_LINUX_TEST_SRC([bdev_check_media_change], [
+ #include <linux/fs.h>
+ #include <linux/blkdev.h>
+ ], [
+ struct block_device *bdev = NULL;
+ int error;
+
+ error = bdev_check_media_change(bdev);
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BDEV_CHECK_MEDIA_CHANGE], [
+ AC_MSG_CHECKING([whether bdev_check_media_change() exists])
+ ZFS_LINUX_TEST_RESULT([bdev_check_media_change], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BDEV_CHECK_MEDIA_CHANGE, 1,
+ [bdev_check_media_change() exists])
+ ], [
+ AC_MSG_RESULT(no)
+ ])
+])
+
+dnl #
dnl # 2.6.22 API change
dnl # Single argument invalidate_bdev()
dnl #
@@ -101,42 +154,69 @@ AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_INVALIDATE_BDEV], [
])
dnl #
-dnl # 2.6.27, lookup_bdev() was exported.
-dnl # 4.4.0-6.21 - lookup_bdev() takes 2 arguments.
+dnl # 5.11 API, lookup_bdev() takes dev_t argument.
+dnl # 2.6.27 API, lookup_bdev() was first exported.
+dnl # 4.4.0-6.21 API, lookup_bdev() on Ubuntu takes mode argument.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_LOOKUP_BDEV], [
+ ZFS_LINUX_TEST_SRC([lookup_bdev_devt], [
+ #include <linux/blkdev.h>
+ ], [
+ int error __attribute__ ((unused));
+ const char path[] = "/example/path";
+ dev_t dev;
+
+ error = lookup_bdev(path, &dev);
+ ])
+
ZFS_LINUX_TEST_SRC([lookup_bdev_1arg], [
#include <linux/fs.h>
#include <linux/blkdev.h>
], [
- lookup_bdev(NULL);
+ struct block_device *bdev __attribute__ ((unused));
+ const char path[] = "/example/path";
+
+ bdev = lookup_bdev(path);
])
- ZFS_LINUX_TEST_SRC([lookup_bdev_2args], [
+ ZFS_LINUX_TEST_SRC([lookup_bdev_mode], [
#include <linux/fs.h>
], [
- lookup_bdev(NULL, FMODE_READ);
+ struct block_device *bdev __attribute__ ((unused));
+ const char path[] = "/example/path";
+
+ bdev = lookup_bdev(path, FMODE_READ);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_LOOKUP_BDEV], [
- AC_MSG_CHECKING([whether lookup_bdev() wants 1 arg])
- ZFS_LINUX_TEST_RESULT_SYMBOL([lookup_bdev_1arg],
+ AC_MSG_CHECKING([whether lookup_bdev() wants dev_t arg])
+ ZFS_LINUX_TEST_RESULT_SYMBOL([lookup_bdev_devt],
[lookup_bdev], [fs/block_dev.c], [
AC_MSG_RESULT(yes)
- AC_DEFINE(HAVE_1ARG_LOOKUP_BDEV, 1,
- [lookup_bdev() wants 1 arg])
+ AC_DEFINE(HAVE_DEVT_LOOKUP_BDEV, 1,
+ [lookup_bdev() wants dev_t arg])
], [
AC_MSG_RESULT(no)
- AC_MSG_CHECKING([whether lookup_bdev() wants 2 args])
- ZFS_LINUX_TEST_RESULT_SYMBOL([lookup_bdev_2args],
+ AC_MSG_CHECKING([whether lookup_bdev() wants 1 arg])
+ ZFS_LINUX_TEST_RESULT_SYMBOL([lookup_bdev_1arg],
[lookup_bdev], [fs/block_dev.c], [
AC_MSG_RESULT(yes)
- AC_DEFINE(HAVE_2ARGS_LOOKUP_BDEV, 1,
- [lookup_bdev() wants 2 args])
+ AC_DEFINE(HAVE_1ARG_LOOKUP_BDEV, 1,
+ [lookup_bdev() wants 1 arg])
], [
- ZFS_LINUX_TEST_ERROR([lookup_bdev()])
+ AC_MSG_RESULT(no)
+
+ AC_MSG_CHECKING([whether lookup_bdev() wants mode arg])
+ ZFS_LINUX_TEST_RESULT_SYMBOL([lookup_bdev_mode],
+ [lookup_bdev], [fs/block_dev.c], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_MODE_LOOKUP_BDEV, 1,
+ [lookup_bdev() wants mode arg])
+ ], [
+ ZFS_LINUX_TEST_ERROR([lookup_bdev()])
+ ])
])
])
])
@@ -191,6 +271,29 @@ AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BDEV_LOGICAL_BLOCK_SIZE], [
])
])
+dnl #
+dnl # 5.11 API change
+dnl # Added bdev_whole() helper.
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_WHOLE], [
+ ZFS_LINUX_TEST_SRC([bdev_whole], [
+ #include <linux/blkdev.h>
+ ],[
+ struct block_device *bdev = NULL;
+ bdev = bdev_whole(bdev);
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BDEV_WHOLE], [
+ AC_MSG_CHECKING([whether bdev_whole() is available])
+ ZFS_LINUX_TEST_RESULT([bdev_whole], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BDEV_WHOLE, 1, [bdev_whole() is available])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+])
+
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV], [
ZFS_AC_KERNEL_SRC_BLKDEV_GET_BY_PATH
ZFS_AC_KERNEL_SRC_BLKDEV_PUT
@@ -199,6 +302,9 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV], [
ZFS_AC_KERNEL_SRC_BLKDEV_LOOKUP_BDEV
ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_LOGICAL_BLOCK_SIZE
ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_PHYSICAL_BLOCK_SIZE
+ ZFS_AC_KERNEL_SRC_BLKDEV_CHECK_DISK_CHANGE
+ ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_CHECK_MEDIA_CHANGE
+ ZFS_AC_KERNEL_SRC_BLKDEV_BDEV_WHOLE
])
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV], [
@@ -209,4 +315,7 @@ AC_DEFUN([ZFS_AC_KERNEL_BLKDEV], [
ZFS_AC_KERNEL_BLKDEV_LOOKUP_BDEV
ZFS_AC_KERNEL_BLKDEV_BDEV_LOGICAL_BLOCK_SIZE
ZFS_AC_KERNEL_BLKDEV_BDEV_PHYSICAL_BLOCK_SIZE
+ ZFS_AC_KERNEL_BLKDEV_CHECK_DISK_CHANGE
+ ZFS_AC_KERNEL_BLKDEV_BDEV_CHECK_MEDIA_CHANGE
+ ZFS_AC_KERNEL_BLKDEV_BDEV_WHOLE
])
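
The checks above only define HAVE_CHECK_DISK_CHANGE, HAVE_BDEV_CHECK_MEDIA_CHANGE, and HAVE_BDEV_WHOLE; consumers are expected to branch on them. A minimal sketch of how a media-change wrapper might consume the first two defines (the wrapper name is hypothetical and not part of this change):

#include <linux/blkdev.h>

/*
 * Sketch only: pick whichever media-change interface the configure
 * checks detected.  With bdev_check_media_change() (5.10+) the caller
 * is responsible for any revalidation it still wants to perform.
 */
static inline int
my_check_media_change(struct block_device *bdev)
{
#if defined(HAVE_CHECK_DISK_CHANGE)
	return (check_disk_change(bdev));
#elif defined(HAVE_BDEV_CHECK_MEDIA_CHANGE)
	return (bdev_check_media_change(bdev));
#else
	return (0);
#endif
}
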
diff --git a/sys/contrib/openzfs/config/kernel-config-defined.m4 b/sys/contrib/openzfs/config/kernel-config-defined.m4
index fe778e649454..9b9468269ca3 100644
--- a/sys/contrib/openzfs/config/kernel-config-defined.m4
+++ b/sys/contrib/openzfs/config/kernel-config-defined.m4
@@ -86,7 +86,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_CONFIG_DEBUG_LOCK_ALLOC], [
mutex_init(&lock);
mutex_lock(&lock);
mutex_unlock(&lock);
- ], [], [$ZFS_META_LICENSE])
+ ], [], [ZFS_META_LICENSE])
])
AC_DEFUN([ZFS_AC_KERNEL_CONFIG_DEBUG_LOCK_ALLOC], [
diff --git a/sys/contrib/openzfs/config/kernel-fpu.m4 b/sys/contrib/openzfs/config/kernel-fpu.m4
index 3c7933413d18..4d6fe052289c 100644
--- a/sys/contrib/openzfs/config/kernel-fpu.m4
+++ b/sys/contrib/openzfs/config/kernel-fpu.m4
@@ -42,7 +42,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_FPU], [
], [
kernel_fpu_begin();
kernel_fpu_end();
- ], [], [$ZFS_META_LICENSE])
+ ], [], [ZFS_META_LICENSE])
ZFS_LINUX_TEST_SRC([__kernel_fpu], [
#include <linux/types.h>
@@ -55,7 +55,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_FPU], [
], [
__kernel_fpu_begin();
__kernel_fpu_end();
- ], [], [$ZFS_META_LICENSE])
+ ], [], [ZFS_META_LICENSE])
ZFS_LINUX_TEST_SRC([fpu_internal], [
#if defined(__x86_64) || defined(__x86_64__) || \
diff --git a/sys/contrib/openzfs/config/kernel-generic_io_acct.m4 b/sys/contrib/openzfs/config/kernel-generic_io_acct.m4
index 423b3e5a3521..e4ab503d5e1c 100644
--- a/sys/contrib/openzfs/config/kernel-generic_io_acct.m4
+++ b/sys/contrib/openzfs/config/kernel-generic_io_acct.m4
@@ -2,6 +2,16 @@ dnl #
dnl # Check for generic io accounting interface.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_GENERIC_IO_ACCT], [
+ ZFS_LINUX_TEST_SRC([bio_io_acct], [
+ #include <linux/blkdev.h>
+ ], [
+ struct bio *bio = NULL;
+ unsigned long start_time;
+
+ start_time = bio_start_io_acct(bio);
+ bio_end_io_acct(bio, start_time);
+ ])
+
ZFS_LINUX_TEST_SRC([generic_acct_3args], [
#include <linux/bio.h>
@@ -29,36 +39,49 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_GENERIC_IO_ACCT], [
AC_DEFUN([ZFS_AC_KERNEL_GENERIC_IO_ACCT], [
dnl #
- dnl # 3.19 API addition
+ dnl # 5.7 API,
dnl #
- dnl # torvalds/linux@394ffa50 allows us to increment iostat
- dnl # counters without generic_make_request().
+ dnl # Added bio_start_io_acct() and bio_end_io_acct() helpers.
dnl #
- AC_MSG_CHECKING([whether generic IO accounting wants 3 args])
- ZFS_LINUX_TEST_RESULT_SYMBOL([generic_acct_3args],
- [generic_start_io_acct], [block/bio.c], [
+ AC_MSG_CHECKING([whether generic bio_*_io_acct() are available])
+ ZFS_LINUX_TEST_RESULT([bio_io_acct], [
AC_MSG_RESULT(yes)
- AC_DEFINE(HAVE_GENERIC_IO_ACCT_3ARG, 1,
- [generic_start_io_acct()/generic_end_io_acct() available])
+ AC_DEFINE(HAVE_BIO_IO_ACCT, 1, [bio_*_io_acct() available])
], [
AC_MSG_RESULT(no)
dnl #
- dnl # Linux 4.14 API,
+ dnl # 4.14 API,
dnl #
dnl # generic_start_io_acct/generic_end_io_acct now require
dnl # request_queue to be provided. No functional changes,
dnl # but preparation for inflight accounting.
dnl #
- AC_MSG_CHECKING([whether generic IO accounting wants 4 args])
+ AC_MSG_CHECKING([whether generic_*_io_acct wants 4 args])
ZFS_LINUX_TEST_RESULT_SYMBOL([generic_acct_4args],
[generic_start_io_acct], [block/bio.c], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GENERIC_IO_ACCT_4ARG, 1,
- [generic_start_io_acct()/generic_end_io_acct() ]
- [4 arg available])
+ [generic_*_io_acct() 4 arg available])
], [
AC_MSG_RESULT(no)
+
+ dnl #
+ dnl # 3.19 API addition
+ dnl #
+ dnl # torvalds/linux@394ffa50 allows us to increment
+ dnl # iostat counters without generic_make_request().
+ dnl #
+ AC_MSG_CHECKING(
+ [whether generic_*_io_acct wants 3 args])
+ ZFS_LINUX_TEST_RESULT_SYMBOL([generic_acct_3args],
+ [generic_start_io_acct], [block/bio.c], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_GENERIC_IO_ACCT_3ARG, 1,
+ [generic_*_io_acct() 3 arg available])
+ ], [
+ AC_MSG_RESULT(no)
+ ])
])
])
])
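
These checks now cascade from the 5.7 bio_*_io_acct() helpers down to the older generic_*_io_acct() variants. A hedged sketch of how the resulting defines are typically consumed; the helper names are hypothetical and the pre-5.7 branches are only indicated in comments:

#include <linux/blkdev.h>
#include <linux/jiffies.h>

/* Sketch: start/stop IO accounting for a bio using whichever API exists. */
static inline unsigned long
my_bio_acct_start(struct bio *bio)
{
#if defined(HAVE_BIO_IO_ACCT)
	return (bio_start_io_acct(bio));
#else
	/* HAVE_GENERIC_IO_ACCT_4ARG / _3ARG: call generic_start_io_acct() here. */
	return (jiffies);
#endif
}

static inline void
my_bio_acct_end(struct bio *bio, unsigned long start_time)
{
#if defined(HAVE_BIO_IO_ACCT)
	bio_end_io_acct(bio, start_time);
#else
	/* HAVE_GENERIC_IO_ACCT_4ARG / _3ARG: call generic_end_io_acct() here. */
#endif
}
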
diff --git a/sys/contrib/openzfs/config/kernel-get-disk-and-module.m4 b/sys/contrib/openzfs/config/kernel-get-disk-and-module.m4
index 51cf7743cf0b..e69de29bb2d1 100644
--- a/sys/contrib/openzfs/config/kernel-get-disk-and-module.m4
+++ b/sys/contrib/openzfs/config/kernel-get-disk-and-module.m4
@@ -1,24 +0,0 @@
-dnl #
-dnl # 4.16 API change
-dnl # Verify if get_disk_and_module() symbol is available.
-dnl #
-AC_DEFUN([ZFS_AC_KERNEL_SRC_GET_DISK_AND_MODULE], [
- ZFS_LINUX_TEST_SRC([get_disk_and_module], [
- #include <linux/genhd.h>
- ], [
- struct gendisk *disk = NULL;
- (void) get_disk_and_module(disk);
- ])
-])
-
-AC_DEFUN([ZFS_AC_KERNEL_GET_DISK_AND_MODULE], [
- AC_MSG_CHECKING([whether get_disk_and_module() is available])
- ZFS_LINUX_TEST_RESULT_SYMBOL([get_disk_and_module],
- [get_disk_and_module], [block/genhd.c], [
- AC_MSG_RESULT(yes)
- AC_DEFINE(HAVE_GET_DISK_AND_MODULE,
- 1, [get_disk_and_module() is available])
- ], [
- AC_MSG_RESULT(no)
- ])
-])
diff --git a/sys/contrib/openzfs/config/kernel-hotplug.m4 b/sys/contrib/openzfs/config/kernel-hotplug.m4
new file mode 100644
index 000000000000..e796a6d2e8e8
--- /dev/null
+++ b/sys/contrib/openzfs/config/kernel-hotplug.m4
@@ -0,0 +1,26 @@
+dnl #
+dnl # 4.6 API change
+dnl # Added CPU hotplug APIs
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_CPU_HOTPLUG], [
+ ZFS_LINUX_TEST_SRC([cpu_hotplug], [
+ #include <linux/cpuhotplug.h>
+ ],[
+ enum cpuhp_state state = CPUHP_ONLINE;
+ int (*fp)(unsigned int, struct hlist_node *) = NULL;
+ cpuhp_state_add_instance_nocalls(0, (struct hlist_node *)NULL);
+ cpuhp_state_remove_instance_nocalls(0, (struct hlist_node *)NULL);
+ cpuhp_setup_state_multi(state, "", fp, fp);
+ cpuhp_remove_multi_state(0);
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_CPU_HOTPLUG], [
+ AC_MSG_CHECKING([whether CPU hotplug APIs exist])
+ ZFS_LINUX_TEST_RESULT([cpu_hotplug], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_CPU_HOTPLUG, 1, [yes])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+])
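
HAVE_CPU_HOTPLUG gates use of the multi-instance hotplug callbacks probed above. A rough sketch of the registration pattern, assuming the define is set; the state variable, callbacks, and name string are all hypothetical:

#ifdef HAVE_CPU_HOTPLUG
#include <linux/cpuhotplug.h>

static enum cpuhp_state my_hp_state;

static int
my_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	/* Per-instance work when a CPU comes online. */
	return (0);
}

static int
my_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	/* Per-instance work when a CPU goes offline. */
	return (0);
}

static void
my_hotplug_setup(void)
{
	my_hp_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
	    "example/hotplug:online", my_cpu_online, my_cpu_offline);
}
#endif /* HAVE_CPU_HOTPLUG */
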
diff --git a/sys/contrib/openzfs/config/kernel-make-request-fn.m4 b/sys/contrib/openzfs/config/kernel-make-request-fn.m4
index 1576fece1368..290ef6b8da7d 100644
--- a/sys/contrib/openzfs/config/kernel-make-request-fn.m4
+++ b/sys/contrib/openzfs/config/kernel-make-request-fn.m4
@@ -27,6 +27,15 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_MAKE_REQUEST_FN], [
q = blk_alloc_queue(make_request, NUMA_NO_NODE);
])
+ ZFS_LINUX_TEST_SRC([blk_alloc_queue_request_fn_rh], [
+ #include <linux/blkdev.h>
+ blk_qc_t make_request(struct request_queue *q,
+ struct bio *bio) { return (BLK_QC_T_NONE); }
+ ],[
+ struct request_queue *q __attribute__ ((unused));
+ q = blk_alloc_queue_rh(make_request, NUMA_NO_NODE);
+ ])
+
ZFS_LINUX_TEST_SRC([block_device_operations_submit_bio], [
#include <linux/blkdev.h>
],[
@@ -47,7 +56,9 @@ AC_DEFUN([ZFS_AC_KERNEL_MAKE_REQUEST_FN], [
AC_DEFINE(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS, 1,
[submit_bio is member of struct block_device_operations])
- ],[
+ ],[
+ AC_MSG_RESULT(no)
+
dnl # Checked as part of the blk_alloc_queue_request_fn test
dnl #
dnl # Linux 5.7 API Change
@@ -55,6 +66,9 @@ AC_DEFUN([ZFS_AC_KERNEL_MAKE_REQUEST_FN], [
dnl #
AC_MSG_CHECKING([whether blk_alloc_queue() expects request function])
ZFS_LINUX_TEST_RESULT([blk_alloc_queue_request_fn], [
+ AC_MSG_RESULT(yes)
+
+ dnl # This is currently always the case.
AC_MSG_CHECKING([whether make_request_fn() returns blk_qc_t])
AC_MSG_RESULT(yes)
@@ -66,34 +80,59 @@ AC_DEFUN([ZFS_AC_KERNEL_MAKE_REQUEST_FN], [
[Noting that make_request_fn() returns blk_qc_t])
],[
dnl #
- dnl # Linux 3.2 API Change
- dnl # make_request_fn returns void.
+ dnl # CentOS Stream 4.18.0-257 API Change
+ dnl # The Linux 5.7 blk_alloc_queue() change was back-
+ dnl # ported and the symbol renamed blk_alloc_queue_rh().
+ dnl # As of this kernel version no compatibility code is
+ dnl # provided for the original symbol name.
dnl #
- AC_MSG_CHECKING([whether make_request_fn() returns void])
- ZFS_LINUX_TEST_RESULT([make_request_fn_void], [
+ ZFS_LINUX_TEST_RESULT([blk_alloc_queue_request_fn_rh], [
AC_MSG_RESULT(yes)
- AC_DEFINE(MAKE_REQUEST_FN_RET, void,
+
+ dnl # This is currently always the case.
+ AC_MSG_CHECKING([whether make_request_fn_rh() returns blk_qc_t])
+ AC_MSG_RESULT(yes)
+
+ AC_DEFINE(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH, 1,
+ [blk_alloc_queue_rh() expects request function])
+ AC_DEFINE(MAKE_REQUEST_FN_RET, blk_qc_t,
[make_request_fn() return type])
- AC_DEFINE(HAVE_MAKE_REQUEST_FN_RET_VOID, 1,
- [Noting that make_request_fn() returns void])
+ AC_DEFINE(HAVE_MAKE_REQUEST_FN_RET_QC, 1,
+ [Noting that make_request_fn() returns blk_qc_t])
],[
AC_MSG_RESULT(no)
dnl #
- dnl # Linux 4.4 API Change
- dnl # make_request_fn returns blk_qc_t.
+ dnl # Linux 3.2 API Change
+ dnl # make_request_fn returns void.
dnl #
AC_MSG_CHECKING(
- [whether make_request_fn() returns blk_qc_t])
- ZFS_LINUX_TEST_RESULT([make_request_fn_blk_qc_t], [
+ [whether make_request_fn() returns void])
+ ZFS_LINUX_TEST_RESULT([make_request_fn_void], [
AC_MSG_RESULT(yes)
- AC_DEFINE(MAKE_REQUEST_FN_RET, blk_qc_t,
+ AC_DEFINE(MAKE_REQUEST_FN_RET, void,
[make_request_fn() return type])
- AC_DEFINE(HAVE_MAKE_REQUEST_FN_RET_QC, 1,
- [Noting that make_request_fn() ]
- [returns blk_qc_t])
+ AC_DEFINE(HAVE_MAKE_REQUEST_FN_RET_VOID, 1,
+ [Noting that make_request_fn() returns void])
],[
- ZFS_LINUX_TEST_ERROR([make_request_fn])
+ AC_MSG_RESULT(no)
+
+ dnl #
+ dnl # Linux 4.4 API Change
+ dnl # make_request_fn returns blk_qc_t.
+ dnl #
+ AC_MSG_CHECKING(
+ [whether make_request_fn() returns blk_qc_t])
+ ZFS_LINUX_TEST_RESULT([make_request_fn_blk_qc_t], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(MAKE_REQUEST_FN_RET, blk_qc_t,
+ [make_request_fn() return type])
+ AC_DEFINE(HAVE_MAKE_REQUEST_FN_RET_QC, 1,
+ [Noting that make_request_fn() ]
+ [returns blk_qc_t])
+ ],[
+ ZFS_LINUX_TEST_ERROR([make_request_fn])
+ ])
])
])
])
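
The macros defined here (MAKE_REQUEST_FN_RET, HAVE_BLK_ALLOC_QUEUE_REQUEST_FN, HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH) let one submit function serve both the upstream 5.7 API and the CentOS Stream backport. A sketch under those assumptions; the function names are made up, and the 5.9+ block_device_operations.submit_bio case, which takes only the bio, is omitted:

#include <linux/blkdev.h>

static MAKE_REQUEST_FN_RET
my_make_request(struct request_queue *q, struct bio *bio)
{
	/* ...queue or dispatch the bio... */
#if defined(HAVE_MAKE_REQUEST_FN_RET_QC)
	return (BLK_QC_T_NONE);
#endif
}

static struct request_queue *
my_alloc_queue(void)
{
#if defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN)
	return (blk_alloc_queue(my_make_request, NUMA_NO_NODE));
#elif defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH)
	return (blk_alloc_queue_rh(my_make_request, NUMA_NO_NODE));
#else
	/* pre-5.7: blk_alloc_queue() followed by blk_queue_make_request(). */
	return (NULL);
#endif
}
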
diff --git a/sys/contrib/openzfs/config/kernel-objtool.m4 b/sys/contrib/openzfs/config/kernel-objtool.m4
index c560c41954c6..f9f9d657d805 100644
--- a/sys/contrib/openzfs/config/kernel-objtool.m4
+++ b/sys/contrib/openzfs/config/kernel-objtool.m4
@@ -1,4 +1,25 @@
dnl #
+dnl # Detect objtool functionality.
+dnl #
+
+dnl #
+dnl # Kernel 5.10: linux/frame.h was renamed linux/objtool.h
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_OBJTOOL_HEADER], [
+ AC_MSG_CHECKING([whether objtool header is available])
+ ZFS_LINUX_TRY_COMPILE([
+ #include <linux/objtool.h>
+ ],[
+ ],[
+ AC_DEFINE(HAVE_KERNEL_OBJTOOL_HEADER, 1,
+ [kernel has linux/objtool.h])
+ AC_MSG_RESULT(linux/objtool.h)
+ ],[
+ AC_MSG_RESULT(linux/frame.h)
+ ])
+])
+
+dnl #
dnl # Check for objtool support.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_OBJTOOL], [
@@ -16,7 +37,11 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_OBJTOOL], [
dnl # 4.6 API added STACK_FRAME_NON_STANDARD macro
ZFS_LINUX_TEST_SRC([stack_frame_non_standard], [
+ #ifdef HAVE_KERNEL_OBJTOOL_HEADER
+ #include <linux/objtool.h>
+ #else
#include <linux/frame.h>
+ #endif
],[
#if !defined(STACK_FRAME_NON_STANDARD)
#error "STACK_FRAME_NON_STANDARD is not defined."
diff --git a/sys/contrib/openzfs/config/kernel-percpu.m4 b/sys/contrib/openzfs/config/kernel-percpu.m4
index e9654a69ee0a..700d97a25853 100644
--- a/sys/contrib/openzfs/config/kernel-percpu.m4
+++ b/sys/contrib/openzfs/config/kernel-percpu.m4
@@ -25,10 +25,36 @@ AC_DEFUN([ZFS_AC_KERNEL_PERCPU_COUNTER_INIT], [
])
])
+dnl #
+dnl # 5.10 API change,
+dnl # The "count" was moved into ref->data, from ref
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_PERCPU_REF_COUNT_IN_DATA], [
+ ZFS_LINUX_TEST_SRC([percpu_ref_count_in_data], [
+ #include <linux/percpu-refcount.h>
+ ],[
+ struct percpu_ref_data d;
+
+ atomic_long_set(&d.count, 1L);
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_PERCPU_REF_COUNT_IN_DATA], [
+ AC_MSG_CHECKING([whether the count is inside percpu_ref.data])
+ ZFS_LINUX_TEST_RESULT([percpu_ref_count_in_data], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(ZFS_PERCPU_REF_COUNT_IN_DATA, 1,
+ [count is located in percpu_ref.data])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+])
AC_DEFUN([ZFS_AC_KERNEL_SRC_PERCPU], [
ZFS_AC_KERNEL_SRC_PERCPU_COUNTER_INIT
+ ZFS_AC_KERNEL_SRC_PERCPU_REF_COUNT_IN_DATA
])
AC_DEFUN([ZFS_AC_KERNEL_PERCPU], [
ZFS_AC_KERNEL_PERCPU_COUNTER_INIT
+ ZFS_AC_KERNEL_PERCPU_REF_COUNT_IN_DATA
])
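
ZFS_PERCPU_REF_COUNT_IN_DATA records where the atomic count lives after the 5.10 layout change. A hedged sketch of a reader that follows it; the accessor name is hypothetical:

#include <linux/percpu-refcount.h>

static inline long
my_percpu_ref_count_read(struct percpu_ref *ref)
{
#ifdef ZFS_PERCPU_REF_COUNT_IN_DATA
	/* 5.10+: the count moved into ref->data. */
	return (atomic_long_read(&ref->data->count));
#else
	return (atomic_long_read(&ref->count));
#endif
}
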
diff --git a/sys/contrib/openzfs/config/kernel-revalidate-disk-size.m4 b/sys/contrib/openzfs/config/kernel-revalidate-disk-size.m4
new file mode 100644
index 000000000000..a7d0cb3cdab4
--- /dev/null
+++ b/sys/contrib/openzfs/config/kernel-revalidate-disk-size.m4
@@ -0,0 +1,46 @@
+dnl #
+dnl # 5.11 API change
+dnl # revalidate_disk_size() has been removed entirely.
+dnl #
+dnl # 5.10 API change
+dnl # revalidate_disk() was replaced by revalidate_disk_size()
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_REVALIDATE_DISK], [
+
+ ZFS_LINUX_TEST_SRC([revalidate_disk_size], [
+ #include <linux/genhd.h>
+ ], [
+ struct gendisk *disk = NULL;
+ (void) revalidate_disk_size(disk, false);
+ ])
+
+ ZFS_LINUX_TEST_SRC([revalidate_disk], [
+ #include <linux/genhd.h>
+ ], [
+ struct gendisk *disk = NULL;
+ (void) revalidate_disk(disk);
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_REVALIDATE_DISK], [
+
+ AC_MSG_CHECKING([whether revalidate_disk_size() is available])
+ ZFS_LINUX_TEST_RESULT_SYMBOL([revalidate_disk_size],
+ [revalidate_disk_size], [block/genhd.c], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_REVALIDATE_DISK_SIZE, 1,
+ [revalidate_disk_size() is available])
+ ], [
+ AC_MSG_RESULT(no)
+
+ AC_MSG_CHECKING([whether revalidate_disk() is available])
+ ZFS_LINUX_TEST_RESULT_SYMBOL([revalidate_disk],
+ [revalidate_disk], [block/genhd.c], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_REVALIDATE_DISK, 1,
+ [revalidate_disk() is available])
+ ], [
+ AC_MSG_RESULT(no)
+ ])
+ ])
+])
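
The two checks above are consumed as a simple three-way fallback; a sketch, with a hypothetical wrapper name:

#include <linux/genhd.h>

static inline void
my_revalidate_disk_size(struct gendisk *disk, bool verbose)
{
#if defined(HAVE_REVALIDATE_DISK_SIZE)
	revalidate_disk_size(disk, verbose);	/* 5.10 */
#elif defined(HAVE_REVALIDATE_DISK)
	(void) revalidate_disk(disk);		/* pre-5.10 */
#else
	/* 5.11+: capacity changes are picked up without an explicit call. */
#endif
}
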
diff --git a/sys/contrib/openzfs/config/kernel-rwsem.m4 b/sys/contrib/openzfs/config/kernel-rwsem.m4
index 824f4a3ffd41..d3a64a8efa19 100644
--- a/sys/contrib/openzfs/config/kernel-rwsem.m4
+++ b/sys/contrib/openzfs/config/kernel-rwsem.m4
@@ -1,30 +1,4 @@
dnl #
-dnl # 3.1 API Change
-dnl #
-dnl # The rw_semaphore.wait_lock member was changed from spinlock_t to
-dnl # raw_spinlock_t at commit ddb6c9b58a19edcfac93ac670b066c836ff729f1.
-dnl #
-AC_DEFUN([ZFS_AC_KERNEL_SRC_RWSEM_SPINLOCK_IS_RAW], [
- ZFS_LINUX_TEST_SRC([rwsem_spinlock_is_raw], [
- #include <linux/rwsem.h>
- ],[
- struct rw_semaphore dummy_semaphore __attribute__ ((unused));
- raw_spinlock_t dummy_lock __attribute__ ((unused)) =
- __RAW_SPIN_LOCK_INITIALIZER(dummy_lock);
- dummy_semaphore.wait_lock = dummy_lock;
- ])
-])
-
-AC_DEFUN([ZFS_AC_KERNEL_RWSEM_SPINLOCK_IS_RAW], [
- AC_MSG_CHECKING([whether struct rw_semaphore member wait_lock is raw])
- ZFS_LINUX_TEST_RESULT([rwsem_spinlock_is_raw], [
- AC_MSG_RESULT(yes)
- ],[
- ZFS_LINUX_TEST_ERROR([rwsem_spinlock_is_raw])
- ])
-])
-
-dnl #
dnl # 3.16 API Change
dnl #
dnl # rwsem-spinlock "->activity" changed to "->count"
@@ -76,13 +50,11 @@ AC_DEFUN([ZFS_AC_KERNEL_RWSEM_ATOMIC_LONG_COUNT], [
])
AC_DEFUN([ZFS_AC_KERNEL_SRC_RWSEM], [
- ZFS_AC_KERNEL_SRC_RWSEM_SPINLOCK_IS_RAW
ZFS_AC_KERNEL_SRC_RWSEM_ACTIVITY
ZFS_AC_KERNEL_SRC_RWSEM_ATOMIC_LONG_COUNT
])
AC_DEFUN([ZFS_AC_KERNEL_RWSEM], [
- ZFS_AC_KERNEL_RWSEM_SPINLOCK_IS_RAW
ZFS_AC_KERNEL_RWSEM_ACTIVITY
ZFS_AC_KERNEL_RWSEM_ATOMIC_LONG_COUNT
])
diff --git a/sys/contrib/openzfs/config/kernel-vfs-iov_iter.m4 b/sys/contrib/openzfs/config/kernel-vfs-iov_iter.m4
new file mode 100644
index 000000000000..69db11b6882b
--- /dev/null
+++ b/sys/contrib/openzfs/config/kernel-vfs-iov_iter.m4
@@ -0,0 +1,206 @@
+dnl #
+dnl # Check for available iov_iter functionality.
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_VFS_IOV_ITER], [
+ ZFS_LINUX_TEST_SRC([iov_iter_types], [
+ #include <linux/fs.h>
+ #include <linux/uio.h>
+ ],[
+ int type __attribute__ ((unused)) =
+ ITER_IOVEC | ITER_KVEC | ITER_BVEC | ITER_PIPE;
+ ])
+
+ ZFS_LINUX_TEST_SRC([iov_iter_init], [
+ #include <linux/fs.h>
+ #include <linux/uio.h>
+ ],[
+ struct iov_iter iter = { 0 };
+ struct iovec iov;
+ unsigned long nr_segs = 1;
+ size_t count = 1024;
+
+ iov_iter_init(&iter, WRITE, &iov, nr_segs, count);
+ ])
+
+ ZFS_LINUX_TEST_SRC([iov_iter_init_legacy], [
+ #include <linux/fs.h>
+ #include <linux/uio.h>
+ ],[
+ struct iov_iter iter = { 0 };
+ struct iovec iov;
+ unsigned long nr_segs = 1;
+ size_t count = 1024;
+ size_t written = 0;
+
+ iov_iter_init(&iter, &iov, nr_segs, count, written);
+ ])
+
+ ZFS_LINUX_TEST_SRC([iov_iter_advance], [
+ #include <linux/fs.h>
+ #include <linux/uio.h>
+ ],[
+ struct iov_iter iter = { 0 };
+ size_t advance = 512;
+
+ iov_iter_advance(&iter, advance);
+ ])
+
+ ZFS_LINUX_TEST_SRC([iov_iter_revert], [
+ #include <linux/fs.h>
+ #include <linux/uio.h>
+ ],[
+ struct iov_iter iter = { 0 };
+ size_t revert = 512;
+
+ iov_iter_revert(&iter, revert);
+ ])
+
+ ZFS_LINUX_TEST_SRC([iov_iter_fault_in_readable], [
+ #include <linux/fs.h>
+ #include <linux/uio.h>
+ ],[
+ struct iov_iter iter = { 0 };
+ size_t size = 512;
+ int error __attribute__ ((unused));
+
+ error = iov_iter_fault_in_readable(&iter, size);
+ ])
+
+ ZFS_LINUX_TEST_SRC([iov_iter_count], [
+ #include <linux/fs.h>
+ #include <linux/uio.h>
+ ],[
+ struct iov_iter iter = { 0 };
+ size_t bytes __attribute__ ((unused));
+
+ bytes = iov_iter_count(&iter);
+ ])
+
+ ZFS_LINUX_TEST_SRC([copy_to_iter], [
+ #include <linux/fs.h>
+ #include <linux/uio.h>
+ ],[
+ struct iov_iter iter = { 0 };
+ char buf[512] = { 0 };
+ size_t size = 512;
+ size_t bytes __attribute__ ((unused));
+
+ bytes = copy_to_iter((const void *)&buf, size, &iter);
+ ])
+
+ ZFS_LINUX_TEST_SRC([copy_from_iter], [
+ #include <linux/fs.h>
+ #include <linux/uio.h>
+ ],[
+ struct iov_iter iter = { 0 };
+ char buf[512] = { 0 };
+ size_t size = 512;
+ size_t bytes __attribute__ ((unused));
+
+ bytes = copy_from_iter((void *)&buf, size, &iter);
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_VFS_IOV_ITER], [
+ enable_vfs_iov_iter="yes"
+
+ AC_MSG_CHECKING([whether iov_iter types are available])
+ ZFS_LINUX_TEST_RESULT([iov_iter_types], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_IOV_ITER_TYPES, 1,
+ [iov_iter types are available])
+ ],[
+ AC_MSG_RESULT(no)
+ enable_vfs_iov_iter="no"
+ ])
+
+ dnl #
+ dnl # 'iov_iter_init' available in Linux 3.16 and newer.
+ dnl # 'iov_iter_init_legacy' available in Linux 3.15 and older.
+ dnl #
+ AC_MSG_CHECKING([whether iov_iter_init() is available])
+ ZFS_LINUX_TEST_RESULT([iov_iter_init], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_IOV_ITER_INIT, 1,
+ [iov_iter_init() is available])
+ ],[
+ ZFS_LINUX_TEST_RESULT([iov_iter_init_legacy], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_IOV_ITER_INIT_LEGACY, 1,
+ [iov_iter_init() is available])
+ ],[
+ ZFS_LINUX_TEST_ERROR([iov_iter_init()])
+ ])
+ ])
+
+ AC_MSG_CHECKING([whether iov_iter_advance() is available])
+ ZFS_LINUX_TEST_RESULT([iov_iter_advance], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_IOV_ITER_ADVANCE, 1,
+ [iov_iter_advance() is available])
+ ],[
+ AC_MSG_RESULT(no)
+ enable_vfs_iov_iter="no"
+ ])
+
+ AC_MSG_CHECKING([whether iov_iter_revert() is available])
+ ZFS_LINUX_TEST_RESULT([iov_iter_revert], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_IOV_ITER_REVERT, 1,
+ [iov_iter_revert() is available])
+ ],[
+ AC_MSG_RESULT(no)
+ enable_vfs_iov_iter="no"
+ ])
+
+ AC_MSG_CHECKING([whether iov_iter_fault_in_readable() is available])
+ ZFS_LINUX_TEST_RESULT([iov_iter_fault_in_readable], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_IOV_ITER_FAULT_IN_READABLE, 1,
+ [iov_iter_fault_in_readable() is available])
+ ],[
+ AC_MSG_RESULT(no)
+ enable_vfs_iov_iter="no"
+ ])
+
+ AC_MSG_CHECKING([whether iov_iter_count() is available])
+ ZFS_LINUX_TEST_RESULT([iov_iter_count], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_IOV_ITER_COUNT, 1,
+ [iov_iter_count() is available])
+ ],[
+ AC_MSG_RESULT(no)
+ enable_vfs_iov_iter="no"
+ ])
+
+ AC_MSG_CHECKING([whether copy_to_iter() is available])
+ ZFS_LINUX_TEST_RESULT([copy_to_iter], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_COPY_TO_ITER, 1,
+ [copy_to_iter() is available])
+ ],[
+ AC_MSG_RESULT(no)
+ enable_vfs_iov_iter="no"
+ ])
+
+ AC_MSG_CHECKING([whether copy_from_iter() is available])
+ ZFS_LINUX_TEST_RESULT([copy_from_iter], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_COPY_FROM_ITER, 1,
+ [copy_from_iter() is available])
+ ],[
+ AC_MSG_RESULT(no)
+ enable_vfs_iov_iter="no"
+ ])
+
+ dnl #
+ dnl # As of the 4.9 kernel support is provided for iovecs, kvecs,
+ dnl # bvecs and pipes in the iov_iter structure. As long as the
+ dnl # other support interfaces are all available the iov_iter can
+ dnl # be correctly used in the uio structure.
+ dnl #
+ AS_IF([test "x$enable_vfs_iov_iter" = "xyes"], [
+ AC_DEFINE(HAVE_VFS_IOV_ITER, 1,
+ [All required iov_iter interfaces are available])
+ ])
+])
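
HAVE_VFS_IOV_ITER is only set when every interface above is present, so consumers can use the full iov_iter API unconditionally inside that guard. A small hedged sketch; the helper name is hypothetical:

#ifdef HAVE_VFS_IOV_ITER
#include <linux/uio.h>

/* Pre-fault the source pages, then copy at most len bytes out of the iter. */
static ssize_t
my_copy_in(void *buf, size_t len, struct iov_iter *from)
{
	if (iov_iter_fault_in_readable(from, len) != 0)
		return (-EFAULT);
	if (iov_iter_count(from) < len)
		len = iov_iter_count(from);
	return (copy_from_iter(buf, len, from));
}
#endif /* HAVE_VFS_IOV_ITER */
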
diff --git a/sys/contrib/openzfs/config/kernel.m4 b/sys/contrib/openzfs/config/kernel.m4
index ec52f014a7a3..f31be845f5d9 100644
--- a/sys/contrib/openzfs/config/kernel.m4
+++ b/sys/contrib/openzfs/config/kernel.m4
@@ -13,6 +13,7 @@ AC_DEFUN([ZFS_AC_CONFIG_KERNEL], [
dnl # Sequential ZFS_LINUX_TRY_COMPILE tests
ZFS_AC_KERNEL_FPU_HEADER
+ ZFS_AC_KERNEL_OBJTOOL_HEADER
ZFS_AC_KERNEL_WAIT_QUEUE_ENTRY_T
ZFS_AC_KERNEL_MISC_MINOR
ZFS_AC_KERNEL_DECLARE_EVENT_CLASS
@@ -60,7 +61,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
ZFS_AC_KERNEL_SRC_BIO
ZFS_AC_KERNEL_SRC_BLKDEV
ZFS_AC_KERNEL_SRC_BLK_QUEUE
- ZFS_AC_KERNEL_SRC_GET_DISK_AND_MODULE
+ ZFS_AC_KERNEL_SRC_REVALIDATE_DISK
ZFS_AC_KERNEL_SRC_GET_DISK_RO
ZFS_AC_KERNEL_SRC_GENERIC_READLINK_GLOBAL
ZFS_AC_KERNEL_SRC_DISCARD_GRANULARITY
@@ -104,6 +105,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
ZFS_AC_KERNEL_SRC_VFS_DIRECT_IO
ZFS_AC_KERNEL_SRC_VFS_RW_ITERATE
ZFS_AC_KERNEL_SRC_VFS_GENERIC_WRITE_CHECKS
+ ZFS_AC_KERNEL_SRC_VFS_IOV_ITER
ZFS_AC_KERNEL_SRC_KMAP_ATOMIC_ARGS
ZFS_AC_KERNEL_SRC_FOLLOW_DOWN_ONE
ZFS_AC_KERNEL_SRC_MAKE_REQUEST_FN
@@ -122,6 +124,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
ZFS_AC_KERNEL_SRC_TOTALHIGH_PAGES
ZFS_AC_KERNEL_SRC_KSTRTOUL
ZFS_AC_KERNEL_SRC_PERCPU
+ ZFS_AC_KERNEL_SRC_CPU_HOTPLUG
AC_MSG_CHECKING([for available kernel interfaces])
ZFS_LINUX_TEST_COMPILE_ALL([kabi])
@@ -156,7 +159,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
ZFS_AC_KERNEL_BIO
ZFS_AC_KERNEL_BLKDEV
ZFS_AC_KERNEL_BLK_QUEUE
- ZFS_AC_KERNEL_GET_DISK_AND_MODULE
+ ZFS_AC_KERNEL_REVALIDATE_DISK
ZFS_AC_KERNEL_GET_DISK_RO
ZFS_AC_KERNEL_GENERIC_READLINK_GLOBAL
ZFS_AC_KERNEL_DISCARD_GRANULARITY
@@ -200,6 +203,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
ZFS_AC_KERNEL_VFS_DIRECT_IO
ZFS_AC_KERNEL_VFS_RW_ITERATE
ZFS_AC_KERNEL_VFS_GENERIC_WRITE_CHECKS
+ ZFS_AC_KERNEL_VFS_IOV_ITER
ZFS_AC_KERNEL_KMAP_ATOMIC_ARGS
ZFS_AC_KERNEL_FOLLOW_DOWN_ONE
ZFS_AC_KERNEL_MAKE_REQUEST_FN
@@ -218,6 +222,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
ZFS_AC_KERNEL_TOTALHIGH_PAGES
ZFS_AC_KERNEL_KSTRTOUL
ZFS_AC_KERNEL_PERCPU
+ ZFS_AC_KERNEL_CPU_HOTPLUG
])
dnl #
@@ -317,19 +322,15 @@ AC_DEFUN([ZFS_AC_KERNEL], [
utsrelease2=$kernelbuild/include/linux/utsrelease.h
utsrelease3=$kernelbuild/include/generated/utsrelease.h
AS_IF([test -r $utsrelease1 && fgrep -q UTS_RELEASE $utsrelease1], [
- utsrelease=linux/version.h
+ utsrelease=$utsrelease1
], [test -r $utsrelease2 && fgrep -q UTS_RELEASE $utsrelease2], [
- utsrelease=linux/utsrelease.h
+ utsrelease=$utsrelease2
], [test -r $utsrelease3 && fgrep -q UTS_RELEASE $utsrelease3], [
- utsrelease=generated/utsrelease.h
+ utsrelease=$utsrelease3
])
- AS_IF([test "$utsrelease"], [
- kernsrcver=`(echo "#include <$utsrelease>";
- echo "kernsrcver=UTS_RELEASE") |
- ${CPP} -I $kernelbuild/include - |
- grep "^kernsrcver=" | cut -d \" -f 2`
-
+ AS_IF([test -n "$utsrelease"], [
+ kernsrcver=$($AWK '/UTS_RELEASE/ { gsub(/"/, "", $[3]); print $[3] }' $utsrelease)
AS_IF([test -z "$kernsrcver"], [
AC_MSG_RESULT([Not found])
AC_MSG_ERROR([
@@ -536,7 +537,9 @@ dnl #
dnl # ZFS_LINUX_TEST_PROGRAM(C)([PROLOGUE], [BODY])
dnl #
m4_define([ZFS_LINUX_TEST_PROGRAM], [
+#include <linux/module.h>
$1
+
int
main (void)
{
@@ -544,6 +547,11 @@ $2
;
return 0;
}
+
+MODULE_DESCRIPTION("conftest");
+MODULE_AUTHOR(ZFS_META_AUTHOR);
+MODULE_VERSION(ZFS_META_VERSION "-" ZFS_META_RELEASE);
+MODULE_LICENSE($3);
])
dnl #
@@ -683,19 +691,21 @@ dnl # $3 - source
dnl # $4 - extra cflags
dnl # $5 - check license-compatibility
dnl #
+dnl # Check if the test source is buildable at all and then if it is
+dnl # license compatible.
+dnl #
dnl # N.B because all of the test cases are compiled in parallel they
dnl # must never depend on the results of previous tests. Each test
dnl # needs to be entirely independent.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_SRC], [
- ZFS_LINUX_CONFTEST_C([ZFS_LINUX_TEST_PROGRAM([[$2]], [[$3]])], [$1])
+ ZFS_LINUX_CONFTEST_C([ZFS_LINUX_TEST_PROGRAM([[$2]], [[$3]],
+ [["Dual BSD/GPL"]])], [$1])
ZFS_LINUX_CONFTEST_MAKEFILE([$1], [yes], [$4])
AS_IF([ test -n "$5" ], [
- ZFS_LINUX_CONFTEST_C([ZFS_LINUX_TEST_PROGRAM([[
- #include <linux/module.h>
- MODULE_LICENSE("$5");
- $2]], [[$3]])], [$1_license])
+ ZFS_LINUX_CONFTEST_C([ZFS_LINUX_TEST_PROGRAM(
+ [[$2]], [[$3]], [[$5]])], [$1_license])
ZFS_LINUX_CONFTEST_MAKEFILE([$1_license], [yes], [$4])
])
])
@@ -785,11 +795,13 @@ dnl #
AC_DEFUN([ZFS_LINUX_TRY_COMPILE], [
AS_IF([test "x$enable_linux_builtin" = "xyes"], [
ZFS_LINUX_COMPILE_IFELSE(
- [ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]])],
+ [ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]],
+ [[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.o], [$3], [$4])
], [
ZFS_LINUX_COMPILE_IFELSE(
- [ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]])],
+ [ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]],
+ [[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.ko], [$3], [$4])
])
])
@@ -855,7 +867,7 @@ dnl # provided via the fifth parameter
dnl #
AC_DEFUN([ZFS_LINUX_TRY_COMPILE_HEADER], [
ZFS_LINUX_COMPILE_IFELSE(
- [ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]])],
+ [ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]], [[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.ko],
[$3], [$4], [$5])
])
diff --git a/sys/contrib/openzfs/config/mount-helper.m4 b/sys/contrib/openzfs/config/mount-helper.m4
index 0a6c7670840b..e559b9ab2734 100644
--- a/sys/contrib/openzfs/config/mount-helper.m4
+++ b/sys/contrib/openzfs/config/mount-helper.m4
@@ -1,6 +1,6 @@
AC_DEFUN([ZFS_AC_CONFIG_USER_MOUNT_HELPER], [
AC_ARG_WITH(mounthelperdir,
- AC_HELP_STRING([--with-mounthelperdir=DIR],
+ AS_HELP_STRING([--with-mounthelperdir=DIR],
[install mount.zfs in dir [[/sbin]]]),
mounthelperdir=$withval,mounthelperdir=/sbin)
diff --git a/sys/contrib/openzfs/config/user-dracut.m4 b/sys/contrib/openzfs/config/user-dracut.m4
index 95f800bda47a..b9705297f744 100644
--- a/sys/contrib/openzfs/config/user-dracut.m4
+++ b/sys/contrib/openzfs/config/user-dracut.m4
@@ -1,7 +1,7 @@
AC_DEFUN([ZFS_AC_CONFIG_USER_DRACUT], [
AC_MSG_CHECKING(for dracut directory)
AC_ARG_WITH([dracutdir],
- AC_HELP_STRING([--with-dracutdir=DIR],
+ AS_HELP_STRING([--with-dracutdir=DIR],
[install dracut helpers @<:@default=check@:>@]),
[dracutdir=$withval],
[dracutdir=check])
diff --git a/sys/contrib/openzfs/config/user-libexec.m4 b/sys/contrib/openzfs/config/user-libexec.m4
index 31bcea3fcfd3..5379c25b4a0c 100644
--- a/sys/contrib/openzfs/config/user-libexec.m4
+++ b/sys/contrib/openzfs/config/user-libexec.m4
@@ -1,6 +1,6 @@
AC_DEFUN([ZFS_AC_CONFIG_USER_ZFSEXEC], [
AC_ARG_WITH(zfsexecdir,
- AC_HELP_STRING([--with-zfsexecdir=DIR],
+ AS_HELP_STRING([--with-zfsexecdir=DIR],
[install scripts [[@<:@libexecdir@:>@/zfs]]]),
[zfsexecdir=$withval],
[zfsexecdir="${libexecdir}/zfs"])
diff --git a/sys/contrib/openzfs/config/user-makedev.m4 b/sys/contrib/openzfs/config/user-makedev.m4
index 4383681a8f4c..8986107aef80 100644
--- a/sys/contrib/openzfs/config/user-makedev.m4
+++ b/sys/contrib/openzfs/config/user-makedev.m4
@@ -3,13 +3,12 @@ dnl # glibc 2.25
dnl #
AC_DEFUN([ZFS_AC_CONFIG_USER_MAKEDEV_IN_SYSMACROS], [
AC_MSG_CHECKING([makedev() is declared in sys/sysmacros.h])
- AC_TRY_COMPILE(
- [
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#include <sys/sysmacros.h>
- ],[
+ ]], [[
int k;
k = makedev(0,0);
- ],[
+ ]])],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_MAKEDEV_IN_SYSMACROS, 1,
[makedev() is declared in sys/sysmacros.h])
@@ -23,13 +22,12 @@ dnl # glibc X < Y < 2.25
dnl #
AC_DEFUN([ZFS_AC_CONFIG_USER_MAKEDEV_IN_MKDEV], [
AC_MSG_CHECKING([makedev() is declared in sys/mkdev.h])
- AC_TRY_COMPILE(
- [
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#include <sys/mkdev.h>
- ],[
+ ]], [[
int k;
k = makedev(0,0);
- ],[
+ ]])],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_MAKEDEV_IN_MKDEV, 1,
[makedev() is declared in sys/mkdev.h])
diff --git a/sys/contrib/openzfs/config/user-systemd.m4 b/sys/contrib/openzfs/config/user-systemd.m4
index 3e6a4a281f3c..63f02ad2a82b 100644
--- a/sys/contrib/openzfs/config/user-systemd.m4
+++ b/sys/contrib/openzfs/config/user-systemd.m4
@@ -1,27 +1,27 @@
AC_DEFUN([ZFS_AC_CONFIG_USER_SYSTEMD], [
AC_ARG_ENABLE(systemd,
- AC_HELP_STRING([--enable-systemd],
+ AS_HELP_STRING([--enable-systemd],
[install systemd unit/preset files [[default: yes]]]),
[enable_systemd=$enableval],
[enable_systemd=check])
AC_ARG_WITH(systemdunitdir,
- AC_HELP_STRING([--with-systemdunitdir=DIR],
+ AS_HELP_STRING([--with-systemdunitdir=DIR],
[install systemd unit files in dir [[/usr/lib/systemd/system]]]),
systemdunitdir=$withval,systemdunitdir=/usr/lib/systemd/system)
AC_ARG_WITH(systemdpresetdir,
- AC_HELP_STRING([--with-systemdpresetdir=DIR],
+ AS_HELP_STRING([--with-systemdpresetdir=DIR],
[install systemd preset files in dir [[/usr/lib/systemd/system-preset]]]),
systemdpresetdir=$withval,systemdpresetdir=/usr/lib/systemd/system-preset)
AC_ARG_WITH(systemdmodulesloaddir,
- AC_HELP_STRING([--with-systemdmodulesloaddir=DIR],
+ AS_HELP_STRING([--with-systemdmodulesloaddir=DIR],
[install systemd module load files into dir [[/usr/lib/modules-load.d]]]),
systemdmodulesloaddir=$withval,systemdmodulesloaddir=/usr/lib/modules-load.d)
AC_ARG_WITH(systemdgeneratordir,
- AC_HELP_STRING([--with-systemdgeneratordir=DIR],
+ AS_HELP_STRING([--with-systemdgeneratordir=DIR],
[install systemd generators in dir [[/usr/lib/systemd/system-generators]]]),
systemdgeneratordir=$withval,systemdgeneratordir=/usr/lib/systemd/system-generators)
diff --git a/sys/contrib/openzfs/config/user-sysvinit.m4 b/sys/contrib/openzfs/config/user-sysvinit.m4
index 65dcc3819231..b6b63f1cfa36 100644
--- a/sys/contrib/openzfs/config/user-sysvinit.m4
+++ b/sys/contrib/openzfs/config/user-sysvinit.m4
@@ -1,6 +1,6 @@
AC_DEFUN([ZFS_AC_CONFIG_USER_SYSVINIT], [
AC_ARG_ENABLE(sysvinit,
- AC_HELP_STRING([--enable-sysvinit],
+ AS_HELP_STRING([--enable-sysvinit],
[install SysV init scripts [default: yes]]),
[],enable_sysvinit=yes)
diff --git a/sys/contrib/openzfs/config/user-udev.m4 b/sys/contrib/openzfs/config/user-udev.m4
index 65dc79fb4847..e6120fc8fef6 100644
--- a/sys/contrib/openzfs/config/user-udev.m4
+++ b/sys/contrib/openzfs/config/user-udev.m4
@@ -1,7 +1,7 @@
AC_DEFUN([ZFS_AC_CONFIG_USER_UDEV], [
AC_MSG_CHECKING(for udev directories)
AC_ARG_WITH(udevdir,
- AC_HELP_STRING([--with-udevdir=DIR],
+ AS_HELP_STRING([--with-udevdir=DIR],
[install udev helpers @<:@default=check@:>@]),
[udevdir=$withval],
[udevdir=check])
@@ -18,7 +18,7 @@ AC_DEFUN([ZFS_AC_CONFIG_USER_UDEV], [
])
AC_ARG_WITH(udevruledir,
- AC_HELP_STRING([--with-udevruledir=DIR],
+ AS_HELP_STRING([--with-udevruledir=DIR],
[install udev rules [[UDEVDIR/rules.d]]]),
[udevruledir=$withval],
[udevruledir="${udevdir}/rules.d"])
diff --git a/sys/contrib/openzfs/config/zfs-build.m4 b/sys/contrib/openzfs/config/zfs-build.m4
index 7754eda3f6a2..f0eb47035d1e 100644
--- a/sys/contrib/openzfs/config/zfs-build.m4
+++ b/sys/contrib/openzfs/config/zfs-build.m4
@@ -180,7 +180,7 @@ AC_DEFUN([ZFS_AC_CONFIG], [
[Config file 'kernel|user|all|srpm']),
[ZFS_CONFIG="$withval"])
AC_ARG_ENABLE([linux-builtin],
- [AC_HELP_STRING([--enable-linux-builtin],
+ [AS_HELP_STRING([--enable-linux-builtin],
[Configure for builtin in-tree kernel modules @<:@default=no@:>@])],
[],
[enable_linux_builtin=no])
diff --git a/sys/contrib/openzfs/configure.ac b/sys/contrib/openzfs/configure.ac
index 9323aa7a0c28..4520a290a9a5 100644
--- a/sys/contrib/openzfs/configure.ac
+++ b/sys/contrib/openzfs/configure.ac
@@ -36,7 +36,7 @@ AC_LANG(C)
ZFS_AC_META
AC_CONFIG_AUX_DIR([config])
AC_CONFIG_MACRO_DIR([config])
-AC_CANONICAL_SYSTEM
+AC_CANONICAL_TARGET
AM_MAINTAINER_MODE
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
AM_INIT_AUTOMAKE([subdir-objects])
@@ -45,9 +45,9 @@ AC_CONFIG_HEADERS([zfs_config.h], [
awk -f ${ac_srcdir}/config/config.awk zfs_config.h.tmp >zfs_config.h &&
rm zfs_config.h.tmp) || exit 1])
+LT_INIT
AC_PROG_INSTALL
AC_PROG_CC
-AC_PROG_LIBTOOL
PKG_PROG_PKG_CONFIG
AM_PROG_AS
AM_PROG_CC_C_O
@@ -86,6 +86,7 @@ AC_CONFIG_FILES([
cmd/ztest/Makefile
cmd/zvol_id/Makefile
cmd/zvol_wait/Makefile
+ cmd/zpool_influxdb/Makefile
contrib/Makefile
contrib/bash_completion.d/Makefile
contrib/bpftrace/Makefile
@@ -208,6 +209,7 @@ AC_CONFIG_FILES([
tests/zfs-tests/cmd/btree_test/Makefile
tests/zfs-tests/cmd/chg_usr_exec/Makefile
tests/zfs-tests/cmd/devname2devid/Makefile
+ tests/zfs-tests/cmd/draid/Makefile
tests/zfs-tests/cmd/dir_rd_update/Makefile
tests/zfs-tests/cmd/file_check/Makefile
tests/zfs-tests/cmd/file_trunc/Makefile
@@ -342,6 +344,7 @@ AC_CONFIG_FILES([
tests/zfs-tests/tests/functional/inheritance/Makefile
tests/zfs-tests/tests/functional/inuse/Makefile
tests/zfs-tests/tests/functional/io/Makefile
+ tests/zfs-tests/tests/functional/l2arc/Makefile
tests/zfs-tests/tests/functional/large_files/Makefile
tests/zfs-tests/tests/functional/largest_pool/Makefile
tests/zfs-tests/tests/functional/libzfs/Makefile
@@ -358,7 +361,6 @@ AC_CONFIG_FILES([
tests/zfs-tests/tests/functional/nopwrite/Makefile
tests/zfs-tests/tests/functional/online_offline/Makefile
tests/zfs-tests/tests/functional/pam/Makefile
- tests/zfs-tests/tests/functional/persist_l2arc/Makefile
tests/zfs-tests/tests/functional/pool_checkpoint/Makefile
tests/zfs-tests/tests/functional/pool_names/Makefile
tests/zfs-tests/tests/functional/poolversion/Makefile
@@ -394,6 +396,7 @@ AC_CONFIG_FILES([
tests/zfs-tests/tests/functional/vdev_zaps/Makefile
tests/zfs-tests/tests/functional/write_dirs/Makefile
tests/zfs-tests/tests/functional/xattr/Makefile
+ tests/zfs-tests/tests/functional/zpool_influxdb/Makefile
tests/zfs-tests/tests/functional/zvol/Makefile
tests/zfs-tests/tests/functional/zvol/zvol_ENOSPC/Makefile
tests/zfs-tests/tests/functional/zvol/zvol_cli/Makefile
diff --git a/sys/contrib/openzfs/contrib/dracut/90zfs/export-zfs.sh.in b/sys/contrib/openzfs/contrib/dracut/90zfs/export-zfs.sh.in
index 09e4a3cc0e5e..892650383475 100755
--- a/sys/contrib/openzfs/contrib/dracut/90zfs/export-zfs.sh.in
+++ b/sys/contrib/openzfs/contrib/dracut/90zfs/export-zfs.sh.in
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
. /lib/dracut-zfs-lib.sh
diff --git a/sys/contrib/openzfs/contrib/dracut/90zfs/module-setup.sh.in b/sys/contrib/openzfs/contrib/dracut/90zfs/module-setup.sh.in
index db5670cd5253..42afda60278c 100755
--- a/sys/contrib/openzfs/contrib/dracut/90zfs/module-setup.sh.in
+++ b/sys/contrib/openzfs/contrib/dracut/90zfs/module-setup.sh.in
@@ -85,7 +85,13 @@ install() {
fi
# Synchronize initramfs and system hostid
- zgenhostid -o "${initdir}/etc/hostid" "$(hostid)"
+ if [ -f @sysconfdir@/hostid ]; then
+ inst @sysconfdir@/hostid
+ type mark_hostonly >/dev/null 2>&1 && mark_hostonly @sysconfdir@/hostid
+ elif HOSTID="$(hostid 2>/dev/null)" && [ "${HOSTID}" != "00000000" ]; then
+ zgenhostid -o "${initdir}@sysconfdir@/hostid" "${HOSTID}"
+ type mark_hostonly >/dev/null 2>&1 && mark_hostonly @sysconfdir@/hostid
+ fi
if dracut_module_included "systemd"; then
mkdir -p "${initdir}/$systemdsystemunitdir/zfs-import.target.wants"
diff --git a/sys/contrib/openzfs/contrib/dracut/90zfs/mount-zfs.sh.in b/sys/contrib/openzfs/contrib/dracut/90zfs/mount-zfs.sh.in
index f5b3d9056c17..4a892e9382cf 100755
--- a/sys/contrib/openzfs/contrib/dracut/90zfs/mount-zfs.sh.in
+++ b/sys/contrib/openzfs/contrib/dracut/90zfs/mount-zfs.sh.in
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
. /lib/dracut-zfs-lib.sh
@@ -58,7 +58,7 @@ ZFS_POOL="${ZFS_DATASET%%/*}"
if import_pool "${ZFS_POOL}" ; then
# Load keys if we can or if we need to
- if [ $(zpool list -H -o feature@encryption $(echo "${ZFS_POOL}" | awk -F\/ '{print $1}')) = 'active' ]; then
+ if [ "$(zpool list -H -o feature@encryption "$(echo "${ZFS_POOL}" | awk -F/ '{print $1}')")" = 'active' ]; then
# if the root dataset has encryption enabled
ENCRYPTIONROOT="$(zfs get -H -o value encryptionroot "${ZFS_DATASET}")"
if ! [ "${ENCRYPTIONROOT}" = "-" ]; then
diff --git a/sys/contrib/openzfs/contrib/dracut/90zfs/parse-zfs.sh.in b/sys/contrib/openzfs/contrib/dracut/90zfs/parse-zfs.sh.in
index 2ff76d8fa080..768de9dd2512 100755
--- a/sys/contrib/openzfs/contrib/dracut/90zfs/parse-zfs.sh.in
+++ b/sys/contrib/openzfs/contrib/dracut/90zfs/parse-zfs.sh.in
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
. /lib/dracut-lib.sh
diff --git a/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-generator.sh.in b/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-generator.sh.in
index 120b9ecf957e..59cdadcbeae5 100755
--- a/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-generator.sh.in
+++ b/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-generator.sh.in
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/sh
echo "zfs-generator: starting" >> /dev/kmsg
@@ -11,7 +11,7 @@ GENERATOR_DIR="$1"
[ -f /lib/dracut-lib.sh ] && dracutlib=/lib/dracut-lib.sh
[ -f /usr/lib/dracut/modules.d/99base/dracut-lib.sh ] && dracutlib=/usr/lib/dracut/modules.d/99base/dracut-lib.sh
-type getarg >/dev/null 2>&1 || {
+command -v getarg >/dev/null 2>&1 || {
echo "zfs-generator: loading Dracut library from $dracutlib" >> /dev/kmsg
. "$dracutlib"
}
@@ -22,16 +22,17 @@ type getarg >/dev/null 2>&1 || {
# If root is not ZFS= or zfs: or rootfstype is not zfs
# then we are not supposed to handle it.
-[ "${root##zfs:}" = "${root}" -a "${root##ZFS=}" = "${root}" -a "$rootfstype" != "zfs" ] && exit 0
+[ "${root##zfs:}" = "${root}" ] &&
+ [ "${root##ZFS=}" = "${root}" ] &&
+ [ "$rootfstype" != "zfs" ] &&
+ exit 0
rootfstype=zfs
-if echo "${rootflags}" | grep -Eq '^zfsutil$|^zfsutil,|,zfsutil$|,zfsutil,' ; then
- true
-elif test -n "${rootflags}" ; then
- rootflags="zfsutil,${rootflags}"
-else
- rootflags=zfsutil
-fi
+case ",${rootflags}," in
+ *,zfsutil,*) ;;
+ ,,) rootflags=zfsutil ;;
+ *) rootflags="zfsutil,${rootflags}" ;;
+esac
echo "zfs-generator: writing extension for sysroot.mount to $GENERATOR_DIR"/sysroot.mount.d/zfs-enhancement.conf >> /dev/kmsg
@@ -58,4 +59,4 @@ echo "zfs-generator: writing extension for sysroot.mount to $GENERATOR_DIR"/sysr
[ -d "$GENERATOR_DIR"/initrd-root-fs.target.requires ] || mkdir -p "$GENERATOR_DIR"/initrd-root-fs.target.requires
ln -s ../sysroot.mount "$GENERATOR_DIR"/initrd-root-fs.target.requires/sysroot.mount
-echo "zfs-generator: finished" >> /dev/kmsg \ No newline at end of file
+echo "zfs-generator: finished" >> /dev/kmsg
diff --git a/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-lib.sh.in b/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-lib.sh.in
index f470bfcc54ae..c39cc5cfff1f 100755
--- a/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-lib.sh.in
+++ b/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-lib.sh.in
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
command -v getarg >/dev/null || . /lib/dracut-lib.sh
command -v getargbool >/dev/null || {
@@ -144,7 +144,7 @@ ask_for_password() {
{ flock -s 9;
# Prompt for password with plymouth, if installed and running.
- if type plymouth >/dev/null 2>&1 && plymouth --ping 2>/dev/null; then
+ if plymouth --ping 2>/dev/null; then
plymouth ask-for-password \
--prompt "$ply_prompt" --number-of-tries="$ply_tries" \
--command="$ply_cmd"
diff --git a/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-load-key.sh.in b/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-load-key.sh.in
index ff586ef654b8..e29501418919 100755
--- a/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-load-key.sh.in
+++ b/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-load-key.sh.in
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
# only run this on systemd systems, we handle the decrypt in mount-zfs.sh in the mount hook otherwise
[ -e /bin/systemctl ] || return 0
@@ -17,10 +17,8 @@
[ "${root##zfs:}" = "${root}" ] && [ "${root##ZFS=}" = "${root}" ] && [ "$rootfstype" != "zfs" ] && exit 0
# There is a race between the zpool import and the pre-mount hooks, so we wait for a pool to be imported
-while true; do
- zpool list -H | grep -q -v '^$' && break
- [ "$(systemctl is-failed zfs-import-cache.service)" = 'failed' ] && exit 1
- [ "$(systemctl is-failed zfs-import-scan.service)" = 'failed' ] && exit 1
+while [ "$(zpool list -H)" = "" ]; do
+ systemctl is-failed --quiet zfs-import-cache.service zfs-import-scan.service && exit 1
sleep 0.1s
done
@@ -34,11 +32,11 @@ else
fi
# if pool encryption is active and the zfs command understands '-o encryption'
-if [ "$(zpool list -H -o feature@encryption $(echo "${BOOTFS}" | awk -F\/ '{print $1}'))" = 'active' ]; then
+if [ "$(zpool list -H -o feature@encryption "$(echo "${BOOTFS}" | awk -F/ '{print $1}')")" = 'active' ]; then
# if the root dataset has encryption enabled
- ENCRYPTIONROOT=$(zfs get -H -o value encryptionroot "${BOOTFS}")
+ ENCRYPTIONROOT="$(zfs get -H -o value encryptionroot "${BOOTFS}")"
# where the key is stored (in a file or loaded via prompt)
- KEYLOCATION=$(zfs get -H -o value keylocation "${ENCRYPTIONROOT}")
+ KEYLOCATION="$(zfs get -H -o value keylocation "${ENCRYPTIONROOT}")"
if ! [ "${ENCRYPTIONROOT}" = "-" ]; then
KEYSTATUS="$(zfs get -H -o value keystatus "${ENCRYPTIONROOT}")"
# continue only if the key needs to be loaded
diff --git a/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-needshutdown.sh.in b/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-needshutdown.sh.in
index ddd3edae0014..dd6de30c2704 100755
--- a/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-needshutdown.sh.in
+++ b/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-needshutdown.sh.in
@@ -1,6 +1,6 @@
-#!/bin/bash
+#!/bin/sh
-type getarg >/dev/null 2>&1 || . /lib/dracut-lib.sh
+command -v getarg >/dev/null 2>&1 || . /lib/dracut-lib.sh
if zpool list 2>&1 | grep -q 'no pools available' ; then
info "ZFS: No active pools, no need to export anything."
diff --git a/sys/contrib/openzfs/contrib/initramfs/hooks/zfsunlock.in b/sys/contrib/openzfs/contrib/initramfs/hooks/zfsunlock.in
index c8ae86363981..d451726545db 100644
--- a/sys/contrib/openzfs/contrib/initramfs/hooks/zfsunlock.in
+++ b/sys/contrib/openzfs/contrib/initramfs/hooks/zfsunlock.in
@@ -15,4 +15,4 @@ esac
. /usr/share/initramfs-tools/hook-functions
-copy_exec /usr/share/initramfs-tools/zfsunlock /usr/bin
+copy_exec /usr/share/initramfs-tools/zfsunlock /usr/bin/zfsunlock
diff --git a/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c b/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c
index 0a96f19a3cd0..4cafc37b9b47 100644
--- a/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c
+++ b/sys/contrib/openzfs/contrib/pam_zfs_key/pam_zfs_key.c
@@ -386,6 +386,8 @@ unmount_unload(pam_handle_t *pamh, const char *ds_name)
typedef struct {
char *homes_prefix;
char *runstatedir;
+ char *homedir;
+ char *dsname;
uid_t uid;
const char *username;
int unmount_and_unload;
@@ -423,6 +425,8 @@ zfs_key_config_load(pam_handle_t *pamh, zfs_key_config_t *config,
config->uid = entry->pw_uid;
config->username = name;
config->unmount_and_unload = 1;
+ config->dsname = NULL;
+ config->homedir = NULL;
for (int c = 0; c < argc; c++) {
if (strncmp(argv[c], "homes=", 6) == 0) {
free(config->homes_prefix);
@@ -432,6 +436,8 @@ zfs_key_config_load(pam_handle_t *pamh, zfs_key_config_t *config,
config->runstatedir = strdup(argv[c] + 12);
} else if (strcmp(argv[c], "nounmount") == 0) {
config->unmount_and_unload = 0;
+ } else if (strcmp(argv[c], "prop_mountpoint") == 0) {
+ config->homedir = strdup(entry->pw_dir);
}
}
return (0);
@@ -441,11 +447,59 @@ static void
zfs_key_config_free(zfs_key_config_t *config)
{
free(config->homes_prefix);
+ free(config->runstatedir);
+ free(config->homedir);
+ free(config->dsname);
+}
+
+static int
+find_dsname_by_prop_value(zfs_handle_t *zhp, void *data)
+{
+ zfs_type_t type = zfs_get_type(zhp);
+ zfs_key_config_t *target = data;
+ char mountpoint[ZFS_MAXPROPLEN];
+
+ /* Skip any datasets whose type does not match */
+ if ((type & ZFS_TYPE_FILESYSTEM) == 0) {
+ zfs_close(zhp);
+ return (0);
+ }
+
+ /* Skip any datasets whose mountpoint does not match */
+ (void) zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mountpoint,
+ sizeof (mountpoint), NULL, NULL, 0, B_FALSE);
+ if (strcmp(target->homedir, mountpoint) != 0) {
+ zfs_close(zhp);
+ return (0);
+ }
+
+ target->dsname = strdup(zfs_get_name(zhp));
+ zfs_close(zhp);
+ return (1);
}
static char *
zfs_key_config_get_dataset(zfs_key_config_t *config)
{
+ if (config->homedir != NULL &&
+ config->homes_prefix != NULL) {
+ zfs_handle_t *zhp = zfs_open(g_zfs, config->homes_prefix,
+ ZFS_TYPE_FILESYSTEM);
+ if (zhp == NULL) {
+ pam_syslog(NULL, LOG_ERR, "dataset %s not found",
+ config->homes_prefix);
+ zfs_close(zhp);
+ return (NULL);
+ }
+
+ (void) zfs_iter_filesystems(zhp, find_dsname_by_prop_value,
+ config);
+ zfs_close(zhp);
+ char *dsname = config->dsname;
+ config->dsname = NULL;
+ return (dsname);
+ }
+
size_t len = ZFS_MAX_DATASET_NAME_LEN;
size_t total_len = strlen(config->homes_prefix) + 1
+ strlen(config->username);
diff --git a/sys/contrib/openzfs/etc/systemd/system/zfs-share.service.in b/sys/contrib/openzfs/etc/systemd/system/zfs-share.service.in
index b720085874e5..745077513c30 100644
--- a/sys/contrib/openzfs/etc/systemd/system/zfs-share.service.in
+++ b/sys/contrib/openzfs/etc/systemd/system/zfs-share.service.in
@@ -8,6 +8,7 @@ Wants=zfs-mount.service
After=zfs-mount.service
PartOf=nfs-server.service nfs-kernel-server.service
PartOf=smb.service
+ConditionPathIsDirectory=/sys/module/zfs
[Service]
Type=oneshot
diff --git a/sys/contrib/openzfs/etc/systemd/system/zfs-volume-wait.service.in b/sys/contrib/openzfs/etc/systemd/system/zfs-volume-wait.service.in
index 75bd9fcdd56c..4c77724d8bbb 100644
--- a/sys/contrib/openzfs/etc/systemd/system/zfs-volume-wait.service.in
+++ b/sys/contrib/openzfs/etc/systemd/system/zfs-volume-wait.service.in
@@ -3,6 +3,7 @@ Description=Wait for ZFS Volume (zvol) links in /dev
DefaultDependencies=no
After=systemd-udev-settle.service
After=zfs-import.target
+ConditionPathIsDirectory=/sys/module/zfs
[Service]
Type=oneshot
diff --git a/sys/contrib/openzfs/etc/systemd/system/zfs-zed.service.in b/sys/contrib/openzfs/etc/systemd/system/zfs-zed.service.in
index f4313625ee5e..008075138f02 100644
--- a/sys/contrib/openzfs/etc/systemd/system/zfs-zed.service.in
+++ b/sys/contrib/openzfs/etc/systemd/system/zfs-zed.service.in
@@ -1,6 +1,7 @@
[Unit]
Description=ZFS Event Daemon (zed)
Documentation=man:zed(8)
+ConditionPathIsDirectory=/sys/module/zfs
[Service]
ExecStart=@sbindir@/zed -F
diff --git a/sys/contrib/openzfs/include/libzfs.h b/sys/contrib/openzfs/include/libzfs.h
index e0b2676a441f..66cedd0ee0fe 100644
--- a/sys/contrib/openzfs/include/libzfs.h
+++ b/sys/contrib/openzfs/include/libzfs.h
@@ -88,8 +88,8 @@ typedef enum zfs_error {
EZFS_ZONED, /* used improperly in local zone */
EZFS_MOUNTFAILED, /* failed to mount dataset */
EZFS_UMOUNTFAILED, /* failed to unmount dataset */
- EZFS_UNSHARENFSFAILED, /* unshare(1M) failed */
- EZFS_SHARENFSFAILED, /* share(1M) failed */
+ EZFS_UNSHARENFSFAILED, /* failed to unshare over nfs */
+ EZFS_SHARENFSFAILED, /* failed to share over nfs */
EZFS_PERM, /* permission denied */
EZFS_NOSPC, /* out of space */
EZFS_FAULT, /* bad address */
@@ -455,6 +455,7 @@ extern void zpool_explain_recover(libzfs_handle_t *, const char *, int,
nvlist_t *);
extern int zpool_checkpoint(zpool_handle_t *);
extern int zpool_discard_checkpoint(zpool_handle_t *);
+extern boolean_t zpool_is_draid_spare(const char *);
/*
* Basic handle manipulations. These functions do not create or destroy the
@@ -556,7 +557,7 @@ extern void zfs_prune_proplist(zfs_handle_t *, uint8_t *);
/*
* zpool property management
*/
-extern int zpool_expand_proplist(zpool_handle_t *, zprop_list_t **);
+extern int zpool_expand_proplist(zpool_handle_t *, zprop_list_t **, boolean_t);
extern int zpool_prop_get_feature(zpool_handle_t *, const char *, char *,
size_t);
extern const char *zpool_prop_default_string(zpool_prop_t);
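The two libzfs interface changes above are small but load-bearing: zpool_expand_proplist() gains a boolean_t argument (from the zpool_main.c changes it appears to request literal, unrounded property values), and zpool_is_draid_spare() lets callers recognize dRAID distributed spares, which look like leaf vdevs but are backed by the dRAID vdev itself. A minimal sketch of the latter; the "draid1-0-0" name is only an illustration of how distributed spares are assumed to be reported:

    #include <libzfs.h>

    /*
     * Hedged sketch: refuse to treat a dRAID distributed spare as an
     * ordinary leaf vdev.  The example name in the comment is illustrative.
     */
    static boolean_t
    usable_as_leaf(const char *vdev_name)
    {
            if (zpool_is_draid_spare(vdev_name))
                    return (B_FALSE);       /* e.g. "draid1-0-0" */
            return (B_TRUE);
    }
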
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/misc.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/misc.h
index e39bb07b2f4c..3481507d2c33 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/misc.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/misc.h
@@ -30,6 +30,7 @@
#define _OPENSOLARIS_SYS_MISC_H_
#include <sys/limits.h>
+#include <sys/filio.h>
#define MAXUID UID_MAX
@@ -40,8 +41,8 @@
#define _FIOGDIO (INT_MIN+1)
#define _FIOSDIO (INT_MIN+2)
-#define _FIO_SEEK_DATA FIOSEEKDATA
-#define _FIO_SEEK_HOLE FIOSEEKHOLE
+#define F_SEEK_DATA FIOSEEKDATA
+#define F_SEEK_HOLE FIOSEEKHOLE
struct opensolaris_utsname {
char *sysname;
@@ -53,4 +54,7 @@ struct opensolaris_utsname {
extern char hw_serial[11];
+#define task_io_account_read(n)
+#define task_io_account_write(n)
+
#endif /* _OPENSOLARIS_SYS_MISC_H_ */
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/mod_os.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/mod_os.h
index ec1da1a46ae6..5b3b3271e39e 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/mod_os.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/mod_os.h
@@ -57,6 +57,8 @@
#define ZFS_MODULE_PARAM_CALL(scope_prefix, name_prefix, name, func, _, perm, desc) \
ZFS_MODULE_PARAM_CALL_IMPL(_vfs_ ## scope_prefix, name, perm, func ## _args(name_prefix ## name), desc)
+#define ZFS_MODULE_VIRTUAL_PARAM_CALL ZFS_MODULE_PARAM_CALL
+
#define param_set_arc_long_args(var) \
CTLTYPE_ULONG, &var, 0, param_set_arc_long, "LU"
@@ -84,6 +86,9 @@
#define param_set_max_auto_ashift_args(var) \
CTLTYPE_U64, &var, 0, param_set_max_auto_ashift, "QU"
+#define fletcher_4_param_set_args(var) \
+ CTLTYPE_STRING, NULL, 0, fletcher_4_param, "A"
+
#include <sys/kernel.h>
#define module_init(fn) \
static void \
@@ -93,6 +98,13 @@ wrap_ ## fn(void *dummy __unused) \
} \
SYSINIT(zfs_ ## fn, SI_SUB_LAST, SI_ORDER_FIRST, wrap_ ## fn, NULL)
+#define module_init_early(fn) \
+static void \
+wrap_ ## fn(void *dummy __unused) \
+{ \
+ fn(); \
+} \
+SYSINIT(zfs_ ## fn, SI_SUB_INT_CONFIG_HOOKS, SI_ORDER_FIRST, wrap_ ## fn, NULL)
#define module_exit(fn) \
static void \
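The new module_init_early() shim gives FreeBSD an equivalent of the early-init hook: the wrapped function runs from a SYSINIT at SI_SUB_INT_CONFIG_HOOKS, ahead of the regular module_init() consumers at SI_SUB_LAST (the Linux side, in mod_compat.h further down, simply maps module_init_early() to module_init()). A minimal consumer, with a hypothetical function name:

    /* Hedged sketch: an init routine that must run before module_init() users. */
    static void
    my_early_init(void)
    {
            /* set up state that later SI_SUB_LAST initializers depend on */
    }
    module_init_early(my_early_init);
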
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/policy.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/policy.h
index 3a05da12b3aa..909ae3886e9c 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/policy.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/policy.h
@@ -34,6 +34,7 @@
#include <sys/vnode.h>
struct mount;
struct vattr;
+struct znode;
int secpolicy_nfs(cred_t *cr);
int secpolicy_zfs(cred_t *crd);
@@ -57,7 +58,7 @@ int secpolicy_vnode_setattr(cred_t *cr, vnode_t *vp, struct vattr *vap,
int unlocked_access(void *, int, cred_t *), void *node);
int secpolicy_vnode_create_gid(cred_t *cr);
int secpolicy_vnode_setids_setgids(vnode_t *vp, cred_t *cr, gid_t gid);
-int secpolicy_vnode_setid_retain(vnode_t *vp, cred_t *cr,
+int secpolicy_vnode_setid_retain(struct znode *zp, cred_t *cr,
boolean_t issuidroot);
void secpolicy_setid_clear(struct vattr *vap, vnode_t *vp, cred_t *cr);
int secpolicy_setid_setsticky_clear(vnode_t *vp, struct vattr *vap,
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/sysmacros.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/sysmacros.h
index 2d0164cb129f..7e3ab8915542 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/sysmacros.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/sysmacros.h
@@ -80,6 +80,7 @@ extern "C" {
#define kpreempt_disable() critical_enter()
#define kpreempt_enable() critical_exit()
#define CPU_SEQID curcpu
+#define CPU_SEQID_UNSTABLE curcpu
#define is_system_labeled() 0
/*
* Convert a single byte to/from binary-coded decimal (BCD).
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/types.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/types.h
index 3f895362881a..ecb91fd1bb89 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/types.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/types.h
@@ -64,7 +64,7 @@ typedef u_int uint_t;
typedef u_char uchar_t;
typedef u_short ushort_t;
typedef u_long ulong_t;
-typedef u_int minor_t;
+typedef int minor_t;
/* END CSTYLED */
#ifndef _OFF64_T_DECLARED
#define _OFF64_T_DECLARED
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/uio.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/uio.h
index cb577df105e9..11b2189cda45 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/uio.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/uio.h
@@ -43,27 +43,6 @@ typedef struct uio uio_t;
typedef struct iovec iovec_t;
typedef enum uio_seg uio_seg_t;
-typedef enum xuio_type {
- UIOTYPE_ASYNCIO,
- UIOTYPE_ZEROCOPY
-} xuio_type_t;
-
-typedef struct xuio {
- uio_t xu_uio;
-
- /* Extended uio fields */
- enum xuio_type xu_type; /* What kind of uio structure? */
- union {
- struct {
- int xu_zc_rw;
- void *xu_zc_priv;
- } xu_zc;
- } xu_ext;
-} xuio_t;
-
-#define XUIO_XUZC_PRIV(xuio) xuio->xu_ext.xu_zc.xu_zc_priv
-#define XUIO_XUZC_RW(xuio) xuio->xu_ext.xu_zc.xu_zc_rw
-
static __inline int
zfs_uiomove(void *cp, size_t n, enum uio_rw dir, uio_t *uio)
{
@@ -82,6 +61,8 @@ void uioskip(uio_t *uiop, size_t n);
#define uio_iovcnt(uio) (uio)->uio_iovcnt
#define uio_iovlen(uio, idx) (uio)->uio_iov[(idx)].iov_len
#define uio_iovbase(uio, idx) (uio)->uio_iov[(idx)].iov_base
+#define uio_fault_disable(uio, set)
+#define uio_prefaultpages(size, uio) (0)
static inline void
uio_iov_at_index(uio_t *uio, uint_t idx, void **base, uint64_t *len)
diff --git a/sys/contrib/openzfs/include/os/freebsd/zfs/sys/Makefile.am b/sys/contrib/openzfs/include/os/freebsd/zfs/sys/Makefile.am
index bf5cc39eba74..392bb4ae3477 100644
--- a/sys/contrib/openzfs/include/os/freebsd/zfs/sys/Makefile.am
+++ b/sys/contrib/openzfs/include/os/freebsd/zfs/sys/Makefile.am
@@ -8,7 +8,7 @@ KERNEL_H = \
zfs_dir.h \
zfs_ioctl_compat.h \
zfs_vfsops_os.h \
- zfs_vnops.h \
+ zfs_vnops_os.h \
zfs_znode_impl.h \
zpl.h
diff --git a/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_context_os.h b/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_context_os.h
index 0316f93b27ec..8dbe907d098c 100644
--- a/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_context_os.h
+++ b/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_context_os.h
@@ -56,7 +56,6 @@
#define tsd_set(key, value) osd_thread_set(curthread, (key), (value))
#define fm_panic panic
-#define cond_resched() kern_yield(PRI_USER)
extern int zfs_debug_level;
extern struct mtx zfs_debug_mtx;
#define ZFS_LOG(lvl, ...) do { \
diff --git a/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_vnops.h b/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_vnops.h
index 587650af6ce3..bf5e03b24c06 100644
--- a/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_vnops.h
+++ b/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_vnops.h
@@ -26,8 +26,9 @@
* $FreeBSD$
*/
-#ifndef _SYS_ZFS_VNOPS_H_
-#define _SYS_ZFS_VNOPS_H_
+#ifndef _SYS_FS_ZFS_VNOPS_OS_H
+#define _SYS_FS_ZFS_VNOPS_OS_H
+
int dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset,
uint64_t size, struct vm_page **ppa, dmu_tx_t *tx);
int dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
diff --git a/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_znode_impl.h b/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_znode_impl.h
index ff61935e741e..ac2625d9a8ab 100644
--- a/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_znode_impl.h
+++ b/sys/contrib/openzfs/include/os/freebsd/zfs/sys/zfs_znode_impl.h
@@ -39,6 +39,7 @@
#include <sys/zfs_acl.h>
#include <sys/zil.h>
#include <sys/zfs_project.h>
+#include <vm/vm_object.h>
#ifdef __cplusplus
extern "C" {
@@ -113,7 +114,10 @@ extern minor_t zfsdev_minor_alloc(void);
#define Z_ISBLK(type) ((type) == VBLK)
#define Z_ISCHR(type) ((type) == VCHR)
#define Z_ISLNK(type) ((type) == VLNK)
+#define Z_ISDIR(type) ((type) == VDIR)
+#define zn_has_cached_data(zp) vn_has_cached_data(ZTOV(zp))
+#define zn_rlimit_fsize(zp, uio, td) vn_rlimit_fsize(ZTOV(zp), (uio), (td))
/* Called on entry to each ZFS vnode and vfs operation */
#define ZFS_ENTER(zfsvfs) \
@@ -169,13 +173,12 @@ extern void zfs_tstamp_update_setup_ext(struct znode *,
uint_t, uint64_t [2], uint64_t [2], boolean_t have_tx);
extern void zfs_znode_free(struct znode *);
-extern zil_get_data_t zfs_get_data;
extern zil_replay_func_t *zfs_replay_vector[TX_MAX_TYPE];
extern int zfsfstype;
extern int zfs_znode_parent_and_name(struct znode *zp, struct znode **dzpp,
char *buf);
-
+extern void zfs_inode_update(struct znode *);
#ifdef __cplusplus
}
#endif
diff --git a/sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h b/sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h
index 1cdc300a6f85..e41b248b0405 100644
--- a/sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h
+++ b/sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h
@@ -99,14 +99,6 @@ blk_queue_set_read_ahead(struct request_queue *q, unsigned long ra_pages)
#endif
}
-#if !defined(HAVE_GET_DISK_AND_MODULE)
-static inline struct kobject *
-get_disk_and_module(struct gendisk *disk)
-{
- return (get_disk(disk));
-}
-#endif
-
#ifdef HAVE_BIO_BVEC_ITER
#define BIO_BI_SECTOR(bio) (bio)->bi_iter.bi_sector
#define BIO_BI_SIZE(bio) (bio)->bi_iter.bi_size
@@ -268,12 +260,48 @@ bio_set_bi_error(struct bio *bio, int error)
*
* For older kernels trigger a re-reading of the partition table by calling
* check_disk_change() which calls flush_disk() to invalidate the device.
+ *
+ * For newer kernels (as of 5.10), bdev_check_media_change() is used in place of
+ * check_disk_change(), with the modification that invalidation is no longer
+ * forced.
*/
+#ifdef HAVE_CHECK_DISK_CHANGE
+#define zfs_check_media_change(bdev) check_disk_change(bdev)
#ifdef HAVE_BLKDEV_REREAD_PART
#define vdev_bdev_reread_part(bdev) blkdev_reread_part(bdev)
#else
#define vdev_bdev_reread_part(bdev) check_disk_change(bdev)
#endif /* HAVE_BLKDEV_REREAD_PART */
+#else
+#ifdef HAVE_BDEV_CHECK_MEDIA_CHANGE
+static inline int
+zfs_check_media_change(struct block_device *bdev)
+{
+ struct gendisk *gd = bdev->bd_disk;
+ const struct block_device_operations *bdo = gd->fops;
+
+ if (!bdev_check_media_change(bdev))
+ return (0);
+
+ /*
+ * Force revalidation, to mimic the old behavior of
+ * check_disk_change()
+ */
+ if (bdo->revalidate_disk)
+ bdo->revalidate_disk(gd);
+
+ return (0);
+}
+#define vdev_bdev_reread_part(bdev) zfs_check_media_change(bdev)
+#else
+/*
+ * This is encountered if check_disk_change() and bdev_check_media_change()
+ * are not available in the kernel - likely due to an API change that needs
+ * to be chased down.
+ */
+#error "Unsupported kernel: no usable disk change check"
+#endif /* HAVE_BDEV_CHECK_MEDIA_CHANGE */
+#endif /* HAVE_CHECK_DISK_CHANGE */
/*
* 2.6.27 API change
@@ -282,16 +310,38 @@ bio_set_bi_error(struct bio *bio, int error)
*
* 4.4.0-6.21 API change for Ubuntu
* lookup_bdev() gained a second argument, FMODE_*, to check inode permissions.
+ *
+ * 5.11 API change
+ * Changed to take a dev_t argument, which is set on success, and to return a
+ * non-zero error code on failure.
*/
-#ifdef HAVE_1ARG_LOOKUP_BDEV
-#define vdev_lookup_bdev(path) lookup_bdev(path)
-#else
-#ifdef HAVE_2ARGS_LOOKUP_BDEV
-#define vdev_lookup_bdev(path) lookup_bdev(path, 0)
+static inline int
+vdev_lookup_bdev(const char *path, dev_t *dev)
+{
+#if defined(HAVE_DEVT_LOOKUP_BDEV)
+ return (lookup_bdev(path, dev));
+#elif defined(HAVE_1ARG_LOOKUP_BDEV)
+ struct block_device *bdev = lookup_bdev(path);
+ if (IS_ERR(bdev))
+ return (PTR_ERR(bdev));
+
+ *dev = bdev->bd_dev;
+ bdput(bdev);
+
+ return (0);
+#elif defined(HAVE_MODE_LOOKUP_BDEV)
+ struct block_device *bdev = lookup_bdev(path, FMODE_READ);
+ if (IS_ERR(bdev))
+ return (PTR_ERR(bdev));
+
+ *dev = bdev->bd_dev;
+ bdput(bdev);
+
+ return (0);
#else
#error "Unsupported kernel"
-#endif /* HAVE_2ARGS_LOOKUP_BDEV */
-#endif /* HAVE_1ARG_LOOKUP_BDEV */
+#endif
+}
/*
* Kernels without bio_set_op_attrs use bi_rw for the bio flags.
@@ -465,25 +515,38 @@ blk_queue_discard_secure(struct request_queue *q)
*/
#define VDEV_HOLDER ((void *)0x2401de7)
-static inline void
-blk_generic_start_io_acct(struct request_queue *q, int rw,
- unsigned long sectors, struct hd_struct *part)
+static inline unsigned long
+blk_generic_start_io_acct(struct request_queue *q __attribute__((unused)),
+ struct gendisk *disk __attribute__((unused)),
+ int rw __attribute__((unused)), struct bio *bio)
{
-#if defined(HAVE_GENERIC_IO_ACCT_3ARG)
- generic_start_io_acct(rw, sectors, part);
+#if defined(HAVE_BIO_IO_ACCT)
+ return (bio_start_io_acct(bio));
+#elif defined(HAVE_GENERIC_IO_ACCT_3ARG)
+ unsigned long start_time = jiffies;
+ generic_start_io_acct(rw, bio_sectors(bio), &disk->part0);
+ return (start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
- generic_start_io_acct(q, rw, sectors, part);
+ unsigned long start_time = jiffies;
+ generic_start_io_acct(q, rw, bio_sectors(bio), &disk->part0);
+ return (start_time);
+#else
+ /* Unsupported */
+ return (0);
#endif
}
static inline void
-blk_generic_end_io_acct(struct request_queue *q, int rw,
- struct hd_struct *part, unsigned long start_time)
+blk_generic_end_io_acct(struct request_queue *q __attribute__((unused)),
+ struct gendisk *disk __attribute__((unused)),
+ int rw __attribute__((unused)), struct bio *bio, unsigned long start_time)
{
-#if defined(HAVE_GENERIC_IO_ACCT_3ARG)
- generic_end_io_acct(rw, part, start_time);
+#if defined(HAVE_BIO_IO_ACCT)
+ bio_end_io_acct(bio, start_time);
+#elif defined(HAVE_GENERIC_IO_ACCT_3ARG)
+ generic_end_io_acct(rw, &disk->part0, start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
- generic_end_io_acct(q, rw, part, start_time);
+ generic_end_io_acct(q, rw, &disk->part0, start_time);
#endif
}
@@ -493,6 +556,8 @@ blk_generic_alloc_queue(make_request_fn make_request, int node_id)
{
#if defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN)
return (blk_alloc_queue(make_request, node_id));
+#elif defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH)
+ return (blk_alloc_queue_rh(make_request, node_id));
#else
struct request_queue *q = blk_alloc_queue(GFP_KERNEL);
if (q != NULL)
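The I/O accounting wrappers above change shape as well: they now take the gendisk and the bio and deal in a start time, so one call site covers bio_start_io_acct()/bio_end_io_acct() where available (HAVE_BIO_IO_ACCT) and the 3- and 4-argument generic_start_io_acct() variants on older kernels. A sketch of the intended call pattern, assuming q, disk, and bio come from the surrounding driver context:

    /* Hedged sketch: bracket a bio's lifetime with the compat wrappers. */
    unsigned long start_time;

    start_time = blk_generic_start_io_acct(q, disk, bio_data_dir(bio), bio);
    /* ... submit the bio and handle its completion ... */
    blk_generic_end_io_acct(q, disk, bio_data_dir(bio), bio, start_time);
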
diff --git a/sys/contrib/openzfs/include/os/linux/kernel/linux/mod_compat.h b/sys/contrib/openzfs/include/os/linux/kernel/linux/mod_compat.h
index 1c48df5cbd81..e96e95313009 100644
--- a/sys/contrib/openzfs/include/os/linux/kernel/linux/mod_compat.h
+++ b/sys/contrib/openzfs/include/os/linux/kernel/linux/mod_compat.h
@@ -74,6 +74,7 @@ enum scope_prefix_types {
zfs_vdev_cache,
zfs_vdev_file,
zfs_vdev_mirror,
+ zfs_vnops,
zfs_zevent,
zfs_zio,
zfs_zil
@@ -143,6 +144,17 @@ enum scope_prefix_types {
MODULE_PARM_DESC(name_prefix ## name, desc)
/* END CSTYLED */
+/*
+ * As above, but there is no variable with the name name_prefix ## name,
+ * so NULL is passed to module_param_call instead.
+ */
+/* BEGIN CSTYLED */
+#define ZFS_MODULE_VIRTUAL_PARAM_CALL(scope_prefix, name_prefix, name, setfunc, getfunc, perm, desc) \
+ CTASSERT_GLOBAL((sizeof (scope_prefix) == sizeof (enum scope_prefix_types))); \
+ module_param_call(name_prefix ## name, setfunc, getfunc, NULL, perm); \
+ MODULE_PARM_DESC(name_prefix ## name, desc)
+/* END CSTYLED */
+
#define ZFS_MODULE_PARAM_ARGS const char *buf, zfs_kernel_param_t *kp
#define ZFS_MODULE_DESCRIPTION(s) MODULE_DESCRIPTION(s)
@@ -150,4 +162,6 @@ enum scope_prefix_types {
#define ZFS_MODULE_LICENSE(s) MODULE_LICENSE(s)
#define ZFS_MODULE_VERSION(s) MODULE_VERSION(s)
+#define module_init_early(fn) module_init(fn)
+
#endif /* _MOD_COMPAT_H */
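ZFS_MODULE_VIRTUAL_PARAM_CALL covers tunables that have no backing variable: NULL is handed to module_param_call() and the set/get handlers do all the work (the FreeBSD header earlier in this diff simply aliases the macro to ZFS_MODULE_PARAM_CALL). A representative use, modeled on the fletcher-4 implementation selector; treat the exact handler names as an assumption, and ZMOD_RW is the usual read-write permission constant:

    /* BEGIN CSTYLED */
    ZFS_MODULE_VIRTUAL_PARAM_CALL(zfs, zfs_, fletcher_4_impl,
        fletcher_4_param_set, fletcher_4_param_get, ZMOD_RW,
        "Select fletcher 4 implementation");
    /* END CSTYLED */
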
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/sysmacros.h b/sys/contrib/openzfs/include/os/linux/spl/sys/sysmacros.h
index eb3494bc7904..98d1ab1d7f8a 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/sysmacros.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/sysmacros.h
@@ -76,6 +76,7 @@
#define max_ncpus num_possible_cpus()
#define boot_ncpus num_online_cpus()
#define CPU_SEQID smp_processor_id()
+#define CPU_SEQID_UNSTABLE raw_smp_processor_id()
#define is_system_labeled() 0
#ifndef RLIM64_INFINITY
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/taskq.h b/sys/contrib/openzfs/include/os/linux/spl/sys/taskq.h
index 16f4349e78e4..b50175a10873 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/taskq.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/taskq.h
@@ -84,6 +84,8 @@ typedef struct taskq {
int tq_nthreads; /* # of existing threads */
int tq_nspawn; /* # of threads being spawned */
int tq_maxthreads; /* # of threads maximum */
+ /* If TASKQ_THREADS_CPU_PCT is set, percent of NCPUs to have as threads */
+ int tq_cpu_pct;
int tq_pri; /* priority */
int tq_minalloc; /* min taskq_ent_t pool size */
int tq_maxalloc; /* max taskq_ent_t pool size */
@@ -99,6 +101,9 @@ typedef struct taskq {
spl_wait_queue_head_t tq_work_waitq; /* new work waitq */
spl_wait_queue_head_t tq_wait_waitq; /* wait waitq */
tq_lock_role_t tq_lock_class; /* class when taking tq_lock */
+ /* list node for the cpu hotplug callback */
+ struct hlist_node tq_hp_cb_node;
+ boolean_t tq_hp_support;
} taskq_t;
typedef struct taskq_ent {
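The new taskq fields support CPU hotplug: tq_cpu_pct remembers the percentage requested when a taskq is created with TASKQ_THREADS_CPU_PCT, and tq_hp_cb_node/tq_hp_support link the taskq into a hotplug callback list, presumably so percentage-sized taskqs can be resized as CPUs come and go. A sketch of the call that populates tq_cpu_pct, using the standard SPL taskq_create() signature:

    /*
     * Hedged sketch: size the taskq at 75% of the online CPUs; with
     * TASKQ_THREADS_CPU_PCT the nthreads argument is a percentage.
     */
    taskq_t *tq = taskq_create("my_pct_taskq", 75, maxclsyspri,
        50, INT_MAX, TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);
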
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/uio.h b/sys/contrib/openzfs/include/os/linux/spl/sys/uio.h
index abcd90dd570c..6e850c5fe7b1 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/uio.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/uio.h
@@ -44,14 +44,19 @@ typedef enum uio_rw {
typedef enum uio_seg {
UIO_USERSPACE = 0,
UIO_SYSSPACE = 1,
- UIO_USERISPACE = 2,
- UIO_BVEC = 3,
+ UIO_BVEC = 2,
+#if defined(HAVE_VFS_IOV_ITER)
+ UIO_ITER = 3,
+#endif
} uio_seg_t;
typedef struct uio {
union {
const struct iovec *uio_iov;
const struct bio_vec *uio_bvec;
+#if defined(HAVE_VFS_IOV_ITER)
+ struct iov_iter *uio_iter;
+#endif
};
int uio_iovcnt;
offset_t uio_loffset;
@@ -59,60 +64,17 @@ typedef struct uio {
boolean_t uio_fault_disable;
uint16_t uio_fmode;
uint16_t uio_extflg;
- offset_t uio_limit;
ssize_t uio_resid;
size_t uio_skip;
} uio_t;
-typedef struct aio_req {
- uio_t *aio_uio;
- void *aio_private;
-} aio_req_t;
-
-typedef enum xuio_type {
- UIOTYPE_ASYNCIO,
- UIOTYPE_ZEROCOPY,
-} xuio_type_t;
-
-
-#define UIOA_IOV_MAX 16
-
-typedef struct uioa_page_s {
- int uioa_pfncnt;
- void **uioa_ppp;
- caddr_t uioa_base;
- size_t uioa_len;
-} uioa_page_t;
-
-typedef struct xuio {
- uio_t xu_uio;
- enum xuio_type xu_type;
- union {
- struct {
- uint32_t xu_a_state;
- ssize_t xu_a_mbytes;
- uioa_page_t *xu_a_lcur;
- void **xu_a_lppp;
- void *xu_a_hwst[4];
- uioa_page_t xu_a_locked[UIOA_IOV_MAX];
- } xu_aio;
-
- struct {
- int xu_zc_rw;
- void *xu_zc_priv;
- } xu_zc;
- } xu_ext;
-} xuio_t;
-
-#define XUIO_XUZC_PRIV(xuio) xuio->xu_ext.xu_zc.xu_zc_priv
-#define XUIO_XUZC_RW(xuio) xuio->xu_ext.xu_zc.xu_zc_rw
-
#define uio_segflg(uio) (uio)->uio_segflg
#define uio_offset(uio) (uio)->uio_loffset
#define uio_resid(uio) (uio)->uio_resid
#define uio_iovcnt(uio) (uio)->uio_iovcnt
#define uio_iovlen(uio, idx) (uio)->uio_iov[(idx)].iov_len
#define uio_iovbase(uio, idx) (uio)->uio_iov[(idx)].iov_base
+#define uio_fault_disable(uio, set) (uio)->uio_fault_disable = set
static inline void
uio_iov_at_index(uio_t *uio, uint_t idx, void **base, uint64_t *len)
@@ -140,4 +102,65 @@ uio_index_at_offset(uio_t *uio, offset_t off, uint_t *vec_idx)
return (off);
}
+static inline void
+iov_iter_init_compat(struct iov_iter *iter, unsigned int dir,
+ const struct iovec *iov, unsigned long nr_segs, size_t count)
+{
+#if defined(HAVE_IOV_ITER_INIT)
+ iov_iter_init(iter, dir, iov, nr_segs, count);
+#elif defined(HAVE_IOV_ITER_INIT_LEGACY)
+ iov_iter_init(iter, iov, nr_segs, count, 0);
+#else
+#error "Unsupported kernel"
+#endif
+}
+
+static inline void
+uio_iovec_init(uio_t *uio, const struct iovec *iov, unsigned long nr_segs,
+ offset_t offset, uio_seg_t seg, ssize_t resid, size_t skip)
+{
+ ASSERT(seg == UIO_USERSPACE || seg == UIO_SYSSPACE);
+
+ uio->uio_iov = iov;
+ uio->uio_iovcnt = nr_segs;
+ uio->uio_loffset = offset;
+ uio->uio_segflg = seg;
+ uio->uio_fault_disable = B_FALSE;
+ uio->uio_fmode = 0;
+ uio->uio_extflg = 0;
+ uio->uio_resid = resid;
+ uio->uio_skip = skip;
+}
+
+static inline void
+uio_bvec_init(uio_t *uio, struct bio *bio)
+{
+ uio->uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
+ uio->uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
+ uio->uio_loffset = BIO_BI_SECTOR(bio) << 9;
+ uio->uio_segflg = UIO_BVEC;
+ uio->uio_fault_disable = B_FALSE;
+ uio->uio_fmode = 0;
+ uio->uio_extflg = 0;
+ uio->uio_resid = BIO_BI_SIZE(bio);
+ uio->uio_skip = BIO_BI_SKIP(bio);
+}
+
+#if defined(HAVE_VFS_IOV_ITER)
+static inline void
+uio_iov_iter_init(uio_t *uio, struct iov_iter *iter, offset_t offset,
+ ssize_t resid, size_t skip)
+{
+ uio->uio_iter = iter;
+ uio->uio_iovcnt = iter->nr_segs;
+ uio->uio_loffset = offset;
+ uio->uio_segflg = UIO_ITER;
+ uio->uio_fault_disable = B_FALSE;
+ uio->uio_fmode = 0;
+ uio->uio_extflg = 0;
+ uio->uio_resid = resid;
+ uio->uio_skip = skip;
+}
+#endif
+
#endif /* SPL_UIO_H */
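With the xuio/zerocopy machinery removed, uio_t instances are now built through the explicit constructors added above: uio_iovec_init() for iovec-backed transfers, uio_bvec_init() for bios, and uio_iov_iter_init() where the kernel provides the iov_iter VFS interface. A sketch of the iovec case, with a hypothetical buffer:

    char buf[4096];
    struct iovec iov = { .iov_base = buf, .iov_len = sizeof (buf) };
    uio_t uio;

    /* one kernel-space segment, offset 0, full residual, nothing skipped */
    uio_iovec_init(&uio, &iov, 1, 0, UIO_SYSSPACE, sizeof (buf), 0);
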
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/vnode.h b/sys/contrib/openzfs/include/os/linux/spl/sys/vnode.h
index 6f17db89fe53..64c270650225 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/vnode.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/vnode.h
@@ -52,6 +52,12 @@
#define F_FREESP 11 /* Free file space */
+
+#if defined(SEEK_HOLE) && defined(SEEK_DATA)
+#define F_SEEK_DATA SEEK_DATA
+#define F_SEEK_HOLE SEEK_HOLE
+#endif
+
/*
* The vnode AT_ flags are mapped to the Linux ATTR_* flags.
* This allows them to be used safely with an iattr structure.
diff --git a/sys/contrib/openzfs/include/os/linux/zfs/sys/Makefile.am b/sys/contrib/openzfs/include/os/linux/zfs/sys/Makefile.am
index a5f2502d20e8..a075db476e40 100644
--- a/sys/contrib/openzfs/include/os/linux/zfs/sys/Makefile.am
+++ b/sys/contrib/openzfs/include/os/linux/zfs/sys/Makefile.am
@@ -21,7 +21,7 @@ KERNEL_H = \
zfs_ctldir.h \
zfs_dir.h \
zfs_vfsops_os.h \
- zfs_vnops.h \
+ zfs_vnops_os.h \
zfs_znode_impl.h \
zpl.h
diff --git a/sys/contrib/openzfs/include/os/linux/zfs/sys/policy.h b/sys/contrib/openzfs/include/os/linux/zfs/sys/policy.h
index 77a73ad149c5..61afc3765504 100644
--- a/sys/contrib/openzfs/include/os/linux/zfs/sys/policy.h
+++ b/sys/contrib/openzfs/include/os/linux/zfs/sys/policy.h
@@ -35,6 +35,8 @@
#include <sys/xvattr.h>
#include <sys/zpl.h>
+struct znode;
+
int secpolicy_nfs(const cred_t *);
int secpolicy_sys_config(const cred_t *, boolean_t);
int secpolicy_vnode_access2(const cred_t *, struct inode *,
@@ -44,7 +46,7 @@ int secpolicy_vnode_chown(const cred_t *, uid_t);
int secpolicy_vnode_create_gid(const cred_t *);
int secpolicy_vnode_remove(const cred_t *);
int secpolicy_vnode_setdac(const cred_t *, uid_t);
-int secpolicy_vnode_setid_retain(const cred_t *, boolean_t);
+int secpolicy_vnode_setid_retain(struct znode *, const cred_t *, boolean_t);
int secpolicy_vnode_setids_setgids(const cred_t *, gid_t);
int secpolicy_zinject(const cred_t *);
int secpolicy_zfs(const cred_t *);
diff --git a/sys/contrib/openzfs/include/os/linux/zfs/sys/trace_acl.h b/sys/contrib/openzfs/include/os/linux/zfs/sys/trace_acl.h
index 083560952f0b..4707fc6f4112 100644
--- a/sys/contrib/openzfs/include/os/linux/zfs/sys/trace_acl.h
+++ b/sys/contrib/openzfs/include/os/linux/zfs/sys/trace_acl.h
@@ -52,7 +52,6 @@ DECLARE_EVENT_CLASS(zfs_ace_class,
__field(uint8_t, z_unlinked)
__field(uint8_t, z_atime_dirty)
__field(uint8_t, z_zn_prefetch)
- __field(uint8_t, z_moved)
__field(uint_t, z_blksz)
__field(uint_t, z_seq)
__field(uint64_t, z_mapcnt)
@@ -86,7 +85,6 @@ DECLARE_EVENT_CLASS(zfs_ace_class,
__entry->z_unlinked = zn->z_unlinked;
__entry->z_atime_dirty = zn->z_atime_dirty;
__entry->z_zn_prefetch = zn->z_zn_prefetch;
- __entry->z_moved = zn->z_moved;
__entry->z_blksz = zn->z_blksz;
__entry->z_seq = zn->z_seq;
__entry->z_mapcnt = zn->z_mapcnt;
@@ -116,7 +114,7 @@ DECLARE_EVENT_CLASS(zfs_ace_class,
__entry->mask_matched = mask_matched;
),
TP_printk("zn { id %llu unlinked %u atime_dirty %u "
- "zn_prefetch %u moved %u blksz %u seq %u "
+ "zn_prefetch %u blksz %u seq %u "
"mapcnt %llu size %llu pflags %llu "
"sync_cnt %u mode 0x%x is_sa %d "
"is_mapped %d is_ctldir %d is_stale %d inode { "
@@ -124,7 +122,7 @@ DECLARE_EVENT_CLASS(zfs_ace_class,
"blkbits %u bytes %u mode 0x%x generation %x } } "
"ace { type %u flags %u access_mask %u } mask_matched %u",
__entry->z_id, __entry->z_unlinked, __entry->z_atime_dirty,
- __entry->z_zn_prefetch, __entry->z_moved, __entry->z_blksz,
+ __entry->z_zn_prefetch, __entry->z_blksz,
__entry->z_seq, __entry->z_mapcnt, __entry->z_size,
__entry->z_pflags, __entry->z_sync_cnt, __entry->z_mode,
__entry->z_is_sa, __entry->z_is_mapped,
diff --git a/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_vnops.h b/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_vnops.h
index 2b41f3863425..df307fc0350d 100644
--- a/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_vnops.h
+++ b/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_vnops.h
@@ -22,8 +22,8 @@
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
*/
-#ifndef _SYS_FS_ZFS_VNOPS_H
-#define _SYS_FS_ZFS_VNOPS_H
+#ifndef _SYS_FS_ZFS_VNOPS_OS_H
+#define _SYS_FS_ZFS_VNOPS_OS_H
#include <sys/vnode.h>
#include <sys/xvattr.h>
@@ -40,12 +40,8 @@ extern "C" {
extern int zfs_open(struct inode *ip, int mode, int flag, cred_t *cr);
extern int zfs_close(struct inode *ip, int flag, cred_t *cr);
-extern int zfs_holey(struct inode *ip, int cmd, loff_t *off);
-extern int zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr);
-extern int zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr);
extern int zfs_write_simple(znode_t *zp, const void *data, size_t len,
loff_t pos, size_t *resid);
-extern int zfs_access(struct inode *ip, int mode, int flag, cred_t *cr);
extern int zfs_lookup(znode_t *dzp, char *nm, znode_t **zpp, int flags,
cred_t *cr, int *direntflags, pathname_t *realpnp);
extern int zfs_create(znode_t *dzp, char *name, vattr_t *vap, int excl,
@@ -58,7 +54,6 @@ extern int zfs_mkdir(znode_t *dzp, char *dirname, vattr_t *vap,
extern int zfs_rmdir(znode_t *dzp, char *name, znode_t *cwd,
cred_t *cr, int flags);
extern int zfs_readdir(struct inode *ip, zpl_dir_context_t *ctx, cred_t *cr);
-extern int zfs_fsync(znode_t *zp, int syncflag, cred_t *cr);
extern int zfs_getattr_fast(struct inode *ip, struct kstat *sp);
extern int zfs_setattr(znode_t *zp, vattr_t *vap, int flag, cred_t *cr);
extern int zfs_rename(znode_t *sdzp, char *snm, znode_t *tdzp,
@@ -72,10 +67,6 @@ extern void zfs_inactive(struct inode *ip);
extern int zfs_space(znode_t *zp, int cmd, flock64_t *bfp, int flag,
offset_t offset, cred_t *cr);
extern int zfs_fid(struct inode *ip, fid_t *fidp);
-extern int zfs_getsecattr(struct inode *ip, vsecattr_t *vsecp, int flag,
- cred_t *cr);
-extern int zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag,
- cred_t *cr);
extern int zfs_getpage(struct inode *ip, struct page *pl[], int nr_pages);
extern int zfs_putpage(struct inode *ip, struct page *pp,
struct writeback_control *wbc);
diff --git a/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_znode_impl.h b/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_znode_impl.h
index e010970dd540..13e5fb653f5b 100644
--- a/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_znode_impl.h
+++ b/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_znode_impl.h
@@ -68,6 +68,10 @@ extern "C" {
#define Z_ISCHR(type) S_ISCHR(type)
#define Z_ISLNK(type) S_ISLNK(type)
#define Z_ISDEV(type) (S_ISCHR(type) || S_ISBLK(type) || S_ISFIFO(type))
+#define Z_ISDIR(type) S_ISDIR(type)
+
+#define zn_has_cached_data(zp) ((zp)->z_is_mapped)
+#define zn_rlimit_fsize(zp, uio, td) (0)
#define zhold(zp) igrab(ZTOI((zp)))
#define zrele(zp) iput(ZTOI((zp)))
@@ -90,7 +94,11 @@ do { \
zfs_exit_fs(zfsvfs); \
ZFS_EXIT_READ(zfsvfs, FTAG); \
} while (0)
-#define ZPL_EXIT(zfsvfs) ZFS_EXIT(zfsvfs)
+
+#define ZPL_EXIT(zfsvfs) \
+do { \
+ rrm_exit(&(zfsvfs)->z_teardown_lock, FTAG); \
+} while (0)
/* Verifies the znode is valid. */
#define ZFS_VERIFY_ZP_ERROR(zp, error) \
@@ -143,6 +151,8 @@ do { \
} while (0)
#endif /* HAVE_INODE_TIMESPEC64_TIMES */
+#define ZFS_ACCESSTIME_STAMP(zfsvfs, zp)
+
struct znode;
extern int zfs_sync(struct super_block *, int, cred_t *);
@@ -157,7 +167,6 @@ extern caddr_t zfs_map_page(page_t *, enum seg_rw);
extern void zfs_unmap_page(page_t *, caddr_t);
#endif /* HAVE_UIO_RW */
-extern zil_get_data_t zfs_get_data;
extern zil_replay_func_t *zfs_replay_vector[TX_MAX_TYPE];
extern int zfsfstype;
diff --git a/sys/contrib/openzfs/include/os/linux/zfs/sys/zpl.h b/sys/contrib/openzfs/include/os/linux/zfs/sys/zpl.h
index ef5a0b842d09..b0bb9c29c0b4 100644
--- a/sys/contrib/openzfs/include/os/linux/zfs/sys/zpl.h
+++ b/sys/contrib/openzfs/include/os/linux/zfs/sys/zpl.h
@@ -46,15 +46,6 @@ extern const struct inode_operations zpl_dir_inode_operations;
extern const struct inode_operations zpl_symlink_inode_operations;
extern const struct inode_operations zpl_special_inode_operations;
extern dentry_operations_t zpl_dentry_operations;
-
-/* zpl_file.c */
-extern ssize_t zpl_read_common(struct inode *ip, const char *buf,
- size_t len, loff_t *ppos, uio_seg_t segment, int flags,
- cred_t *cr);
-extern ssize_t zpl_write_common(struct inode *ip, const char *buf,
- size_t len, loff_t *ppos, uio_seg_t segment, int flags,
- cred_t *cr);
-
extern const struct address_space_operations zpl_address_space_operations;
extern const struct file_operations zpl_file_operations;
extern const struct file_operations zpl_dir_file_operations;
diff --git a/sys/contrib/openzfs/include/sys/Makefile.am b/sys/contrib/openzfs/include/sys/Makefile.am
index a944c5ea834d..c3ebf17b5288 100644
--- a/sys/contrib/openzfs/include/sys/Makefile.am
+++ b/sys/contrib/openzfs/include/sys/Makefile.am
@@ -82,6 +82,7 @@ COMMON_H = \
vdev_disk.h \
vdev_file.h \
vdev.h \
+ vdev_draid.h \
vdev_impl.h \
vdev_indirect_births.h \
vdev_indirect_mapping.h \
@@ -117,6 +118,7 @@ COMMON_H = \
zfs_stat.h \
zfs_sysfs.h \
zfs_vfsops.h \
+ zfs_vnops.h \
zfs_znode.h \
zil.h \
zil_impl.h \
diff --git a/sys/contrib/openzfs/include/sys/arc.h b/sys/contrib/openzfs/include/sys/arc.h
index a0852b4d5a70..f58fa53b6003 100644
--- a/sys/contrib/openzfs/include/sys/arc.h
+++ b/sys/contrib/openzfs/include/sys/arc.h
@@ -155,6 +155,11 @@ typedef enum arc_flags
ARC_FLAG_CACHED_ONLY = 1 << 22,
/*
+ * Don't instantiate an arc_buf_t for arc_read_done.
+ */
+ ARC_FLAG_NO_BUF = 1 << 23,
+
+ /*
* The arc buffer's compression mode is stored in the top 7 bits of the
* flags field, so these dummy flags are included so that MDB can
* interpret the enum properly.
@@ -305,6 +310,7 @@ int arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg);
uint64_t arc_all_memory(void);
uint64_t arc_default_max(uint64_t min, uint64_t allmem);
uint64_t arc_target_bytes(void);
+void arc_set_limits(uint64_t);
void arc_init(void);
void arc_fini(void);
diff --git a/sys/contrib/openzfs/include/sys/arc_impl.h b/sys/contrib/openzfs/include/sys/arc_impl.h
index c5061695d944..94123fc10e67 100644
--- a/sys/contrib/openzfs/include/sys/arc_impl.h
+++ b/sys/contrib/openzfs/include/sys/arc_impl.h
@@ -99,6 +99,7 @@ struct arc_callback {
boolean_t acb_encrypted;
boolean_t acb_compressed;
boolean_t acb_noauth;
+ boolean_t acb_nobuf;
zbookmark_phys_t acb_zb;
zio_t *acb_zio_dummy;
zio_t *acb_zio_head;
@@ -350,6 +351,8 @@ typedef struct l2arc_lb_ptr_buf {
#define L2BLK_SET_TYPE(field, x) BF64_SET((field), 48, 8, x)
#define L2BLK_GET_PROTECTED(field) BF64_GET((field), 56, 1)
#define L2BLK_SET_PROTECTED(field, x) BF64_SET((field), 56, 1, x)
+#define L2BLK_GET_STATE(field) BF64_GET((field), 57, 4)
+#define L2BLK_SET_STATE(field, x) BF64_SET((field), 57, 4, x)
#define PTR_SWAP(x, y) \
do { \
@@ -445,6 +448,7 @@ typedef struct l2arc_buf_hdr {
l2arc_dev_t *b_dev; /* L2ARC device */
uint64_t b_daddr; /* disk address, offset byte */
uint32_t b_hits;
+ arc_state_type_t b_arcs_state;
list_node_t b_l2node;
} l2arc_buf_hdr_t;
@@ -546,6 +550,8 @@ typedef struct arc_stats {
kstat_named_t arcstat_evict_not_enough;
kstat_named_t arcstat_evict_l2_cached;
kstat_named_t arcstat_evict_l2_eligible;
+ kstat_named_t arcstat_evict_l2_eligible_mfu;
+ kstat_named_t arcstat_evict_l2_eligible_mru;
kstat_named_t arcstat_evict_l2_ineligible;
kstat_named_t arcstat_evict_l2_skip;
kstat_named_t arcstat_hash_elements;
@@ -744,6 +750,18 @@ typedef struct arc_stats {
kstat_named_t arcstat_mfu_ghost_evictable_metadata;
kstat_named_t arcstat_l2_hits;
kstat_named_t arcstat_l2_misses;
+ /*
+ * Allocated size (in bytes) of L2ARC cached buffers by ARC state.
+ */
+ kstat_named_t arcstat_l2_prefetch_asize;
+ kstat_named_t arcstat_l2_mru_asize;
+ kstat_named_t arcstat_l2_mfu_asize;
+ /*
+ * Allocated size (in bytes) of L2ARC cached buffers by buffer content
+ * type.
+ */
+ kstat_named_t arcstat_l2_bufc_data_asize;
+ kstat_named_t arcstat_l2_bufc_metadata_asize;
kstat_named_t arcstat_l2_feeds;
kstat_named_t arcstat_l2_rw_clash;
kstat_named_t arcstat_l2_read_bytes;
@@ -909,6 +927,8 @@ extern int arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg);
extern uint64_t arc_free_memory(void);
extern int64_t arc_available_memory(void);
extern void arc_tuning_update(boolean_t);
+extern void arc_register_hotplug(void);
+extern void arc_unregister_hotplug(void);
extern int param_set_arc_long(ZFS_MODULE_PARAM_ARGS);
extern int param_set_arc_int(ZFS_MODULE_PARAM_ARGS);
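The new b_arcs_state member records which ARC state a buffer was in when it was written to L2ARC, and L2BLK_GET_STATE/L2BLK_SET_STATE pack that state into bits 57-60 of the log-entry property word alongside the existing compression, type, and protection fields; the added arcstat_evict_l2_eligible_{mru,mfu} and *_asize kstats expose the same breakdown. A sketch of the bit-field macros, with hypothetical variable names:

    /* Hedged sketch: stash and recover the ARC state of an L2ARC log entry. */
    uint64_t lbp_prop = 0;
    L2BLK_SET_STATE(lbp_prop, ARC_STATE_MFU);
    arc_state_type_t state = L2BLK_GET_STATE(lbp_prop);
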
diff --git a/sys/contrib/openzfs/include/sys/dbuf.h b/sys/contrib/openzfs/include/sys/dbuf.h
index 04338b2c491b..d221eac4c816 100644
--- a/sys/contrib/openzfs/include/sys/dbuf.h
+++ b/sys/contrib/openzfs/include/sys/dbuf.h
@@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
@@ -130,6 +130,16 @@ typedef struct dbuf_dirty_record {
/* list link for dbuf dirty records */
list_node_t dr_dbuf_node;
+ /*
+ * The dnode we are part of. Note that the dnode can not be moved or
+ * evicted due to the hold that's added by dnode_setdirty() or
+ * dmu_objset_sync_dnodes(), and released by dnode_rele_task() or
+ * userquota_updates_task(). This hold is necessary for
+ * dirty_lightweight_leaf-type dirty records, which don't have a hold
+ * on a dbuf.
+ */
+ dnode_t *dr_dnode;
+
/* pointer to parent dirty record */
struct dbuf_dirty_record *dr_parent;
@@ -171,6 +181,17 @@ typedef struct dbuf_dirty_record {
uint8_t dr_iv[ZIO_DATA_IV_LEN];
uint8_t dr_mac[ZIO_DATA_MAC_LEN];
} dl;
+ struct dirty_lightweight_leaf {
+ /*
+ * This dirty record refers to a leaf (level=0)
+ * block, whose dbuf has not been instantiated for
+ * performance reasons.
+ */
+ uint64_t dr_blkid;
+ abd_t *dr_abd;
+ zio_prop_t dr_props;
+ enum zio_flag dr_flags;
+ } dll;
} dt;
} dbuf_dirty_record_t;
@@ -309,6 +330,8 @@ typedef struct dbuf_hash_table {
kmutex_t hash_mutexes[DBUF_MUTEXES];
} dbuf_hash_table_t;
+typedef void (*dbuf_prefetch_fn)(void *, boolean_t);
+
uint64_t dbuf_whichblock(const struct dnode *di, const int64_t level,
const uint64_t offset);
@@ -324,7 +347,10 @@ int dbuf_hold_impl(struct dnode *dn, uint8_t level, uint64_t blkid,
boolean_t fail_sparse, boolean_t fail_uncached,
void *tag, dmu_buf_impl_t **dbp);
-void dbuf_prefetch(struct dnode *dn, int64_t level, uint64_t blkid,
+int dbuf_prefetch_impl(struct dnode *dn, int64_t level, uint64_t blkid,
+ zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
+ void *arg);
+int dbuf_prefetch(struct dnode *dn, int64_t level, uint64_t blkid,
zio_priority_t prio, arc_flags_t aflags);
void dbuf_add_ref(dmu_buf_impl_t *db, void *tag);
@@ -344,11 +370,16 @@ void dmu_buf_will_fill(dmu_buf_t *db, dmu_tx_t *tx);
void dmu_buf_fill_done(dmu_buf_t *db, dmu_tx_t *tx);
void dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx);
dbuf_dirty_record_t *dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
+dbuf_dirty_record_t *dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid,
+ dmu_tx_t *tx);
arc_buf_t *dbuf_loan_arcbuf(dmu_buf_impl_t *db);
void dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
bp_embedded_type_t etype, enum zio_compress comp,
int uncompressed_size, int compressed_size, int byteorder, dmu_tx_t *tx);
+int dmu_lightweight_write_by_dnode(dnode_t *dn, uint64_t offset, abd_t *abd,
+ const struct zio_prop *zp, enum zio_flag flags, dmu_tx_t *tx);
+
void dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx);
void dbuf_destroy(dmu_buf_impl_t *db);
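dbuf_prefetch() now returns an int so callers can tell whether a prefetch was actually issued, and dbuf_prefetch_impl() additionally takes a dbuf_prefetch_fn completion callback; the boolean_t handed to the callback appears to report whether the prefetch I/O ran. A sketch, with hypothetical names and that reading of the flag treated as an assumption:

    /* Hypothetical completion callback for dbuf_prefetch_impl(). */
    static void
    my_prefetch_done(void *arg, boolean_t io_issued)
    {
            /* e.g. count completions or wake a waiter */
    }

    /* ... later, with dn, blkid, and my_arg supplied by the caller ... */
    (void) dbuf_prefetch_impl(dn, 0, blkid, ZIO_PRIORITY_ASYNC_READ,
        ARC_FLAG_PREFETCH, my_prefetch_done, my_arg);
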
diff --git a/sys/contrib/openzfs/include/sys/dmu.h b/sys/contrib/openzfs/include/sys/dmu.h
index 54fdbc9ad227..0c50d0409b2b 100644
--- a/sys/contrib/openzfs/include/sys/dmu.h
+++ b/sys/contrib/openzfs/include/sys/dmu.h
@@ -864,18 +864,6 @@ int dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset,
int dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset,
struct arc_buf *buf, dmu_tx_t *tx);
#define dmu_assign_arcbuf dmu_assign_arcbuf_by_dbuf
-#ifdef HAVE_UIO_ZEROCOPY
-int dmu_xuio_init(struct xuio *uio, int niov);
-void dmu_xuio_fini(struct xuio *uio);
-int dmu_xuio_add(struct xuio *uio, struct arc_buf *abuf, offset_t off,
- size_t n);
-int dmu_xuio_cnt(struct xuio *uio);
-struct arc_buf *dmu_xuio_arcbuf(struct xuio *uio, int i);
-void dmu_xuio_clear(struct xuio *uio, int i);
-#endif /* HAVE_UIO_ZEROCOPY */
-void xuio_stat_wbuf_copied(void);
-void xuio_stat_wbuf_nocopy(void);
-
extern int zfs_prefetch_disable;
extern int zfs_max_recordsize;
diff --git a/sys/contrib/openzfs/include/sys/dmu_impl.h b/sys/contrib/openzfs/include/sys/dmu_impl.h
index 0c6273a3a727..def4aadba1d0 100644
--- a/sys/contrib/openzfs/include/sys/dmu_impl.h
+++ b/sys/contrib/openzfs/include/sys/dmu_impl.h
@@ -237,13 +237,6 @@ extern "C" {
struct objset;
struct dmu_pool;
-typedef struct dmu_xuio {
- int next;
- int cnt;
- struct arc_buf **bufs;
- iovec_t *iovp;
-} dmu_xuio_t;
-
typedef struct dmu_sendstatus {
list_node_t dss_link;
int dss_outfd;
diff --git a/sys/contrib/openzfs/include/sys/dmu_objset.h b/sys/contrib/openzfs/include/sys/dmu_objset.h
index 1af69832c5d3..a8cb812714ec 100644
--- a/sys/contrib/openzfs/include/sys/dmu_objset.h
+++ b/sys/contrib/openzfs/include/sys/dmu_objset.h
@@ -242,10 +242,10 @@ objset_t *dmu_objset_create_impl(spa_t *spa, struct dsl_dataset *ds,
int dmu_objset_open_impl(spa_t *spa, struct dsl_dataset *ds, blkptr_t *bp,
objset_t **osp);
void dmu_objset_evict(objset_t *os);
-void dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx);
+void dmu_objset_sync_done(objset_t *os, dmu_tx_t *tx);
void dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx);
boolean_t dmu_objset_userused_enabled(objset_t *os);
-int dmu_objset_userspace_upgrade(objset_t *os);
+void dmu_objset_userspace_upgrade(objset_t *os);
boolean_t dmu_objset_userspace_present(objset_t *os);
boolean_t dmu_objset_userobjused_enabled(objset_t *os);
boolean_t dmu_objset_userobjspace_upgradable(objset_t *os);
diff --git a/sys/contrib/openzfs/include/sys/dmu_zfetch.h b/sys/contrib/openzfs/include/sys/dmu_zfetch.h
index 4303ab314ced..34b711fc06e5 100644
--- a/sys/contrib/openzfs/include/sys/dmu_zfetch.h
+++ b/sys/contrib/openzfs/include/sys/dmu_zfetch.h
@@ -40,6 +40,13 @@ extern unsigned long zfetch_array_rd_sz;
struct dnode; /* so we can reference dnode */
+typedef struct zfetch {
+ kmutex_t zf_lock; /* protects zfetch structure */
+ list_t zf_stream; /* list of zstream_t's */
+ struct dnode *zf_dnode; /* dnode that owns this zfetch */
+ int zf_numstreams; /* number of zstream_t's */
+} zfetch_t;
+
typedef struct zstream {
uint64_t zs_blkid; /* expect next access at this blkid */
uint64_t zs_pf_blkid; /* next block to prefetch */
@@ -52,15 +59,12 @@ typedef struct zstream {
kmutex_t zs_lock; /* protects stream */
hrtime_t zs_atime; /* time last prefetch issued */
+ hrtime_t zs_start_time; /* start of last prefetch */
list_node_t zs_node; /* link for zf_stream */
+ zfetch_t *zs_fetch; /* parent fetch */
+ zfs_refcount_t zs_blocks; /* number of pending blocks in the stream */
} zstream_t;
-typedef struct zfetch {
- kmutex_t zf_lock; /* protects zfetch structure */
- list_t zf_stream; /* list of zstream_t's */
- struct dnode *zf_dnode; /* dnode that owns this zfetch */
-} zfetch_t;
-
void zfetch_init(void);
void zfetch_fini(void);
diff --git a/sys/contrib/openzfs/include/sys/dsl_dataset.h b/sys/contrib/openzfs/include/sys/dsl_dataset.h
index f5816a934c5f..ed934f969e92 100644
--- a/sys/contrib/openzfs/include/sys/dsl_dataset.h
+++ b/sys/contrib/openzfs/include/sys/dsl_dataset.h
@@ -316,6 +316,7 @@ typedef struct dsl_dataset_snapshot_arg {
/* flags for holding the dataset */
typedef enum ds_hold_flags {
+ DS_HOLD_FLAG_NONE = 0 << 0,
DS_HOLD_FLAG_DECRYPT = 1 << 0 /* needs access to encrypted data */
} ds_hold_flags_t;
diff --git a/sys/contrib/openzfs/include/sys/dsl_scan.h b/sys/contrib/openzfs/include/sys/dsl_scan.h
index 8f929207d2d7..19c3dd599b10 100644
--- a/sys/contrib/openzfs/include/sys/dsl_scan.h
+++ b/sys/contrib/openzfs/include/sys/dsl_scan.h
@@ -163,6 +163,7 @@ typedef struct dsl_scan_io_queue dsl_scan_io_queue_t;
void scan_init(void);
void scan_fini(void);
int dsl_scan_init(struct dsl_pool *dp, uint64_t txg);
+void dsl_scan_setup_sync(void *, dmu_tx_t *);
void dsl_scan_fini(struct dsl_pool *dp);
void dsl_scan_sync(struct dsl_pool *, dmu_tx_t *);
int dsl_scan_cancel(struct dsl_pool *);
diff --git a/sys/contrib/openzfs/include/sys/frame.h b/sys/contrib/openzfs/include/sys/frame.h
index b6bbaa79b2f8..caae851421d8 100644
--- a/sys/contrib/openzfs/include/sys/frame.h
+++ b/sys/contrib/openzfs/include/sys/frame.h
@@ -25,7 +25,11 @@ extern "C" {
#if defined(__KERNEL__) && defined(HAVE_KERNEL_OBJTOOL) && \
defined(HAVE_STACK_FRAME_NON_STANDARD)
+#if defined(HAVE_KERNEL_OBJTOOL_HEADER)
+#include <linux/objtool.h>
+#else
#include <linux/frame.h>
+#endif
#else
#define STACK_FRAME_NON_STANDARD(func)
#endif
diff --git a/sys/contrib/openzfs/include/sys/fs/zfs.h b/sys/contrib/openzfs/include/sys/fs/zfs.h
index fe63d735babc..60c1b84602a3 100644
--- a/sys/contrib/openzfs/include/sys/fs/zfs.h
+++ b/sys/contrib/openzfs/include/sys/fs/zfs.h
@@ -249,7 +249,7 @@ typedef enum {
ZPOOL_NUM_PROPS
} zpool_prop_t;
-/* Small enough to not hog a whole line of printout in zpool(1M). */
+/* Small enough to not hog a whole line of printout in zpool(8). */
#define ZPROP_MAX_COMMENT 32
#define ZPROP_VALUE "value"
@@ -617,6 +617,7 @@ typedef struct zpool_load_policy {
#define ZPOOL_CONFIG_PREV_INDIRECT_VDEV "com.delphix:prev_indirect_vdev"
#define ZPOOL_CONFIG_PATH "path"
#define ZPOOL_CONFIG_DEVID "devid"
+#define ZPOOL_CONFIG_SPARE_ID "spareid"
#define ZPOOL_CONFIG_METASLAB_ARRAY "metaslab_array"
#define ZPOOL_CONFIG_METASLAB_SHIFT "metaslab_shift"
#define ZPOOL_CONFIG_ASHIFT "ashift"
@@ -757,10 +758,17 @@ typedef struct zpool_load_policy {
#define ZPOOL_CONFIG_LOAD_DATA_ERRORS "verify_data_errors"
#define ZPOOL_CONFIG_REWIND_TIME "seconds_of_rewind"
+/* dRAID configuration */
+#define ZPOOL_CONFIG_DRAID_NDATA "draid_ndata"
+#define ZPOOL_CONFIG_DRAID_NSPARES "draid_nspares"
+#define ZPOOL_CONFIG_DRAID_NGROUPS "draid_ngroups"
+
#define VDEV_TYPE_ROOT "root"
#define VDEV_TYPE_MIRROR "mirror"
#define VDEV_TYPE_REPLACING "replacing"
#define VDEV_TYPE_RAIDZ "raidz"
+#define VDEV_TYPE_DRAID "draid"
+#define VDEV_TYPE_DRAID_SPARE "dspare"
#define VDEV_TYPE_DISK "disk"
#define VDEV_TYPE_FILE "file"
#define VDEV_TYPE_MISSING "missing"
@@ -770,6 +778,12 @@ typedef struct zpool_load_policy {
#define VDEV_TYPE_L2CACHE "l2cache"
#define VDEV_TYPE_INDIRECT "indirect"
+#define VDEV_RAIDZ_MAXPARITY 3
+
+#define VDEV_DRAID_MAXPARITY 3
+#define VDEV_DRAID_MIN_CHILDREN 2
+#define VDEV_DRAID_MAX_CHILDREN UINT8_MAX
+
/* VDEV_TOP_ZAP_* are used in top-level vdev ZAP objects. */
#define VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM \
"com.delphix:indirect_obsolete_sm"
@@ -1438,6 +1452,7 @@ typedef enum {
#define ZPOOL_HIST_IOCTL "ioctl"
#define ZPOOL_HIST_INPUT_NVL "in_nvl"
#define ZPOOL_HIST_OUTPUT_NVL "out_nvl"
+#define ZPOOL_HIST_OUTPUT_SIZE "out_size"
#define ZPOOL_HIST_DSNAME "dsname"
#define ZPOOL_HIST_DSID "dsid"
#define ZPOOL_HIST_ERRNO "errno"
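The new ZPOOL_CONFIG_DRAID_* keys and the VDEV_TYPE_DRAID/VDEV_TYPE_DRAID_SPARE names describe a dRAID top-level vdev in the pool config: parity, data columns per redundancy group, distributed spares, and group count. A sketch of the nvlist such a vdev would carry; the numeric values are illustrative only, and ZPOOL_CONFIG_NPARITY is the pre-existing parity key:

    nvlist_t *nv;

    /* Hedged sketch: config keys for a hypothetical draid2 top-level vdev. */
    VERIFY0(nvlist_alloc(&nv, NV_UNIQUE_NAME, 0));
    VERIFY0(nvlist_add_string(nv, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DRAID));
    VERIFY0(nvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, 2));
    VERIFY0(nvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA, 8));
    VERIFY0(nvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NSPARES, 1));
    VERIFY0(nvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NGROUPS, 3));
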
diff --git a/sys/contrib/openzfs/include/sys/metaslab.h b/sys/contrib/openzfs/include/sys/metaslab.h
index b3b7f865536e..ecff65f13de5 100644
--- a/sys/contrib/openzfs/include/sys/metaslab.h
+++ b/sys/contrib/openzfs/include/sys/metaslab.h
@@ -78,6 +78,7 @@ uint64_t metaslab_largest_allocatable(metaslab_t *);
#define METASLAB_DONT_THROTTLE 0x10
#define METASLAB_MUST_RESERVE 0x20
#define METASLAB_FASTWRITE 0x40
+#define METASLAB_ZIL 0x80
int metaslab_alloc(spa_t *, metaslab_class_t *, uint64_t,
blkptr_t *, int, uint64_t, blkptr_t *, int, zio_alloc_list_t *, zio_t *,
diff --git a/sys/contrib/openzfs/include/sys/metaslab_impl.h b/sys/contrib/openzfs/include/sys/metaslab_impl.h
index 4a7475256a2b..3be0c466c403 100644
--- a/sys/contrib/openzfs/include/sys/metaslab_impl.h
+++ b/sys/contrib/openzfs/include/sys/metaslab_impl.h
@@ -137,6 +137,29 @@ typedef enum trace_alloc_type {
#define WEIGHT_SET_COUNT(weight, x) BF64_SET((weight), 0, 54, x)
/*
+ * Per-allocator data structure.
+ */
+typedef struct metaslab_class_allocator {
+ metaslab_group_t *mca_rotor;
+ uint64_t mca_aliquot;
+
+ /*
+ * The allocation throttle works on a reservation system. Whenever
+ * an asynchronous zio wants to perform an allocation it must
+ * first reserve the number of blocks that it wants to allocate.
+ * If there aren't sufficient slots available for the pending zio
+ * then that I/O is throttled until more slots free up. The current
+ * number of reserved allocations is maintained by the mca_alloc_slots
+ * refcount. The mca_alloc_max_slots value determines the maximum
+ * number of allocations that the system allows. Gang blocks are
+ * allowed to reserve slots even if we've reached the maximum
+ * number of allocations allowed.
+ */
+ uint64_t mca_alloc_max_slots;
+ zfs_refcount_t mca_alloc_slots;
+} metaslab_class_allocator_t;
+
+/*
* A metaslab class encompasses a category of allocatable top-level vdevs.
* Each top-level vdev is associated with a metaslab group which defines
* the allocatable region for that vdev. Examples of these categories include
@@ -145,7 +168,7 @@ typedef enum trace_alloc_type {
* When a block allocation is requested from the SPA it is associated with a
* metaslab_class_t, and only top-level vdevs (i.e. metaslab groups) belonging
* to the class can be used to satisfy that request. Allocations are done
- * by traversing the metaslab groups that are linked off of the mc_rotor field.
+ * by traversing the metaslab groups that are linked off of the mca_rotor field.
* This rotor points to the next metaslab group where allocations will be
* attempted. Allocating a block is a 3 step process -- select the metaslab
* group, select the metaslab, and then allocate the block. The metaslab
@@ -156,9 +179,7 @@ typedef enum trace_alloc_type {
struct metaslab_class {
kmutex_t mc_lock;
spa_t *mc_spa;
- metaslab_group_t *mc_rotor;
metaslab_ops_t *mc_ops;
- uint64_t mc_aliquot;
/*
* Track the number of metaslab groups that have been initialized
@@ -173,21 +194,6 @@ struct metaslab_class {
*/
boolean_t mc_alloc_throttle_enabled;
- /*
- * The allocation throttle works on a reservation system. Whenever
- * an asynchronous zio wants to perform an allocation it must
- * first reserve the number of blocks that it wants to allocate.
- * If there aren't sufficient slots available for the pending zio
- * then that I/O is throttled until more slots free up. The current
- * number of reserved allocations is maintained by the mc_alloc_slots
- * refcount. The mc_alloc_max_slots value determines the maximum
- * number of allocations that the system allows. Gang blocks are
- * allowed to reserve slots even if we've reached the maximum
- * number of allocations allowed.
- */
- uint64_t *mc_alloc_max_slots;
- zfs_refcount_t *mc_alloc_slots;
-
uint64_t mc_alloc_groups; /* # of allocatable groups */
uint64_t mc_alloc; /* total allocated space */
@@ -201,6 +207,8 @@ struct metaslab_class {
* recent use.
*/
multilist_t *mc_metaslab_txg_list;
+
+ metaslab_class_allocator_t mc_allocator[];
};
/*
@@ -258,7 +266,7 @@ struct metaslab_group {
*
* Each allocator in each metaslab group has a current queue depth
* (mg_alloc_queue_depth[allocator]) and a current max queue depth
- * (mg_cur_max_alloc_queue_depth[allocator]), and each metaslab group
+ * (mga_cur_max_alloc_queue_depth[allocator]), and each metaslab group
* has an absolute max queue depth (mg_max_alloc_queue_depth). We
* add IOs to an allocator until the mg_alloc_queue_depth for that
* allocator hits the cur_max. Every time an IO completes for a given
@@ -271,8 +279,7 @@ struct metaslab_group {
* groups are unable to handle their share of allocations.
*/
uint64_t mg_max_alloc_queue_depth;
- int mg_allocators;
- metaslab_group_allocator_t *mg_allocator; /* array */
+
/*
 * A metaslab group that can no longer allocate the minimum block
* size will set mg_no_free_space. Once a metaslab group is out
@@ -290,6 +297,9 @@ struct metaslab_group {
boolean_t mg_disabled_updating;
kmutex_t mg_ms_disabled_lock;
kcondvar_t mg_ms_disabled_cv;
+
+ int mg_allocators;
+ metaslab_group_allocator_t mg_allocator[];
};
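The per-allocator queue-depth throttle described in the comment earlier in this struct can be pictured with a small sketch. The field names mga_alloc_queue_depth and mga_cur_max_alloc_queue_depth are assumed from that comment (only the latter appears in this hunk), so treat this as an illustration rather than the shipped logic.

/*
 * Sketch only: decide whether one metaslab group allocator can accept
 * another allocation without exceeding its current max queue depth.
 */
static boolean_t
mg_allocator_has_room_sketch(metaslab_group_t *mg, int allocator)
{
	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
	uint64_t qdepth = zfs_refcount_count(&mga->mga_alloc_queue_depth);

	/* The per-allocator max is itself capped by the group's absolute max. */
	ASSERT3U(mga->mga_cur_max_alloc_queue_depth, <=,
	    mg->mg_max_alloc_queue_depth);

	return (qdepth < mga->mga_cur_max_alloc_queue_depth);
}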
/*
diff --git a/sys/contrib/openzfs/include/sys/spa_impl.h b/sys/contrib/openzfs/include/sys/spa_impl.h
index 3eb87d2bb220..a3afaef38721 100644
--- a/sys/contrib/openzfs/include/sys/spa_impl.h
+++ b/sys/contrib/openzfs/include/sys/spa_impl.h
@@ -240,8 +240,9 @@ struct spa {
kcondvar_t spa_evicting_os_cv; /* Objset Eviction Completion */
txg_list_t spa_vdev_txg_list; /* per-txg dirty vdev list */
vdev_t *spa_root_vdev; /* top-level vdev container */
- int spa_min_ashift; /* of vdevs in normal class */
- int spa_max_ashift; /* of vdevs in normal class */
+ uint64_t spa_min_ashift; /* of vdevs in normal class */
+ uint64_t spa_max_ashift; /* of vdevs in normal class */
+ uint64_t spa_min_alloc; /* of vdevs in normal class */
uint64_t spa_config_guid; /* config pool guid */
uint64_t spa_load_guid; /* spa_load initialized guid */
uint64_t spa_last_synced_guid; /* last synced guid */
diff --git a/sys/contrib/openzfs/include/sys/txg.h b/sys/contrib/openzfs/include/sys/txg.h
index 260a3b43cfe8..22158bd1a5e6 100644
--- a/sys/contrib/openzfs/include/sys/txg.h
+++ b/sys/contrib/openzfs/include/sys/txg.h
@@ -41,6 +41,7 @@ extern "C" {
#define TXG_MASK (TXG_SIZE - 1) /* mask for size */
#define TXG_INITIAL TXG_SIZE /* initial txg */
#define TXG_IDX (txg & TXG_MASK)
+#define TXG_UNKNOWN 0
/* Number of txgs worth of frees we defer adding to in-core spacemaps */
#define TXG_DEFER_SIZE 2
diff --git a/sys/contrib/openzfs/include/sys/vdev.h b/sys/contrib/openzfs/include/sys/vdev.h
index 309ce33be067..7bc72a03db1c 100644
--- a/sys/contrib/openzfs/include/sys/vdev.h
+++ b/sys/contrib/openzfs/include/sys/vdev.h
@@ -49,10 +49,13 @@ typedef enum vdev_dtl_type {
extern int zfs_nocacheflush;
+typedef boolean_t vdev_open_children_func_t(vdev_t *vd);
+
extern void vdev_dbgmsg(vdev_t *vd, const char *fmt, ...);
extern void vdev_dbgmsg_print_tree(vdev_t *, int);
extern int vdev_open(vdev_t *);
extern void vdev_open_children(vdev_t *);
+extern void vdev_open_children_subset(vdev_t *, vdev_open_children_func_t *);
extern int vdev_validate(vdev_t *);
extern int vdev_copy_path_strict(vdev_t *, vdev_t *);
extern void vdev_copy_path_relaxed(vdev_t *, vdev_t *);
@@ -71,7 +74,10 @@ extern void vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t d,
extern boolean_t vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t d,
uint64_t txg, uint64_t size);
extern boolean_t vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t d);
-extern boolean_t vdev_dtl_need_resilver(vdev_t *vd, uint64_t off, size_t size);
+extern boolean_t vdev_default_need_resilver(vdev_t *vd, const dva_t *dva,
+ size_t psize, uint64_t phys_birth);
+extern boolean_t vdev_dtl_need_resilver(vdev_t *vd, const dva_t *dva,
+ size_t psize, uint64_t phys_birth);
extern void vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
boolean_t scrub_done, boolean_t rebuild_done);
extern boolean_t vdev_dtl_required(vdev_t *vd);
@@ -97,8 +103,14 @@ extern void vdev_metaslab_set_size(vdev_t *);
extern void vdev_expand(vdev_t *vd, uint64_t txg);
extern void vdev_split(vdev_t *vd);
extern void vdev_deadman(vdev_t *vd, char *tag);
+
+typedef void vdev_xlate_func_t(void *arg, range_seg64_t *physical_rs);
+
+extern boolean_t vdev_xlate_is_empty(range_seg64_t *rs);
extern void vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
- range_seg64_t *physical_rs);
+ range_seg64_t *physical_rs, range_seg64_t *remain_rs);
+extern void vdev_xlate_walk(vdev_t *vd, const range_seg64_t *logical_rs,
+ vdev_xlate_func_t *func, void *arg);
extern void vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx);
extern void vdev_get_stats(vdev_t *vd, vdev_stat_t *vs);
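The new vdev_xlate_walk() interface hands each translated physical range to a caller-supplied vdev_xlate_func_t. A minimal caller might look like the sketch below; only the signatures come from the declarations above, and the helper names are hypothetical.

/* Sketch: count how many physical ranges a logical range maps onto. */
static void
count_physical_range(void *arg, range_seg64_t *physical_rs)
{
	uint64_t *count = arg;

	if (!vdev_xlate_is_empty(physical_rs))
		(*count)++;
}

static uint64_t
logical_range_pieces_sketch(vdev_t *vd, uint64_t start, uint64_t size)
{
	range_seg64_t logical_rs = { .rs_start = start, .rs_end = start + size };
	uint64_t count = 0;

	vdev_xlate_walk(vd, &logical_rs, count_physical_range, &count);
	return (count);
}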
diff --git a/sys/contrib/openzfs/include/sys/vdev_draid.h b/sys/contrib/openzfs/include/sys/vdev_draid.h
new file mode 100644
index 000000000000..65417a93c4ed
--- /dev/null
+++ b/sys/contrib/openzfs/include/sys/vdev_draid.h
@@ -0,0 +1,110 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2016, Intel Corporation.
+ * Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
+ */
+
+#ifndef _SYS_VDEV_DRAID_H
+#define _SYS_VDEV_DRAID_H
+
+#include <sys/types.h>
+#include <sys/abd.h>
+#include <sys/nvpair.h>
+#include <sys/zio.h>
+#include <sys/vdev_impl.h>
+#include <sys/vdev_raidz_impl.h>
+#include <sys/vdev.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Constants required to generate and use dRAID permutations.
+ */
+#define VDEV_DRAID_SEED 0xd7a1d5eed
+#define VDEV_DRAID_MAX_MAPS 254
+#define VDEV_DRAID_ROWSHIFT SPA_MAXBLOCKSHIFT
+#define VDEV_DRAID_ROWHEIGHT (1ULL << VDEV_DRAID_ROWSHIFT)
+#define VDEV_DRAID_REFLOW_RESERVE (2 * VDEV_DRAID_ROWHEIGHT)
+
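For a sense of scale (assuming SPA_MAXBLOCKSHIFT is 24, i.e. a 16 MiB maximum block size, as in current OpenZFS): VDEV_DRAID_ROWHEIGHT = 1 << 24 = 16 MiB, so VDEV_DRAID_REFLOW_RESERVE = 2 * 16 MiB = 32 MiB.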
+/*
+ * dRAID permutation map.
+ */
+typedef struct draid_map {
+ uint64_t dm_children; /* # of permutation columns */
+ uint64_t dm_nperms; /* # of permutation rows */
+ uint64_t dm_seed; /* dRAID map seed */
+ uint64_t dm_checksum; /* Checksum of generated map */
+ uint8_t *dm_perms; /* base permutation array */
+} draid_map_t;
+
+/*
+ * dRAID configuration.
+ */
+typedef struct vdev_draid_config {
+ /*
+ * Values read from the dRAID nvlist configuration.
+ */
+ uint64_t vdc_ndata; /* # of data devices in group */
+ uint64_t vdc_nparity; /* # of parity devices in group */
+ uint64_t vdc_nspares; /* # of distributed spares */
+ uint64_t vdc_children; /* # of children */
+ uint64_t vdc_ngroups; /* # groups per slice */
+
+ /*
+ * Immutable derived constants.
+ */
+ uint8_t *vdc_perms; /* permutation array */
+ uint64_t vdc_nperms; /* # of permutations */
+ uint64_t vdc_groupwidth; /* = data + parity */
+ uint64_t vdc_ndisks; /* = children - spares */
+ uint64_t vdc_groupsz; /* = groupwidth * DRAID_ROWSIZE */
+ uint64_t vdc_devslicesz; /* = (groupsz * groups) / ndisks */
+} vdev_draid_config_t;
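The derived constants follow directly from the formulas in the field comments above. The sketch below works them through for a hypothetical draid2:4d:1s:8c layout; vdc_ngroups = 7 is simply assumed here so that groupwidth * ngroups (42) divides evenly across ndisks (7), and the row size is left symbolic, since the real code derives these values itself.

/* Sketch only: derive the immutable dRAID constants for one example. */
static void
draid_derive_sketch(vdev_draid_config_t *vdc, uint64_t rowsize)
{
	vdc->vdc_ndata = 4;
	vdc->vdc_nparity = 2;
	vdc->vdc_nspares = 1;
	vdc->vdc_children = 8;
	vdc->vdc_ngroups = 7;		/* assumed, see lead-in */

	vdc->vdc_groupwidth = vdc->vdc_ndata + vdc->vdc_nparity;	/* 6 */
	vdc->vdc_ndisks = vdc->vdc_children - vdc->vdc_nspares;		/* 7 */
	vdc->vdc_groupsz = vdc->vdc_groupwidth * rowsize;	/* 6 rows worth */
	vdc->vdc_devslicesz = (vdc->vdc_groupsz * vdc->vdc_ngroups) /
	    vdc->vdc_ndisks;					/* 6 rows worth */
}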
+
+/*
+ * Functions for handling dRAID permutation maps.
+ */
+extern uint64_t vdev_draid_rand(uint64_t *);
+extern int vdev_draid_lookup_map(uint64_t, const draid_map_t **);
+extern int vdev_draid_generate_perms(const draid_map_t *, uint8_t **);
+
+/*
+ * General dRAID support functions.
+ */
+extern boolean_t vdev_draid_readable(vdev_t *, uint64_t);
+extern boolean_t vdev_draid_missing(vdev_t *, uint64_t, uint64_t, uint64_t);
+extern uint64_t vdev_draid_asize_to_psize(vdev_t *, uint64_t);
+extern void vdev_draid_map_alloc_empty(zio_t *, struct raidz_row *);
+extern nvlist_t *vdev_draid_read_config_spare(vdev_t *);
+
+/* Functions for dRAID distributed spares. */
+extern vdev_t *vdev_draid_spare_get_child(vdev_t *, uint64_t);
+extern vdev_t *vdev_draid_spare_get_parent(vdev_t *);
+extern int vdev_draid_spare_create(nvlist_t *, vdev_t *, uint64_t *, uint64_t);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_VDEV_DRAID_H */
diff --git a/sys/contrib/openzfs/include/sys/vdev_impl.h b/sys/contrib/openzfs/include/sys/vdev_impl.h
index 3c4c3fb5a279..fc169842a86b 100644
--- a/sys/contrib/openzfs/include/sys/vdev_impl.h
+++ b/sys/contrib/openzfs/include/sys/vdev_impl.h
@@ -68,14 +68,19 @@ extern uint32_t zfs_vdev_async_write_max_active;
/*
* Virtual device operations
*/
+typedef int vdev_init_func_t(spa_t *spa, nvlist_t *nv, void **tsd);
+typedef void vdev_fini_func_t(vdev_t *vd);
typedef int vdev_open_func_t(vdev_t *vd, uint64_t *size, uint64_t *max_size,
uint64_t *ashift, uint64_t *pshift);
typedef void vdev_close_func_t(vdev_t *vd);
typedef uint64_t vdev_asize_func_t(vdev_t *vd, uint64_t psize);
+typedef uint64_t vdev_min_asize_func_t(vdev_t *vd);
+typedef uint64_t vdev_min_alloc_func_t(vdev_t *vd);
typedef void vdev_io_start_func_t(zio_t *zio);
typedef void vdev_io_done_func_t(zio_t *zio);
typedef void vdev_state_change_func_t(vdev_t *vd, int, int);
-typedef boolean_t vdev_need_resilver_func_t(vdev_t *vd, uint64_t, size_t);
+typedef boolean_t vdev_need_resilver_func_t(vdev_t *vd, const dva_t *dva,
+ size_t psize, uint64_t phys_birth);
typedef void vdev_hold_func_t(vdev_t *vd);
typedef void vdev_rele_func_t(vdev_t *vd);
@@ -87,13 +92,24 @@ typedef void vdev_remap_func_t(vdev_t *vd, uint64_t offset, uint64_t size,
* Given a target vdev, translates the logical range "in" to the physical
* range "res"
*/
-typedef void vdev_xlation_func_t(vdev_t *cvd, const range_seg64_t *in,
- range_seg64_t *res);
+typedef void vdev_xlation_func_t(vdev_t *cvd, const range_seg64_t *logical,
+ range_seg64_t *physical, range_seg64_t *remain);
+typedef uint64_t vdev_rebuild_asize_func_t(vdev_t *vd, uint64_t start,
+ uint64_t size, uint64_t max_segment);
+typedef void vdev_metaslab_init_func_t(vdev_t *vd, uint64_t *startp,
+ uint64_t *sizep);
+typedef void vdev_config_generate_func_t(vdev_t *vd, nvlist_t *nv);
+typedef uint64_t vdev_nparity_func_t(vdev_t *vd);
+typedef uint64_t vdev_ndisks_func_t(vdev_t *vd);
typedef const struct vdev_ops {
+ vdev_init_func_t *vdev_op_init;
+ vdev_fini_func_t *vdev_op_fini;
vdev_open_func_t *vdev_op_open;
vdev_close_func_t *vdev_op_close;
vdev_asize_func_t *vdev_op_asize;
+ vdev_min_asize_func_t *vdev_op_min_asize;
+ vdev_min_alloc_func_t *vdev_op_min_alloc;
vdev_io_start_func_t *vdev_op_io_start;
vdev_io_done_func_t *vdev_op_io_done;
vdev_state_change_func_t *vdev_op_state_change;
@@ -101,11 +117,12 @@ typedef const struct vdev_ops {
vdev_hold_func_t *vdev_op_hold;
vdev_rele_func_t *vdev_op_rele;
vdev_remap_func_t *vdev_op_remap;
- /*
- * For translating ranges from non-leaf vdevs (e.g. raidz) to leaves.
- * Used when initializing vdevs. Isn't used by leaf ops.
- */
vdev_xlation_func_t *vdev_op_xlate;
+ vdev_rebuild_asize_func_t *vdev_op_rebuild_asize;
+ vdev_metaslab_init_func_t *vdev_op_metaslab_init;
+ vdev_config_generate_func_t *vdev_op_config_generate;
+ vdev_nparity_func_t *vdev_op_nparity;
+ vdev_ndisks_func_t *vdev_op_ndisks;
char vdev_op_type[16];
boolean_t vdev_op_leaf;
} vdev_ops_t;
@@ -148,6 +165,9 @@ struct vdev_queue {
avl_tree_t vq_write_offset_tree;
avl_tree_t vq_trim_offset_tree;
uint64_t vq_last_offset;
+ zio_priority_t vq_last_prio; /* Last sent I/O priority. */
+ uint32_t vq_ia_active; /* Active interactive I/Os. */
+ uint32_t vq_nia_credit; /* Non-interactive I/Os credit. */
hrtime_t vq_io_complete_ts; /* time last i/o completed */
hrtime_t vq_io_delta_ts;
zio_t vq_io_search; /* used as local for stack reduction */
@@ -325,16 +345,13 @@ struct vdev {
kthread_t *vdev_rebuild_thread;
vdev_rebuild_t vdev_rebuild_config;
- /* For limiting outstanding I/Os (initialize, TRIM, rebuild) */
+ /* For limiting outstanding I/Os (initialize, TRIM) */
kmutex_t vdev_initialize_io_lock;
kcondvar_t vdev_initialize_io_cv;
uint64_t vdev_initialize_inflight;
kmutex_t vdev_trim_io_lock;
kcondvar_t vdev_trim_io_cv;
uint64_t vdev_trim_inflight[3];
- kmutex_t vdev_rebuild_io_lock;
- kcondvar_t vdev_rebuild_io_cv;
- uint64_t vdev_rebuild_inflight;
/*
* Values stored in the config for an indirect or removing vdev.
@@ -392,7 +409,6 @@ struct vdev {
uint64_t vdev_removed; /* persistent removed state */
uint64_t vdev_resilver_txg; /* persistent resilvering state */
uint64_t vdev_rebuild_txg; /* persistent rebuilding state */
- uint64_t vdev_nparity; /* number of parity devices for raidz */
char *vdev_path; /* vdev path (if any) */
char *vdev_devid; /* vdev devid (if any) */
char *vdev_physpath; /* vdev device path (if any) */
@@ -445,8 +461,6 @@ struct vdev {
zfs_ratelimit_t vdev_checksum_rl;
};
-#define VDEV_RAIDZ_MAXPARITY 3
-
#define VDEV_PAD_SIZE (8 << 10)
/* 2 padding areas (vl_pad1 and vl_be) to skip */
#define VDEV_SKIP_SIZE VDEV_PAD_SIZE * 2
@@ -532,6 +546,9 @@ typedef struct vdev_label {
#define VDEV_LABEL_END_SIZE (2 * sizeof (vdev_label_t))
#define VDEV_LABELS 4
#define VDEV_BEST_LABEL VDEV_LABELS
+#define VDEV_OFFSET_IS_LABEL(vd, off) \
+ (((off) < VDEV_LABEL_START_SIZE) || \
+ ((off) >= ((vd)->vdev_psize - VDEV_LABEL_END_SIZE)))
#define VDEV_ALLOC_LOAD 0
#define VDEV_ALLOC_ADD 1
@@ -577,6 +594,8 @@ extern vdev_ops_t vdev_root_ops;
extern vdev_ops_t vdev_mirror_ops;
extern vdev_ops_t vdev_replacing_ops;
extern vdev_ops_t vdev_raidz_ops;
+extern vdev_ops_t vdev_draid_ops;
+extern vdev_ops_t vdev_draid_spare_ops;
extern vdev_ops_t vdev_disk_ops;
extern vdev_ops_t vdev_file_ops;
extern vdev_ops_t vdev_missing_ops;
@@ -587,11 +606,15 @@ extern vdev_ops_t vdev_indirect_ops;
/*
* Common size functions
*/
-extern void vdev_default_xlate(vdev_t *vd, const range_seg64_t *in,
- range_seg64_t *out);
+extern void vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
+ range_seg64_t *physical_rs, range_seg64_t *remain_rs);
extern uint64_t vdev_default_asize(vdev_t *vd, uint64_t psize);
+extern uint64_t vdev_default_min_asize(vdev_t *vd);
extern uint64_t vdev_get_min_asize(vdev_t *vd);
extern void vdev_set_min_asize(vdev_t *vd);
+extern uint64_t vdev_get_min_alloc(vdev_t *vd);
+extern uint64_t vdev_get_nparity(vdev_t *vd);
+extern uint64_t vdev_get_ndisks(vdev_t *vd);
/*
* Global variables
diff --git a/sys/contrib/openzfs/include/sys/vdev_raidz.h b/sys/contrib/openzfs/include/sys/vdev_raidz.h
index 0ce2b5ea1d67..029fdef5f4f8 100644
--- a/sys/contrib/openzfs/include/sys/vdev_raidz.h
+++ b/sys/contrib/openzfs/include/sys/vdev_raidz.h
@@ -32,6 +32,7 @@ extern "C" {
#endif
struct zio;
+struct raidz_row;
struct raidz_map;
#if !defined(_KERNEL)
struct kernel_param {};
@@ -43,8 +44,11 @@ struct kernel_param {};
struct raidz_map *vdev_raidz_map_alloc(struct zio *, uint64_t, uint64_t,
uint64_t);
void vdev_raidz_map_free(struct raidz_map *);
+void vdev_raidz_generate_parity_row(struct raidz_map *, struct raidz_row *);
void vdev_raidz_generate_parity(struct raidz_map *);
-int vdev_raidz_reconstruct(struct raidz_map *, const int *, int);
+void vdev_raidz_reconstruct(struct raidz_map *, const int *, int);
+void vdev_raidz_child_done(zio_t *);
+void vdev_raidz_io_done(zio_t *);
/*
* vdev_raidz_math interface
@@ -52,11 +56,16 @@ int vdev_raidz_reconstruct(struct raidz_map *, const int *, int);
void vdev_raidz_math_init(void);
void vdev_raidz_math_fini(void);
const struct raidz_impl_ops *vdev_raidz_math_get_ops(void);
-int vdev_raidz_math_generate(struct raidz_map *);
-int vdev_raidz_math_reconstruct(struct raidz_map *, const int *, const int *,
- const int);
+int vdev_raidz_math_generate(struct raidz_map *, struct raidz_row *);
+int vdev_raidz_math_reconstruct(struct raidz_map *, struct raidz_row *,
+ const int *, const int *, const int);
int vdev_raidz_impl_set(const char *);
+typedef struct vdev_raidz {
+ int vd_logical_width;
+ int vd_nparity;
+} vdev_raidz_t;
+
#ifdef __cplusplus
}
#endif
diff --git a/sys/contrib/openzfs/include/sys/vdev_raidz_impl.h b/sys/contrib/openzfs/include/sys/vdev_raidz_impl.h
index 8492daedb6f8..38d4f9e0bd48 100644
--- a/sys/contrib/openzfs/include/sys/vdev_raidz_impl.h
+++ b/sys/contrib/openzfs/include/sys/vdev_raidz_impl.h
@@ -29,6 +29,7 @@
#include <sys/debug.h>
#include <sys/kstat.h>
#include <sys/abd.h>
+#include <sys/vdev_impl.h>
#ifdef __cplusplus
extern "C" {
@@ -106,30 +107,45 @@ typedef struct raidz_col {
uint64_t rc_offset; /* device offset */
uint64_t rc_size; /* I/O size */
abd_t *rc_abd; /* I/O data */
- void *rc_gdata; /* used to store the "good" version */
+ void *rc_orig_data; /* pre-reconstruction */
+ abd_t *rc_gdata; /* used to store the "good" version */
int rc_error; /* I/O error for this device */
uint8_t rc_tried; /* Did we attempt this I/O column? */
uint8_t rc_skipped; /* Did we skip this I/O column? */
+ uint8_t rc_need_orig_restore; /* need to restore from orig_data? */
+ uint8_t rc_repair; /* Write good data to this column */
} raidz_col_t;
+typedef struct raidz_row {
+ uint64_t rr_cols; /* Regular column count */
+ uint64_t rr_scols; /* Count including skipped columns */
+ uint64_t rr_bigcols; /* Remainder data column count */
+ uint64_t rr_missingdata; /* Count of missing data devices */
+ uint64_t rr_missingparity; /* Count of missing parity devices */
+ uint64_t rr_firstdatacol; /* First data column/parity count */
+ abd_t *rr_abd_copy; /* rm_asize-buffer of copied data */
+ abd_t *rr_abd_empty; /* dRAID empty sector buffer */
+ int rr_nempty; /* empty sectors included in parity */
+ int rr_code; /* reconstruction code (unused) */
+#ifdef ZFS_DEBUG
+ uint64_t rr_offset; /* Logical offset for *_io_verify() */
+ uint64_t rr_size; /* Physical size for *_io_verify() */
+#endif
+ raidz_col_t rr_col[0]; /* Flexible array of I/O columns */
+} raidz_row_t;
+
typedef struct raidz_map {
- uint64_t rm_cols; /* Regular column count */
- uint64_t rm_scols; /* Count including skipped columns */
- uint64_t rm_bigcols; /* Number of oversized columns */
- uint64_t rm_asize; /* Actual total I/O size */
- uint64_t rm_missingdata; /* Count of missing data devices */
- uint64_t rm_missingparity; /* Count of missing parity devices */
- uint64_t rm_firstdatacol; /* First data column/parity count */
- uint64_t rm_nskip; /* Skipped sectors for padding */
- uint64_t rm_skipstart; /* Column index of padding start */
- abd_t *rm_abd_copy; /* rm_asize-buffer of copied data */
uintptr_t rm_reports; /* # of referencing checksum reports */
- uint8_t rm_freed; /* map no longer has referencing ZIO */
- uint8_t rm_ecksuminjected; /* checksum error was injected */
+ boolean_t rm_freed; /* map no longer has referencing ZIO */
+ boolean_t rm_ecksuminjected; /* checksum error was injected */
+ int rm_nrows; /* Regular row count */
+ int rm_nskip; /* RAIDZ sectors skipped for padding */
+ int rm_skipstart; /* Column index of padding start */
const raidz_impl_ops_t *rm_ops; /* RAIDZ math operations */
- raidz_col_t rm_col[1]; /* Flexible array of I/O columns */
+ raidz_row_t *rm_row[0]; /* flexible array of rows */
} raidz_map_t;
+
#define RAIDZ_ORIGINAL_IMPL (INT_MAX)
extern const raidz_impl_ops_t vdev_raidz_scalar_impl;
@@ -163,14 +179,15 @@ extern const raidz_impl_ops_t vdev_raidz_powerpc_altivec_impl;
*
* raidz_parity Returns parity of the RAIDZ block
* raidz_ncols Returns number of columns the block spans
+ * Note, all rows have the same number of columns.
* raidz_nbigcols Returns number of big columns
* raidz_col_p Returns pointer to a column
* raidz_col_size Returns size of a column
* raidz_big_size Returns size of big columns
* raidz_short_size Returns size of short columns
*/
-#define raidz_parity(rm) ((rm)->rm_firstdatacol)
-#define raidz_ncols(rm) ((rm)->rm_cols)
+#define raidz_parity(rm) ((rm)->rm_row[0]->rr_firstdatacol)
+#define raidz_ncols(rm) ((rm)->rm_row[0]->rr_cols)
#define raidz_nbigcols(rm) ((rm)->rm_bigcols)
#define raidz_col_p(rm, c) ((rm)->rm_col + (c))
#define raidz_col_size(rm, c) ((rm)->rm_col[c].rc_size)
@@ -185,10 +202,10 @@ extern const raidz_impl_ops_t vdev_raidz_powerpc_altivec_impl;
*/
#define _RAIDZ_GEN_WRAP(code, impl) \
static void \
-impl ## _gen_ ## code(void *rmp) \
+impl ## _gen_ ## code(void *rrp) \
{ \
- raidz_map_t *rm = (raidz_map_t *)rmp; \
- raidz_generate_## code ## _impl(rm); \
+ raidz_row_t *rr = (raidz_row_t *)rrp; \
+ raidz_generate_## code ## _impl(rr); \
}
/*
@@ -199,10 +216,10 @@ impl ## _gen_ ## code(void *rmp) \
*/
#define _RAIDZ_REC_WRAP(code, impl) \
static int \
-impl ## _rec_ ## code(void *rmp, const int *tgtidx) \
+impl ## _rec_ ## code(void *rrp, const int *tgtidx) \
{ \
- raidz_map_t *rm = (raidz_map_t *)rmp; \
- return (raidz_reconstruct_## code ## _impl(rm, tgtidx)); \
+ raidz_row_t *rr = (raidz_row_t *)rrp; \
+ return (raidz_reconstruct_## code ## _impl(rr, tgtidx)); \
}
/*
diff --git a/sys/contrib/openzfs/include/sys/vdev_rebuild.h b/sys/contrib/openzfs/include/sys/vdev_rebuild.h
index 3d4b8cc46836..61ae15c5d09a 100644
--- a/sys/contrib/openzfs/include/sys/vdev_rebuild.h
+++ b/sys/contrib/openzfs/include/sys/vdev_rebuild.h
@@ -66,10 +66,14 @@ typedef struct vdev_rebuild {
vdev_t *vr_top_vdev; /* top-level vdev to rebuild */
metaslab_t *vr_scan_msp; /* scanning disabled metaslab */
range_tree_t *vr_scan_tree; /* scan ranges (in metaslab) */
+ kmutex_t vr_io_lock; /* inflight IO lock */
+ kcondvar_t vr_io_cv; /* inflight IO cv */
/* In-core state and progress */
uint64_t vr_scan_offset[TXG_SIZE];
uint64_t vr_prev_scan_time_ms; /* any previous scan time */
+ uint64_t vr_bytes_inflight_max; /* maximum bytes inflight */
+ uint64_t vr_bytes_inflight; /* current bytes inflight */
/* Per-rebuild pass statistics for calculating bandwidth */
uint64_t vr_pass_start_time;
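The new vr_io_lock/vr_io_cv pair together with the inflight byte counters suggest the usual inflight-limit pattern; the sketch below shows that pattern using the fields above (the helper name is hypothetical and this is not the in-tree function).

/*
 * Sketch only: block until issuing `size` more rebuild bytes stays
 * within vr_bytes_inflight_max.
 */
static void
vr_reserve_inflight_sketch(vdev_rebuild_t *vr, uint64_t size)
{
	mutex_enter(&vr->vr_io_lock);
	while (vr->vr_bytes_inflight + size > vr->vr_bytes_inflight_max)
		cv_wait(&vr->vr_io_cv, &vr->vr_io_lock);
	vr->vr_bytes_inflight += size;
	mutex_exit(&vr->vr_io_lock);

	/* The I/O completion path would subtract and cv_broadcast(). */
}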
diff --git a/sys/contrib/openzfs/include/sys/zfs_context.h b/sys/contrib/openzfs/include/sys/zfs_context.h
index 9f637036ee71..ee3216d6763a 100644
--- a/sys/contrib/openzfs/include/sys/zfs_context.h
+++ b/sys/contrib/openzfs/include/sys/zfs_context.h
@@ -626,6 +626,7 @@ extern void delay(clock_t ticks);
#define defclsyspri 0
#define CPU_SEQID ((uintptr_t)pthread_self() & (max_ncpus - 1))
+#define CPU_SEQID_UNSTABLE CPU_SEQID
#define kcred NULL
#define CRED() NULL
diff --git a/sys/contrib/openzfs/include/sys/zfs_vnops.h b/sys/contrib/openzfs/include/sys/zfs_vnops.h
new file mode 100644
index 000000000000..6bf077b4bf79
--- /dev/null
+++ b/sys/contrib/openzfs/include/sys/zfs_vnops.h
@@ -0,0 +1,55 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#ifndef _SYS_FS_ZFS_VNOPS_H
+#define _SYS_FS_ZFS_VNOPS_H
+#include <sys/zfs_vnops_os.h>
+
+extern int zfs_fsync(znode_t *, int, cred_t *);
+extern int zfs_read(znode_t *, uio_t *, int, cred_t *);
+extern int zfs_write(znode_t *, uio_t *, int, cred_t *);
+extern int zfs_holey(znode_t *, ulong_t, loff_t *);
+extern int zfs_access(znode_t *, int, int, cred_t *);
+
+extern int zfs_getsecattr(znode_t *, vsecattr_t *, int, cred_t *);
+extern int zfs_setsecattr(znode_t *, vsecattr_t *, int, cred_t *);
+
+extern int mappedread(znode_t *, int, uio_t *);
+extern int mappedread_sf(znode_t *, int, uio_t *);
+extern void update_pages(znode_t *, int64_t, int, objset_t *);
+
+/*
+ * Platform code that asynchronously drops zp's inode / vnode_t.
+ *
+ * Asynchronous dropping ensures that the caller will never drop the
+ * last reference on an inode / vnode_t in the current context.
+ * Doing so while holding open a tx could result in a deadlock if
+ * the platform calls into the filesystem again in the implementation
+ * of inode / vnode_t dropping (e.g. call from iput_final()).
+ */
+extern void zfs_zrele_async(znode_t *zp);
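A sketch of the deferral pattern the comment describes follows. It is purely illustrative: the taskq choice and the callback are assumptions, not the shipped implementation, which handles the platform-specific inode/vnode drop itself.

/* Hypothetical final-drop callback executed from taskq context. */
static void
zrele_task_sketch(void *arg)
{
	znode_t *zp = arg;
	/* Platform-specific final drop of zp's inode / vnode_t goes here. */
	(void) zp;
}

static void
zrele_async_sketch(znode_t *zp, taskq_t *zrele_taskq)
{
	/*
	 * Dispatch the drop so it runs outside the caller's context; a
	 * caller holding an open tx therefore never re-enters the
	 * filesystem through the final-drop path.
	 */
	VERIFY(taskq_dispatch(zrele_taskq, zrele_task_sketch, zp,
	    TQ_SLEEP) != TASKQID_INVALID);
}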
+
+extern zil_get_data_t zfs_get_data;
+
+#endif
diff --git a/sys/contrib/openzfs/include/sys/zfs_znode.h b/sys/contrib/openzfs/include/sys/zfs_znode.h
index 4138f6eba0a0..1ae1520e0736 100644
--- a/sys/contrib/openzfs/include/sys/zfs_znode.h
+++ b/sys/contrib/openzfs/include/sys/zfs_znode.h
@@ -187,7 +187,6 @@ typedef struct znode {
boolean_t z_unlinked; /* file has been unlinked */
boolean_t z_atime_dirty; /* atime needs to be synced */
boolean_t z_zn_prefetch; /* Prefetch znodes? */
- boolean_t z_moved; /* Has this znode been moved? */
boolean_t z_is_sa; /* are we native sa? */
boolean_t z_is_mapped; /* are we mmap'ed */
boolean_t z_is_ctldir; /* are we .zfs entry */
diff --git a/sys/contrib/openzfs/include/sys/zio.h b/sys/contrib/openzfs/include/sys/zio.h
index 4959831716b5..334ca064b371 100644
--- a/sys/contrib/openzfs/include/sys/zio.h
+++ b/sys/contrib/openzfs/include/sys/zio.h
@@ -372,6 +372,7 @@ struct zio_cksum_report {
nvlist_t *zcr_detector;
void *zcr_cbdata;
size_t zcr_cbinfo; /* passed to zcr_free() */
+ uint64_t zcr_sector;
uint64_t zcr_align;
uint64_t zcr_length;
zio_cksum_finish_f *zcr_finish;
diff --git a/sys/contrib/openzfs/include/sys/zvol_impl.h b/sys/contrib/openzfs/include/sys/zvol_impl.h
index 36199c311d07..5137d2172088 100644
--- a/sys/contrib/openzfs/include/sys/zvol_impl.h
+++ b/sys/contrib/openzfs/include/sys/zvol_impl.h
@@ -46,6 +46,7 @@ typedef struct zvol_state {
uint32_t zv_flags; /* ZVOL_* flags */
uint32_t zv_open_count; /* open counts */
uint32_t zv_changed; /* disk changed */
+ uint32_t zv_volmode; /* volmode */
zilog_t *zv_zilog; /* ZIL handle */
zfs_rangelock_t zv_rangelock; /* for range locking */
dnode_t *zv_dn; /* dnode hold */
@@ -88,6 +89,7 @@ int zvol_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb,
zio_t *zio);
int zvol_init_impl(void);
void zvol_fini_impl(void);
+void zvol_wait_close(zvol_state_t *zv);
/*
* platform dependent functions exported to platform independent code
diff --git a/sys/contrib/openzfs/include/zfeature_common.h b/sys/contrib/openzfs/include/zfeature_common.h
index db0138ae8e39..cf05bad76c3c 100644
--- a/sys/contrib/openzfs/include/zfeature_common.h
+++ b/sys/contrib/openzfs/include/zfeature_common.h
@@ -76,6 +76,7 @@ typedef enum spa_feature {
SPA_FEATURE_LIVELIST,
SPA_FEATURE_DEVICE_REBUILD,
SPA_FEATURE_ZSTD_COMPRESS,
+ SPA_FEATURE_DRAID,
SPA_FEATURES
} spa_feature_t;
diff --git a/sys/contrib/openzfs/lib/Makefile.am b/sys/contrib/openzfs/lib/Makefile.am
index f049288a1ae7..685c7b6695c6 100644
--- a/sys/contrib/openzfs/lib/Makefile.am
+++ b/sys/contrib/openzfs/lib/Makefile.am
@@ -15,4 +15,21 @@ SUBDIRS += libzutil libunicode
# These five libraries, which are installed as the final build product,
# incorporate the eight convenience libraries given above.
-SUBDIRS += libuutil libzfs_core libzfs libzpool libzfsbootenv
+DISTLIBS = libuutil libzfs_core libzfs libzpool libzfsbootenv
+SUBDIRS += $(DISTLIBS)
+DISTLIBS += libnvpair
+
+# An ABI is stored for each of these libraries. Note that libzpool.so
+# is only linked against by ztest and zdb, and no stable ABI is provided.
+ABILIBS = libnvpair libuutil libzfs_core libzfs libzfsbootenv
+
+PHONY = checkabi storeabi
+checkabi: $(ABILIBS)
+ set -e ; for dir in $(ABILIBS) ; do \
+ $(MAKE) -C $$dir checkabi ; \
+ done
+
+storeabi: $(ABILIBS)
+ set -e ; for dir in $(ABILIBS) ; do \
+ $(MAKE) -C $$dir storeabi ; \
+ done
diff --git a/sys/contrib/openzfs/lib/libnvpair/Makefile.am b/sys/contrib/openzfs/lib/libnvpair/Makefile.am
index ec16c5d526c5..7840e099bbd4 100644
--- a/sys/contrib/openzfs/lib/libnvpair/Makefile.am
+++ b/sys/contrib/openzfs/lib/libnvpair/Makefile.am
@@ -1,4 +1,5 @@
include $(top_srcdir)/config/Rules.am
+PHONY =
VPATH = \
$(top_srcdir)/module/nvpair \
@@ -10,6 +11,8 @@ AM_CFLAGS += $(FRAME_LARGER_THAN) $(LIBTIRPC_CFLAGS)
lib_LTLIBRARIES = libnvpair.la
+include $(top_srcdir)/config/Abigail.am
+
USER_C = \
libnvpair.c \
libnvpair_json.c \
@@ -37,8 +40,7 @@ if !ASAN_ENABLED
libnvpair_la_LDFLAGS += -Wl,-z,defs
endif
-if BUILD_FREEBSD
libnvpair_la_LDFLAGS += -version-info 3:0:0
-else
-libnvpair_la_LDFLAGS += -version-info 1:1:0
-endif
+
+# Library ABI
+EXTRA_DIST = libnvpair.abi libnvpair.suppr
diff --git a/sys/contrib/openzfs/lib/libnvpair/libnvpair.abi b/sys/contrib/openzfs/lib/libnvpair/libnvpair.abi
new file mode 100644
index 000000000000..c1b50a8aa39c
--- /dev/null
+++ b/sys/contrib/openzfs/lib/libnvpair/libnvpair.abi
@@ -0,0 +1,2805 @@
+<abi-corpus path='libnvpair.so' architecture='elf-amd-x86_64' soname='libnvpair.so.3'>
+ <elf-needed>
+ <dependency name='libtirpc.so.3'/>
+ <dependency name='libc.so.6'/>
+ </elf-needed>
+ <elf-function-symbols>
+ <elf-symbol name='dump_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_boolean' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_boolean_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_boolean_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_byte' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_byte_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_int16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_int16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_int32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_int32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_int64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_int64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_int8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_int8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_nvlist_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_string_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_uint16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_uint16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_uint32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_uint32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_uint64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_uint8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_add_uint8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_alloc' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_dup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_free' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_boolean' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_boolean_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_boolean_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_byte' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_byte_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_int16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_int16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_int32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_int32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_int64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_int64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_int8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_int8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_uint16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_uint16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_uint32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_uint32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_uint64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_uint8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_lookup_uint8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_merge' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_num_pairs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_pack' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_pack_free' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_remove_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_size' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvlist_unpack' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvpair_value_boolean_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvpair_value_byte' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvpair_value_int16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvpair_value_int32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvpair_value_int64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvpair_value_int8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvpair_value_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvpair_value_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvpair_value_uint16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvpair_value_uint32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvpair_value_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='fnvpair_value_uint8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='libspl_assertf' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nv_alloc_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nv_alloc_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nv_alloc_reset' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_boolean' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_boolean_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_boolean_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_byte' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_byte_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_double' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_hrtime' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_int16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_int16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_int32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_int32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_int64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_int64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_int8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_int8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_nvlist_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_string_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_uint16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_uint16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_uint32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_uint32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_uint64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_uint8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_add_uint8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_alloc' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_dup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_empty' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_exists' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_free' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_boolean' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_boolean_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_boolean_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_byte' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_byte_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_double' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_hrtime' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_int16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_int16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_int32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_int32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_int64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_int64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_int8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_int8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_nv_alloc' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_nvlist_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_nvpair_embedded_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_pairs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_string_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_uint16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_uint16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_uint32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_uint32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_uint64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_uint8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_lookup_uint8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_merge' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_next_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_nvflag' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_pack' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prev_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_print' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_print_json' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctl_alloc' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctl_dofmt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctl_doindent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctl_free' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctl_getdest' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctl_setdest' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctl_setfmt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctl_setindent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_boolean' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_boolean_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_boolean_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_byte' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_byte_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_double' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_hrtime' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_int16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_int16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_int32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_int32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_int64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_int64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_int8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_int8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_nvlist_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_string_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_uint16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_uint16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_uint32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_uint32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_uint64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_uint8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_prtctlop_uint8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_remove_all' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_remove_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_size' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_unpack' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_xalloc' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_xdup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_xpack' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_xunpack' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_type_is_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_boolean_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_boolean_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_byte' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_byte_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_double' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_hrtime' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_int16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_int16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_int32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_int32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_int64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_int64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_int8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_int8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_match' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_match_regex' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_nvlist_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_string_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_uint16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_uint16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_uint32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_uint32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_uint64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_uint8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_value_uint8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ </elf-function-symbols>
+ <elf-variable-symbols>
+ <elf-symbol name='aok' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nv_alloc_nosleep' size='8' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nv_alloc_nosleep_def' size='16' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nv_alloc_sleep' size='8' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nv_alloc_sleep_def' size='16' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nv_fixed_ops' size='8' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nv_fixed_ops_def' size='40' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvlist_hashtable_init_size' size='8' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='nvpair_max_recursion' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ </elf-variable-symbols>
+ <abi-instr version='1.0' address-size='64' path='libnvpair.c' comp-dir-path='/home/fedora/zfs/lib/libnvpair' language='LANG_C99'>
+
+
+ <type-decl name='char' size-in-bits='8' id='type-id-1'/>
+ <array-type-def dimensions='1' type-id='type-id-1' size-in-bits='8' id='type-id-2'>
+ <subrange length='1' type-id='type-id-3' id='type-id-4'/>
+
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='type-id-1' size-in-bits='160' id='type-id-5'>
+ <subrange length='20' type-id='type-id-3' id='type-id-6'/>
+
+ </array-type-def>
+ <class-decl name='_IO_codecvt' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-7'/>
+ <class-decl name='_IO_marker' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-8'/>
+ <class-decl name='_IO_wide_data' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-9'/>
+ <class-decl name='re_dfa_t' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-10'/>
+ <type-decl name='double' size-in-bits='64' id='type-id-11'/>
+ <type-decl name='int' size-in-bits='32' id='type-id-12'/>
+ <type-decl name='long int' size-in-bits='64' id='type-id-13'/>
+ <type-decl name='long long int' size-in-bits='64' id='type-id-14'/>
+ <type-decl name='short int' size-in-bits='16' id='type-id-15'/>
+ <type-decl name='signed char' size-in-bits='8' id='type-id-16'/>
+ <type-decl name='unnamed-enum-underlying-type' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='type-id-17'/>
+ <type-decl name='unsigned char' size-in-bits='8' id='type-id-18'/>
+ <type-decl name='unsigned int' size-in-bits='32' id='type-id-19'/>
+ <type-decl name='unsigned long int' size-in-bits='64' id='type-id-3'/>
+ <type-decl name='unsigned short int' size-in-bits='16' id='type-id-20'/>
+ <type-decl name='void' id='type-id-21'/>
+ <typedef-decl name='nvpair_t' type-id='type-id-22' filepath='../../include/sys/nvpair.h' line='82' column='1' id='type-id-23'/>
+ <class-decl name='nvpair' size-in-bits='128' is-struct='yes' visibility='default' filepath='../../include/sys/nvpair.h' line='73' column='1' id='type-id-22'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='nvp_size' type-id='type-id-24' visibility='default' filepath='../../include/sys/nvpair.h' line='74' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='nvp_name_sz' type-id='type-id-25' visibility='default' filepath='../../include/sys/nvpair.h' line='75' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='48'>
+ <var-decl name='nvp_reserve' type-id='type-id-25' visibility='default' filepath='../../include/sys/nvpair.h' line='76' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='nvp_value_elem' type-id='type-id-24' visibility='default' filepath='../../include/sys/nvpair.h' line='77' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='96'>
+ <var-decl name='nvp_type' type-id='type-id-26' visibility='default' filepath='../../include/sys/nvpair.h' line='78' column='1'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='int32_t' type-id='type-id-27' filepath='/usr/include/bits/stdint-intn.h' line='26' column='1' id='type-id-24'/>
+ <typedef-decl name='__int32_t' type-id='type-id-12' filepath='/usr/include/bits/types.h' line='41' column='1' id='type-id-27'/>
+ <typedef-decl name='int16_t' type-id='type-id-28' filepath='/usr/include/bits/stdint-intn.h' line='25' column='1' id='type-id-25'/>
+ <typedef-decl name='__int16_t' type-id='type-id-15' filepath='/usr/include/bits/types.h' line='39' column='1' id='type-id-28'/>
+ <typedef-decl name='data_type_t' type-id='type-id-29' filepath='../../include/sys/nvpair.h' line='71' column='1' id='type-id-26'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' filepath='../../include/sys/nvpair.h' line='37' column='1' id='type-id-29'>
+ <underlying-type type-id='type-id-17'/>
+ <enumerator name='DATA_TYPE_DONTCARE' value='-1'/>
+ <enumerator name='DATA_TYPE_UNKNOWN' value='0'/>
+ <enumerator name='DATA_TYPE_BOOLEAN' value='1'/>
+ <enumerator name='DATA_TYPE_BYTE' value='2'/>
+ <enumerator name='DATA_TYPE_INT16' value='3'/>
+ <enumerator name='DATA_TYPE_UINT16' value='4'/>
+ <enumerator name='DATA_TYPE_INT32' value='5'/>
+ <enumerator name='DATA_TYPE_UINT32' value='6'/>
+ <enumerator name='DATA_TYPE_INT64' value='7'/>
+ <enumerator name='DATA_TYPE_UINT64' value='8'/>
+ <enumerator name='DATA_TYPE_STRING' value='9'/>
+ <enumerator name='DATA_TYPE_BYTE_ARRAY' value='10'/>
+ <enumerator name='DATA_TYPE_INT16_ARRAY' value='11'/>
+ <enumerator name='DATA_TYPE_UINT16_ARRAY' value='12'/>
+ <enumerator name='DATA_TYPE_INT32_ARRAY' value='13'/>
+ <enumerator name='DATA_TYPE_UINT32_ARRAY' value='14'/>
+ <enumerator name='DATA_TYPE_INT64_ARRAY' value='15'/>
+ <enumerator name='DATA_TYPE_UINT64_ARRAY' value='16'/>
+ <enumerator name='DATA_TYPE_STRING_ARRAY' value='17'/>
+ <enumerator name='DATA_TYPE_HRTIME' value='18'/>
+ <enumerator name='DATA_TYPE_NVLIST' value='19'/>
+ <enumerator name='DATA_TYPE_NVLIST_ARRAY' value='20'/>
+ <enumerator name='DATA_TYPE_BOOLEAN_VALUE' value='21'/>
+ <enumerator name='DATA_TYPE_INT8' value='22'/>
+ <enumerator name='DATA_TYPE_UINT8' value='23'/>
+ <enumerator name='DATA_TYPE_BOOLEAN_ARRAY' value='24'/>
+ <enumerator name='DATA_TYPE_INT8_ARRAY' value='25'/>
+ <enumerator name='DATA_TYPE_UINT8_ARRAY' value='26'/>
+ <enumerator name='DATA_TYPE_DOUBLE' value='27'/>
+ </enum-decl>
+ <typedef-decl name='nvlist_t' type-id='type-id-30' filepath='../../include/sys/nvpair.h' line='91' column='1' id='type-id-31'/>
+ <class-decl name='nvlist' size-in-bits='192' is-struct='yes' visibility='default' filepath='../../include/sys/nvpair.h' line='85' column='1' id='type-id-30'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='nvl_version' type-id='type-id-24' visibility='default' filepath='../../include/sys/nvpair.h' line='86' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='nvl_nvflag' type-id='type-id-32' visibility='default' filepath='../../include/sys/nvpair.h' line='87' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='nvl_priv' type-id='type-id-33' visibility='default' filepath='../../include/sys/nvpair.h' line='88' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='nvl_flag' type-id='type-id-32' visibility='default' filepath='../../include/sys/nvpair.h' line='89' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='160'>
+ <var-decl name='nvl_pad' type-id='type-id-24' visibility='default' filepath='../../include/sys/nvpair.h' line='90' column='1'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='uint32_t' type-id='type-id-34' filepath='/usr/include/bits/stdint-uintn.h' line='26' column='1' id='type-id-32'/>
+ <typedef-decl name='__uint32_t' type-id='type-id-19' filepath='/usr/include/bits/types.h' line='42' column='1' id='type-id-34'/>
+ <typedef-decl name='uint64_t' type-id='type-id-35' filepath='/usr/include/bits/stdint-uintn.h' line='27' column='1' id='type-id-33'/>
+ <typedef-decl name='__uint64_t' type-id='type-id-3' filepath='/usr/include/bits/types.h' line='45' column='1' id='type-id-35'/>
+ <typedef-decl name='nvlist_prtctl_t' type-id='type-id-36' filepath='../../include/libnvpair.h' line='84' column='1' id='type-id-37'/>
+ <class-decl name='nvlist_prtctl' size-in-bits='576' is-struct='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='91' column='1' id='type-id-38'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='nvprt_fp' type-id='type-id-39' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='92' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='nvprt_indent_mode' type-id='type-id-40' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='93' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='96'>
+ <var-decl name='nvprt_indent' type-id='type-id-12' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='94' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='nvprt_indentinc' type-id='type-id-12' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='95' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='nvprt_nmfmt' type-id='type-id-41' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='96' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='nvprt_eomfmt' type-id='type-id-41' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='97' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='nvprt_btwnarrfmt' type-id='type-id-41' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='98' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='nvprt_btwnarrfmt_nl' type-id='type-id-12' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='99' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='nvprt_dfltops' type-id='type-id-42' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='100' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='512'>
+ <var-decl name='nvprt_custops' type-id='type-id-42' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='101' column='1'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='FILE' type-id='type-id-43' filepath='/usr/include/bits/types/FILE.h' line='7' column='1' id='type-id-44'/>
+ <class-decl name='_IO_FILE' size-in-bits='1728' is-struct='yes' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='49' column='1' id='type-id-43'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='_flags' type-id='type-id-12' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='51' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='_IO_read_ptr' type-id='type-id-45' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='54' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='_IO_read_end' type-id='type-id-45' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='55' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='_IO_read_base' type-id='type-id-45' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='56' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='_IO_write_base' type-id='type-id-45' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='57' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='_IO_write_ptr' type-id='type-id-45' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='58' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='_IO_write_end' type-id='type-id-45' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='59' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='_IO_buf_base' type-id='type-id-45' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='60' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='512'>
+ <var-decl name='_IO_buf_end' type-id='type-id-45' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='61' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='576'>
+ <var-decl name='_IO_save_base' type-id='type-id-45' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='64' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='640'>
+ <var-decl name='_IO_backup_base' type-id='type-id-45' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='65' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='704'>
+ <var-decl name='_IO_save_end' type-id='type-id-45' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='66' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='768'>
+ <var-decl name='_markers' type-id='type-id-46' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='68' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='832'>
+ <var-decl name='_chain' type-id='type-id-47' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='70' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='896'>
+ <var-decl name='_fileno' type-id='type-id-12' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='72' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='928'>
+ <var-decl name='_flags2' type-id='type-id-12' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='73' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='960'>
+ <var-decl name='_old_offset' type-id='type-id-48' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='74' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1024'>
+ <var-decl name='_cur_column' type-id='type-id-20' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='77' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1040'>
+ <var-decl name='_vtable_offset' type-id='type-id-16' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='78' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1048'>
+ <var-decl name='_shortbuf' type-id='type-id-2' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='79' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1088'>
+ <var-decl name='_lock' type-id='type-id-49' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='81' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1152'>
+ <var-decl name='_offset' type-id='type-id-50' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='89' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1216'>
+ <var-decl name='_codecvt' type-id='type-id-51' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='91' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1280'>
+ <var-decl name='_wide_data' type-id='type-id-52' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='92' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1344'>
+ <var-decl name='_freeres_list' type-id='type-id-47' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='93' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1408'>
+ <var-decl name='_freeres_buf' type-id='type-id-53' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='94' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1472'>
+ <var-decl name='__pad5' type-id='type-id-54' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='95' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1536'>
+ <var-decl name='_mode' type-id='type-id-12' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='96' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1568'>
+ <var-decl name='_unused2' type-id='type-id-5' visibility='default' filepath='/usr/include/bits/types/struct_FILE.h' line='98' column='1'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='__off_t' type-id='type-id-13' filepath='/usr/include/bits/types.h' line='152' column='1' id='type-id-48'/>
+ <typedef-decl name='_IO_lock_t' type-id='type-id-21' filepath='/usr/include/bits/types/struct_FILE.h' line='43' column='1' id='type-id-55'/>
+ <typedef-decl name='__off64_t' type-id='type-id-13' filepath='/usr/include/bits/types.h' line='153' column='1' id='type-id-50'/>
+ <typedef-decl name='size_t' type-id='type-id-3' filepath='/usr/lib/gcc/x86_64-redhat-linux/10/include/stddef.h' line='209' column='1' id='type-id-54'/>
+ <enum-decl name='nvlist_indent_mode' filepath='../../include/libnvpair.h' line='86' column='1' id='type-id-40'>
+ <underlying-type type-id='type-id-17'/>
+ <enumerator name='NVLIST_INDENT_ABS' value='0'/>
+ <enumerator name='NVLIST_INDENT_TABBED' value='1'/>
+ </enum-decl>
+ <class-decl name='nvlist_printops' size-in-bits='3456' is-struct='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='61' column='1' id='type-id-56'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='print_boolean' type-id='type-id-57' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='62' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='print_boolean_value' type-id='type-id-58' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='63' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='print_byte' type-id='type-id-59' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='64' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='print_int8' type-id='type-id-60' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='65' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='512'>
+ <var-decl name='print_uint8' type-id='type-id-61' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='66' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='640'>
+ <var-decl name='print_int16' type-id='type-id-62' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='67' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='768'>
+ <var-decl name='print_uint16' type-id='type-id-63' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='68' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='896'>
+ <var-decl name='print_int32' type-id='type-id-64' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='69' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1024'>
+ <var-decl name='print_uint32' type-id='type-id-65' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='70' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1152'>
+ <var-decl name='print_int64' type-id='type-id-66' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='71' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1280'>
+ <var-decl name='print_uint64' type-id='type-id-67' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='72' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1408'>
+ <var-decl name='print_double' type-id='type-id-68' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='73' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1536'>
+ <var-decl name='print_string' type-id='type-id-69' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='74' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1664'>
+ <var-decl name='print_hrtime' type-id='type-id-70' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='75' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1792'>
+ <var-decl name='print_nvlist' type-id='type-id-71' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='76' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1920'>
+ <var-decl name='print_boolean_array' type-id='type-id-72' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='77' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2048'>
+ <var-decl name='print_byte_array' type-id='type-id-73' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='78' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2176'>
+ <var-decl name='print_int8_array' type-id='type-id-74' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='79' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2304'>
+ <var-decl name='print_uint8_array' type-id='type-id-75' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='80' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2432'>
+ <var-decl name='print_int16_array' type-id='type-id-76' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='81' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2560'>
+ <var-decl name='print_uint16_array' type-id='type-id-77' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='82' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2688'>
+ <var-decl name='print_int32_array' type-id='type-id-78' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='83' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2816'>
+ <var-decl name='print_uint32_array' type-id='type-id-79' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='84' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2944'>
+ <var-decl name='print_int64_array' type-id='type-id-80' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='85' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='3072'>
+ <var-decl name='print_uint64_array' type-id='type-id-81' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='86' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='3200'>
+ <var-decl name='print_string_array' type-id='type-id-82' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='87' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='3328'>
+ <var-decl name='print_nvlist_array' type-id='type-id-83' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='88' column='1'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='62' column='1' id='type-id-57'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-84' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='62' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='62' column='1'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__1' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='63' column='1' id='type-id-58'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-85' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='63' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='63' column='1'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='boolean_t' type-id='type-id-86' filepath='../../lib/libspl/include/sys/stdtypes.h' line='29' column='1' id='type-id-87'/>
+ <enum-decl name='__anonymous_enum__1' is-anonymous='yes' filepath='../../lib/libspl/include/sys/stdtypes.h' line='26' column='1' id='type-id-86'>
+ <underlying-type type-id='type-id-17'/>
+ <enumerator name='B_FALSE' value='0'/>
+ <enumerator name='B_TRUE' value='1'/>
+ </enum-decl>
+ <class-decl name='__anonymous_struct__2' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='64' column='1' id='type-id-59'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-88' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='64' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='64' column='1'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='uchar_t' type-id='type-id-18' filepath='../../lib/libspl/include/sys/stdtypes.h' line='31' column='1' id='type-id-89'/>
+ <class-decl name='__anonymous_struct__3' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='65' column='1' id='type-id-60'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-90' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='65' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='65' column='1'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='int8_t' type-id='type-id-91' filepath='/usr/include/bits/stdint-intn.h' line='24' column='1' id='type-id-92'/>
+ <typedef-decl name='__int8_t' type-id='type-id-16' filepath='/usr/include/bits/types.h' line='37' column='1' id='type-id-91'/>
+ <class-decl name='__anonymous_struct__4' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='66' column='1' id='type-id-61'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-93' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='66' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='66' column='1'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='uint8_t' type-id='type-id-94' filepath='/usr/include/bits/stdint-uintn.h' line='24' column='1' id='type-id-95'/>
+ <typedef-decl name='__uint8_t' type-id='type-id-18' filepath='/usr/include/bits/types.h' line='38' column='1' id='type-id-94'/>
+ <class-decl name='__anonymous_struct__5' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='67' column='1' id='type-id-62'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-96' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='67' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='67' column='1'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__6' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='68' column='1' id='type-id-63'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-97' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='68' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='68' column='1'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='uint16_t' type-id='type-id-98' filepath='/usr/include/bits/stdint-uintn.h' line='25' column='1' id='type-id-99'/>
+ <typedef-decl name='__uint16_t' type-id='type-id-20' filepath='/usr/include/bits/types.h' line='40' column='1' id='type-id-98'/>
+ <class-decl name='__anonymous_struct__7' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='69' column='1' id='type-id-64'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-100' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='69' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='69' column='1'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__8' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='70' column='1' id='type-id-65'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-101' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='70' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='70' column='1'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__9' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='71' column='1' id='type-id-66'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-102' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='71' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='71' column='1'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='int64_t' type-id='type-id-103' filepath='/usr/include/bits/stdint-intn.h' line='27' column='1' id='type-id-104'/>
+ <typedef-decl name='__int64_t' type-id='type-id-13' filepath='/usr/include/bits/types.h' line='44' column='1' id='type-id-103'/>
+ <class-decl name='__anonymous_struct__10' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='72' column='1' id='type-id-67'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-105' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='72' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='72' column='1'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__11' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='73' column='1' id='type-id-68'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-106' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='73' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='73' column='1'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__12' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='74' column='1' id='type-id-69'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-107' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='74' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='74' column='1'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__13' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='75' column='1' id='type-id-70'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-108' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='75' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='75' column='1'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='hrtime_t' type-id='type-id-14' filepath='../../lib/libspl/include/sys/time.h' line='78' column='1' id='type-id-109'/>
+ <class-decl name='__anonymous_struct__14' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='76' column='1' id='type-id-71'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-110' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='76' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='76' column='1'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__15' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='77' column='1' id='type-id-72'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-111' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='77' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='77' column='1'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='uint_t' type-id='type-id-19' filepath='../../lib/libspl/include/sys/stdtypes.h' line='33' column='1' id='type-id-112'/>
+ <class-decl name='__anonymous_struct__16' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='78' column='1' id='type-id-73'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-113' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='78' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='78' column='1'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__17' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='79' column='1' id='type-id-74'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-114' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='79' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='79' column='1'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__18' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='80' column='1' id='type-id-75'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-115' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='80' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='80' column='1'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__19' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='81' column='1' id='type-id-76'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-116' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='81' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='81' column='1'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__20' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='82' column='1' id='type-id-77'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-117' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='82' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='82' column='1'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__21' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='83' column='1' id='type-id-78'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-118' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='83' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='83' column='1'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__22' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='84' column='1' id='type-id-79'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-119' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='84' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='84' column='1'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__23' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='85' column='1' id='type-id-80'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-120' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='85' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='85' column='1'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__24' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='86' column='1' id='type-id-81'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-121' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='86' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='86' column='1'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__25' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='87' column='1' id='type-id-82'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-122' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='87' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='87' column='1'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__26' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='88' column='1' id='type-id-83'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='type-id-123' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='88' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='type-id-53' visibility='default' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='88' column='1'/>
+ </data-member>
+ </class-decl>
+ <enum-decl name='nvlist_prtctl_fmt' filepath='../../include/libnvpair.h' line='104' column='1' id='type-id-124'>
+ <underlying-type type-id='type-id-17'/>
+ <enumerator name='NVLIST_FMT_MEMBER_NAME' value='0'/>
+ <enumerator name='NVLIST_FMT_MEMBER_POSTAMBLE' value='1'/>
+ <enumerator name='NVLIST_FMT_BTWN_ARRAY' value='2'/>
+ </enum-decl>
+ <typedef-decl name='regex_t' type-id='type-id-125' filepath='/usr/include/regex.h' line='478' column='1' id='type-id-126'/>
+ <class-decl name='re_pattern_buffer' size-in-bits='512' is-struct='yes' visibility='default' filepath='/usr/include/regex.h' line='413' column='1' id='type-id-125'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='buffer' type-id='type-id-127' visibility='default' filepath='/usr/include/regex.h' line='417' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='allocated' type-id='type-id-128' visibility='default' filepath='/usr/include/regex.h' line='420' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='used' type-id='type-id-128' visibility='default' filepath='/usr/include/regex.h' line='423' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='syntax' type-id='type-id-129' visibility='default' filepath='/usr/include/regex.h' line='426' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='fastmap' type-id='type-id-45' visibility='default' filepath='/usr/include/regex.h' line='431' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='translate' type-id='type-id-130' visibility='default' filepath='/usr/include/regex.h' line='437' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='re_nsub' type-id='type-id-54' visibility='default' filepath='/usr/include/regex.h' line='440' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='31'>
+ <var-decl name='can_be_null' type-id='type-id-19' visibility='default' filepath='/usr/include/regex.h' line='446' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='29'>
+ <var-decl name='regs_allocated' type-id='type-id-19' visibility='default' filepath='/usr/include/regex.h' line='457' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='28'>
+ <var-decl name='fastmap_accurate' type-id='type-id-19' visibility='default' filepath='/usr/include/regex.h' line='461' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='27'>
+ <var-decl name='no_sub' type-id='type-id-19' visibility='default' filepath='/usr/include/regex.h' line='465' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='26'>
+ <var-decl name='not_bol' type-id='type-id-19' visibility='default' filepath='/usr/include/regex.h' line='469' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='25'>
+ <var-decl name='not_eol' type-id='type-id-19' visibility='default' filepath='/usr/include/regex.h' line='472' column='1'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='24'>
+ <var-decl name='newline_anchor' type-id='type-id-19' visibility='default' filepath='/usr/include/regex.h' line='475' column='1'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='__re_long_size_t' type-id='type-id-3' filepath='/usr/include/regex.h' line='56' column='1' id='type-id-128'/>
+ <typedef-decl name='reg_syntax_t' type-id='type-id-3' filepath='/usr/include/regex.h' line='72' column='1' id='type-id-129'/>
+ <pointer-type-def type-id='type-id-44' size-in-bits='64' id='type-id-39'/>
+ <pointer-type-def type-id='type-id-43' size-in-bits='64' id='type-id-47'/>
+ <pointer-type-def type-id='type-id-7' size-in-bits='64' id='type-id-51'/>
+ <pointer-type-def type-id='type-id-55' size-in-bits='64' id='type-id-49'/>
+ <pointer-type-def type-id='type-id-8' size-in-bits='64' id='type-id-46'/>
+ <pointer-type-def type-id='type-id-9' size-in-bits='64' id='type-id-52'/>
+ <pointer-type-def type-id='type-id-87' size-in-bits='64' id='type-id-131'/>
+ <pointer-type-def type-id='type-id-1' size-in-bits='64' id='type-id-45'/>
+ <pointer-type-def type-id='type-id-45' size-in-bits='64' id='type-id-132'/>
+ <qualified-type-def type-id='type-id-1' const='yes' id='type-id-133'/>
+ <pointer-type-def type-id='type-id-133' size-in-bits='64' id='type-id-41'/>
+ <pointer-type-def type-id='type-id-134' size-in-bits='64' id='type-id-111'/>
+ <pointer-type-def type-id='type-id-135' size-in-bits='64' id='type-id-107'/>
+ <pointer-type-def type-id='type-id-136' size-in-bits='64' id='type-id-122'/>
+ <pointer-type-def type-id='type-id-137' size-in-bits='64' id='type-id-106'/>
+ <pointer-type-def type-id='type-id-138' size-in-bits='64' id='type-id-84'/>
+ <pointer-type-def type-id='type-id-139' size-in-bits='64' id='type-id-116'/>
+ <pointer-type-def type-id='type-id-140' size-in-bits='64' id='type-id-118'/>
+ <pointer-type-def type-id='type-id-141' size-in-bits='64' id='type-id-120'/>
+ <pointer-type-def type-id='type-id-142' size-in-bits='64' id='type-id-114'/>
+ <pointer-type-def type-id='type-id-143' size-in-bits='64' id='type-id-110'/>
+ <pointer-type-def type-id='type-id-144' size-in-bits='64' id='type-id-123'/>
+ <pointer-type-def type-id='type-id-145' size-in-bits='64' id='type-id-85'/>
+ <pointer-type-def type-id='type-id-146' size-in-bits='64' id='type-id-108'/>
+ <pointer-type-def type-id='type-id-147' size-in-bits='64' id='type-id-96'/>
+ <pointer-type-def type-id='type-id-148' size-in-bits='64' id='type-id-100'/>
+ <pointer-type-def type-id='type-id-149' size-in-bits='64' id='type-id-102'/>
+ <pointer-type-def type-id='type-id-150' size-in-bits='64' id='type-id-90'/>
+ <pointer-type-def type-id='type-id-151' size-in-bits='64' id='type-id-88'/>
+ <pointer-type-def type-id='type-id-152' size-in-bits='64' id='type-id-97'/>
+ <pointer-type-def type-id='type-id-153' size-in-bits='64' id='type-id-101'/>
+ <pointer-type-def type-id='type-id-154' size-in-bits='64' id='type-id-105'/>
+ <pointer-type-def type-id='type-id-155' size-in-bits='64' id='type-id-93'/>
+ <pointer-type-def type-id='type-id-156' size-in-bits='64' id='type-id-113'/>
+ <pointer-type-def type-id='type-id-157' size-in-bits='64' id='type-id-117'/>
+ <pointer-type-def type-id='type-id-158' size-in-bits='64' id='type-id-119'/>
+ <pointer-type-def type-id='type-id-159' size-in-bits='64' id='type-id-121'/>
+ <pointer-type-def type-id='type-id-160' size-in-bits='64' id='type-id-115'/>
+ <pointer-type-def type-id='type-id-161' size-in-bits='64' id='type-id-162'/>
+ <pointer-type-def type-id='type-id-163' size-in-bits='64' id='type-id-164'/>
+ <pointer-type-def type-id='type-id-165' size-in-bits='64' id='type-id-166'/>
+ <pointer-type-def type-id='type-id-167' size-in-bits='64' id='type-id-168'/>
+ <pointer-type-def type-id='type-id-169' size-in-bits='64' id='type-id-170'/>
+ <pointer-type-def type-id='type-id-171' size-in-bits='64' id='type-id-172'/>
+ <pointer-type-def type-id='type-id-173' size-in-bits='64' id='type-id-174'/>
+ <pointer-type-def type-id='type-id-175' size-in-bits='64' id='type-id-176'/>
+ <pointer-type-def type-id='type-id-177' size-in-bits='64' id='type-id-178'/>
+ <pointer-type-def type-id='type-id-179' size-in-bits='64' id='type-id-180'/>
+ <pointer-type-def type-id='type-id-181' size-in-bits='64' id='type-id-182'/>
+ <pointer-type-def type-id='type-id-183' size-in-bits='64' id='type-id-184'/>
+ <pointer-type-def type-id='type-id-185' size-in-bits='64' id='type-id-186'/>
+ <pointer-type-def type-id='type-id-187' size-in-bits='64' id='type-id-188'/>
+ <pointer-type-def type-id='type-id-189' size-in-bits='64' id='type-id-190'/>
+ <pointer-type-def type-id='type-id-191' size-in-bits='64' id='type-id-192'/>
+ <pointer-type-def type-id='type-id-193' size-in-bits='64' id='type-id-194'/>
+ <pointer-type-def type-id='type-id-195' size-in-bits='64' id='type-id-196'/>
+ <pointer-type-def type-id='type-id-197' size-in-bits='64' id='type-id-198'/>
+ <pointer-type-def type-id='type-id-199' size-in-bits='64' id='type-id-200'/>
+ <pointer-type-def type-id='type-id-201' size-in-bits='64' id='type-id-202'/>
+ <pointer-type-def type-id='type-id-203' size-in-bits='64' id='type-id-204'/>
+ <pointer-type-def type-id='type-id-205' size-in-bits='64' id='type-id-206'/>
+ <pointer-type-def type-id='type-id-207' size-in-bits='64' id='type-id-208'/>
+ <pointer-type-def type-id='type-id-209' size-in-bits='64' id='type-id-210'/>
+ <pointer-type-def type-id='type-id-211' size-in-bits='64' id='type-id-212'/>
+ <pointer-type-def type-id='type-id-213' size-in-bits='64' id='type-id-214'/>
+ <pointer-type-def type-id='type-id-25' size-in-bits='64' id='type-id-215'/>
+ <pointer-type-def type-id='type-id-24' size-in-bits='64' id='type-id-216'/>
+ <pointer-type-def type-id='type-id-104' size-in-bits='64' id='type-id-217'/>
+ <pointer-type-def type-id='type-id-92' size-in-bits='64' id='type-id-218'/>
+ <pointer-type-def type-id='type-id-56' size-in-bits='64' id='type-id-42'/>
+ <pointer-type-def type-id='type-id-38' size-in-bits='64' id='type-id-36'/>
+ <pointer-type-def type-id='type-id-31' size-in-bits='64' id='type-id-219'/>
+ <pointer-type-def type-id='type-id-219' size-in-bits='64' id='type-id-220'/>
+ <pointer-type-def type-id='type-id-23' size-in-bits='64' id='type-id-221'/>
+ <pointer-type-def type-id='type-id-10' size-in-bits='64' id='type-id-127'/>
+ <pointer-type-def type-id='type-id-126' size-in-bits='64' id='type-id-222'/>
+ <pointer-type-def type-id='type-id-89' size-in-bits='64' id='type-id-223'/>
+ <pointer-type-def type-id='type-id-99' size-in-bits='64' id='type-id-224'/>
+ <pointer-type-def type-id='type-id-32' size-in-bits='64' id='type-id-225'/>
+ <pointer-type-def type-id='type-id-33' size-in-bits='64' id='type-id-226'/>
+ <pointer-type-def type-id='type-id-95' size-in-bits='64' id='type-id-227'/>
+ <pointer-type-def type-id='type-id-18' size-in-bits='64' id='type-id-130'/>
+ <pointer-type-def type-id='type-id-21' size-in-bits='64' id='type-id-53'/>
+ <function-decl name='nvpair_value_match' mangled-name='nvpair_value_match' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='1274' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_match'>
+ <parameter type-id='type-id-221' name='nvp' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='1274' column='1'/>
+ <parameter type-id='type-id-12' name='ai' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='1274' column='1'/>
+ <parameter type-id='type-id-45' name='value' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='1274' column='1'/>
+ <parameter type-id='type-id-132' name='ep' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='1274' column='1'/>
+ <return type-id='type-id-12'/>
+ </function-decl>
+ <function-decl name='dump_nvlist' mangled-name='dump_nvlist' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='794' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='dump_nvlist'>
+ <parameter type-id='type-id-219' name='list' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='794' column='1'/>
+ <parameter type-id='type-id-12' name='indent' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='794' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prt' mangled-name='nvlist_prt' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='766' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prt'>
+ <parameter type-id='type-id-219' name='nvl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='766' column='1'/>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='766' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_print' mangled-name='nvlist_print' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='757' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_print'>
+ <parameter type-id='type-id-39' name='fp' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='757' column='1'/>
+ <parameter type-id='type-id-219' name='nvl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='757' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctl_free' mangled-name='nvlist_prtctl_free' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='546' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_free'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='546' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctl_alloc' mangled-name='nvlist_prtctl_alloc' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='527' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_alloc'>
+ <return type-id='type-id-37'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_nvlist_array' mangled-name='nvlist_prtctlop_nvlist_array' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='467' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_nvlist_array'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='467' column='1'/>
+ <parameter type-id='type-id-182' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='467' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='467' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_string_array' mangled-name='nvlist_prtctlop_string_array' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='466' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_string_array'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='466' column='1'/>
+ <parameter type-id='type-id-166' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='466' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='466' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_uint64_array' mangled-name='nvlist_prtctlop_uint64_array' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='465' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint64_array'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='465' column='1'/>
+ <parameter type-id='type-id-212' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='465' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='465' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_int64_array' mangled-name='nvlist_prtctlop_int64_array' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='464' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int64_array'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='464' column='1'/>
+ <parameter type-id='type-id-176' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='464' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='464' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_uint32_array' mangled-name='nvlist_prtctlop_uint32_array' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='463' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint32_array'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='463' column='1'/>
+ <parameter type-id='type-id-210' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='463' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='463' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_int32_array' mangled-name='nvlist_prtctlop_int32_array' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='462' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int32_array'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='462' column='1'/>
+ <parameter type-id='type-id-174' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='462' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='462' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_uint16_array' mangled-name='nvlist_prtctlop_uint16_array' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='461' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint16_array'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='461' column='1'/>
+ <parameter type-id='type-id-208' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='461' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='461' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_int16_array' mangled-name='nvlist_prtctlop_int16_array' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='460' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int16_array'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='460' column='1'/>
+ <parameter type-id='type-id-172' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='460' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='460' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_uint8_array' mangled-name='nvlist_prtctlop_uint8_array' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='459' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint8_array'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='459' column='1'/>
+ <parameter type-id='type-id-214' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='459' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='459' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_int8_array' mangled-name='nvlist_prtctlop_int8_array' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='458' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int8_array'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='458' column='1'/>
+ <parameter type-id='type-id-178' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='458' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='458' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_byte_array' mangled-name='nvlist_prtctlop_byte_array' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='457' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_byte_array'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='457' column='1'/>
+ <parameter type-id='type-id-206' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='457' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='457' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_boolean_array' mangled-name='nvlist_prtctlop_boolean_array' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='456' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_boolean_array'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='456' column='1'/>
+ <parameter type-id='type-id-162' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='456' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='456' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_nvlist' mangled-name='nvlist_prtctlop_nvlist' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='444' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_nvlist'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='444' column='1'/>
+ <parameter type-id='type-id-180' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='444' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='444' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_hrtime' mangled-name='nvlist_prtctlop_hrtime' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='443' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_hrtime'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='443' column='1'/>
+ <parameter type-id='type-id-186' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='443' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='443' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_string' mangled-name='nvlist_prtctlop_string' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='442' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_string'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='442' column='1'/>
+ <parameter type-id='type-id-164' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='442' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='442' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_double' mangled-name='nvlist_prtctlop_double' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='441' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_double'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='441' column='1'/>
+ <parameter type-id='type-id-168' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='441' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='441' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_uint64' mangled-name='nvlist_prtctlop_uint64' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='440' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint64'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='440' column='1'/>
+ <parameter type-id='type-id-202' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='440' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='440' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_int64' mangled-name='nvlist_prtctlop_int64' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='439' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int64'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='439' column='1'/>
+ <parameter type-id='type-id-192' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='439' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='439' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_uint32' mangled-name='nvlist_prtctlop_uint32' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='438' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint32'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='438' column='1'/>
+ <parameter type-id='type-id-200' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='438' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='438' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_int32' mangled-name='nvlist_prtctlop_int32' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='437' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int32'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='437' column='1'/>
+ <parameter type-id='type-id-190' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='437' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='437' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_uint16' mangled-name='nvlist_prtctlop_uint16' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='436' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint16'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='436' column='1'/>
+ <parameter type-id='type-id-198' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='436' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='436' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_int16' mangled-name='nvlist_prtctlop_int16' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='435' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int16'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='435' column='1'/>
+ <parameter type-id='type-id-188' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='435' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='435' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_uint8' mangled-name='nvlist_prtctlop_uint8' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='434' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint8'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='434' column='1'/>
+ <parameter type-id='type-id-204' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='434' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='434' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_int8' mangled-name='nvlist_prtctlop_int8' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='433' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int8'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='433' column='1'/>
+ <parameter type-id='type-id-194' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='433' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='433' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_byte' mangled-name='nvlist_prtctlop_byte' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='432' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_byte'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='432' column='1'/>
+ <parameter type-id='type-id-196' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='432' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='432' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_boolean_value' mangled-name='nvlist_prtctlop_boolean_value' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='431' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_boolean_value'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='431' column='1'/>
+ <parameter type-id='type-id-184' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='431' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='431' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctlop_boolean' mangled-name='nvlist_prtctlop_boolean' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='430' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_boolean'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='430' column='1'/>
+ <parameter type-id='type-id-170' name='func' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='430' column='1'/>
+ <parameter type-id='type-id-53' name='private' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='430' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctl_dofmt' mangled-name='nvlist_prtctl_dofmt' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='383' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_dofmt'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='383' column='1'/>
+ <parameter type-id='type-id-124' name='which' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='383' column='1'/>
+ <parameter is-variadic='yes'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctl_setfmt' mangled-name='nvlist_prtctl_setfmt' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='350' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_setfmt'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='350' column='1'/>
+ <parameter type-id='type-id-124' name='which' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='350' column='1'/>
+ <parameter type-id='type-id-41' name='fmt' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='351' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctl_doindent' mangled-name='nvlist_prtctl_doindent' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='343' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_doindent'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='343' column='1'/>
+ <parameter type-id='type-id-12' name='onemore' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='343' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctl_setindent' mangled-name='nvlist_prtctl_setindent' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='325' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_setindent'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='325' column='1'/>
+ <parameter type-id='type-id-40' name='mode' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='325' column='1'/>
+ <parameter type-id='type-id-12' name='start' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='326' column='1'/>
+ <parameter type-id='type-id-12' name='inc' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='326' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctl_getdest' mangled-name='nvlist_prtctl_getdest' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='318' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_getdest'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='318' column='1'/>
+ <return type-id='type-id-39'/>
+ </function-decl>
+ <function-decl name='nvlist_prtctl_setdest' mangled-name='nvlist_prtctl_setdest' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='312' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_setdest'>
+ <parameter type-id='type-id-37' name='pctl' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='312' column='1'/>
+ <parameter type-id='type-id-39' name='fp' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='312' column='1'/>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='nvpair_value_match_regex' mangled-name='nvpair_value_match_regex' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='949' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_match_regex'>
+ <parameter type-id='type-id-221' name='nvp' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='949' column='1'/>
+ <parameter type-id='type-id-12' name='ai' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='949' column='1'/>
+ <parameter type-id='type-id-45' name='value' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='950' column='1'/>
+ <parameter type-id='type-id-222' name='value_regex' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='950' column='1'/>
+ <parameter type-id='type-id-132' name='ep' filepath='/home/fedora/zfs/lib/libnvpair/libnvpair.c' line='950' column='1'/>
+ <return type-id='type-id-12'/>
+ </function-decl>
+ <function-decl name='__builtin_fputs' mangled-name='fputs' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='__builtin_strchr' mangled-name='strchr' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-decl name='__builtin_fputc' mangled-name='fputc' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='type-id-21'/>
+ </function-decl>
+ <function-type size-in-bits='64' id='type-id-134'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-131'/>
+ <parameter type-id='type-id-112'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-135'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-45'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-136'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-132'/>
+ <parameter type-id='type-id-112'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-137'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-11'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-138'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-12'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-139'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-215'/>
+ <parameter type-id='type-id-112'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-140'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-216'/>
+ <parameter type-id='type-id-112'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-141'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-217'/>
+ <parameter type-id='type-id-112'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-142'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-218'/>
+ <parameter type-id='type-id-112'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-143'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-219'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-144'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-220'/>
+ <parameter type-id='type-id-112'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-145'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-87'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-146'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-109'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-147'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-25'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-148'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-24'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-149'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-104'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-150'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-92'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-151'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-89'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-152'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-99'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-153'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-32'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-154'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-33'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-155'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-95'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-156'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-223'/>
+ <parameter type-id='type-id-112'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-157'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-224'/>
+ <parameter type-id='type-id-112'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-158'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-225'/>
+ <parameter type-id='type-id-112'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-159'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-226'/>
+ <parameter type-id='type-id-112'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-160'>
+ <parameter type-id='type-id-36'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-227'/>
+ <parameter type-id='type-id-112'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-161'>
+ <parameter type-id='type-id-37'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-131'/>
+ <parameter type-id='type-id-112'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-163'>
+ <parameter type-id='type-id-37'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-45'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-165'>
+ <parameter type-id='type-id-37'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-132'/>
+ <parameter type-id='type-id-112'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-167'>
+ <parameter type-id='type-id-37'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-11'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-169'>
+ <parameter type-id='type-id-37'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-12'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-171'>
+ <parameter type-id='type-id-37'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-215'/>
+ <parameter type-id='type-id-112'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-173'>
+ <parameter type-id='type-id-37'/>
+ <parameter type-id='type-id-53'/>
+ <parameter type-id='type-id-219'/>
+ <parameter type-id='type-id-41'/>
+ <parameter type-id='type-id-216'/>
+ <parameter type-id='type-id-112'/>
+ <return type-id='type-id-12'/>
+ </function-type>
+ <function-type size-in-bits='64' id='type-id-175'>
+ <parameter type-id='type-id-37'/>
+ <paramete