Diffstat (limited to 'usr.sbin/makefs/tests')
-rw-r--r--  usr.sbin/makefs/tests/Makefile                   10
-rw-r--r--  usr.sbin/makefs/tests/Makefile.depend             1
-rw-r--r--  usr.sbin/makefs/tests/makefs_cd9660_tests.sh    120
-rw-r--r--  usr.sbin/makefs/tests/makefs_ffs_tests.sh        80
-rw-r--r--  usr.sbin/makefs/tests/makefs_msdos_tests.sh     136
-rw-r--r--  usr.sbin/makefs/tests/makefs_tests_common.sh     12
-rw-r--r--  usr.sbin/makefs/tests/makefs_zfs_tests.sh      1060
7 files changed, 1403 insertions(+), 16 deletions(-)
diff --git a/usr.sbin/makefs/tests/Makefile b/usr.sbin/makefs/tests/Makefile
index 85e4b233aea7..748bafa06211 100644
--- a/usr.sbin/makefs/tests/Makefile
+++ b/usr.sbin/makefs/tests/Makefile
@@ -1,7 +1,13 @@
-# $FreeBSD$
+.include <src.opts.mk>
ATF_TESTS_SH+= makefs_cd9660_tests
+TEST_METADATA.makefs_cd9660_tests+= required_files="/sbin/mount_cd9660"
ATF_TESTS_SH+= makefs_ffs_tests
+ATF_TESTS_SH+= makefs_msdos_tests
+TEST_METADATA.makefs_msdos_tests+= required_files="/sbin/mount_msdosfs"
+.if ${MK_ZFS} != "no"
+ATF_TESTS_SH+= makefs_zfs_tests
+.endif
BINDIR= ${TESTSDIR}
@@ -9,8 +15,6 @@ BINDIR= ${TESTSDIR}
SCRIPTS+= makefs_tests_common.sh
SCRIPTSNAME_makefs_tests_common.sh= makefs_tests_common.sh
-TEST_METADATA.makefs_cd9660_tests+= required_files="/sbin/mount_cd9660"
-
.for t in ${ATF_TESTS_SH}
TEST_METADATA.$t+= required_user="root"
.endfor
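
(Not part of the patch.) The Makefile change registers the new test programs with the FreeBSD test suite, marks every program as requiring root, records the mount helpers the cd9660 and msdos programs need, and builds the ZFS program only when MK_ZFS is enabled. Assuming a stock installworld that puts the programs under /usr/tests, a typical kyua(1) run of just the new programs looks like:

    cd /usr/tests/usr.sbin/makefs
    kyua test makefs_msdos_tests makefs_zfs_tests
    kyua report

kyua reports the cases as skipped when the required_user or required_files metadata set above is not satisfied.
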
diff --git a/usr.sbin/makefs/tests/Makefile.depend b/usr.sbin/makefs/tests/Makefile.depend
index f80275d86ab1..11aba52f82cf 100644
--- a/usr.sbin/makefs/tests/Makefile.depend
+++ b/usr.sbin/makefs/tests/Makefile.depend
@@ -1,4 +1,3 @@
-# $FreeBSD$
# Autogenerated - do NOT edit!
DIRDEPS = \
diff --git a/usr.sbin/makefs/tests/makefs_cd9660_tests.sh b/usr.sbin/makefs/tests/makefs_cd9660_tests.sh
index 8a3ac1684032..e058dfc57b7b 100644
--- a/usr.sbin/makefs/tests/makefs_cd9660_tests.sh
+++ b/usr.sbin/makefs/tests/makefs_cd9660_tests.sh
@@ -23,9 +23,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# $FreeBSD$
-#
# A note on specs:
# - A copy of the ISO-9660 spec can be found here:
@@ -54,8 +51,8 @@ common_cleanup()
check_base_iso9660_image_contents()
{
# Symlinks are treated like files when rockridge support isn't
- # specified
- check_image_contents "$@" -X c
+ # specified, and directories cannot contain a '.'.
+ check_image_contents "$@" -X c -X .g -X _g
atf_check -e empty -o empty -s exit:0 test -L $TEST_INPUTS_DIR/c
atf_check -e empty -o empty -s exit:0 test -f $TEST_MOUNT_DIR/c
@@ -377,6 +374,114 @@ o_flag_rockridge_dev_nodes_cleanup()
common_cleanup
}
+atf_test_case T_flag_dir cleanup
+T_flag_dir_body()
+{
+ timestamp=1742574909
+ check_cd9660_support
+ create_test_dirs
+
+ mkdir -p $TEST_INPUTS_DIR/dir1
+ atf_check -e empty -o empty -s exit:0 \
+ $MAKEFS -T $timestamp -o rockridge $TEST_IMAGE $TEST_INPUTS_DIR
+
+ mount_image
+ eval $(stat -s $TEST_MOUNT_DIR/dir1)
+ atf_check_equal $st_atime $timestamp
+ atf_check_equal $st_mtime $timestamp
+ atf_check_equal $st_ctime $timestamp
+}
+
+T_flag_dir_cleanup()
+{
+ common_cleanup
+}
+
+atf_test_case T_flag_F_flag cleanup
+T_flag_F_flag_body()
+{
+ atf_expect_fail "-F doesn't take precedence over -T"
+ timestamp_F=1742574909
+ timestamp_T=1742574910
+ create_test_dirs
+ mkdir -p $TEST_INPUTS_DIR/dir1
+
+ atf_check -e empty -o save:$TEST_SPEC_FILE -s exit:0 \
+ mtree -c -k "type,time" -p $TEST_INPUTS_DIR
+ change_mtree_timestamp $TEST_SPEC_FILE $timestamp_F
+ atf_check -e empty -o not-empty -s exit:0 \
+ $MAKEFS -F $TEST_SPEC_FILE -T $timestamp_T -o rockridge $TEST_IMAGE $TEST_INPUTS_DIR
+
+ mount_image
+ eval $(stat -s $TEST_MOUNT_DIR/dir1)
+ atf_check_equal $st_atime $timestamp_F
+ atf_check_equal $st_mtime $timestamp_F
+ atf_check_equal $st_ctime $timestamp_F
+}
+
+T_flag_F_flag_cleanup()
+{
+ common_cleanup
+}
+
+atf_test_case T_flag_mtree cleanup
+T_flag_mtree_body()
+{
+ timestamp=1742574909
+ create_test_dirs
+ mkdir -p $TEST_INPUTS_DIR/dir1
+
+ atf_check -e empty -o save:$TEST_SPEC_FILE -s exit:0 \
+ mtree -c -k "type" -p $TEST_INPUTS_DIR
+ atf_check -e empty -o empty -s exit:0 \
+ $MAKEFS -T $timestamp -o rockridge $TEST_IMAGE $TEST_SPEC_FILE
+
+ check_cd9660_support
+ mount_image
+ eval $(stat -s $TEST_MOUNT_DIR/dir1)
+ atf_check_equal $st_atime $timestamp
+ atf_check_equal $st_mtime $timestamp
+ atf_check_equal $st_ctime $timestamp
+}
+
+T_flag_mtree_cleanup()
+{
+ common_cleanup
+}
+
+atf_test_case duplicate_names cleanup
+duplicate_names_head()
+{
+ atf_set "descr" "Ensure shortened directory names are unique (PR283238)"
+}
+duplicate_names_body()
+{
+ check_cd9660_support
+ create_test_dirs
+
+ # Create three directories which are identical in the first 31 characters.
+ dir_prefix="this_directory_name_is_31_chars"
+ mkdir -p $TEST_INPUTS_DIR/${dir_prefix}1
+ mkdir -p $TEST_INPUTS_DIR/${dir_prefix}2
+ mkdir -p $TEST_INPUTS_DIR/${dir_prefix}3
+
+ atf_check -e empty -o empty -s exit:0 \
+ $MAKEFS -o rockridge $TEST_IMAGE $TEST_INPUTS_DIR
+
+ # Disable Rock Ridge extensions to read the plain ISO Level 2 names.
+ mount_image -r
+
+ # The specific way the short names are made unique is not important.
+ # We verify only that there are three unique names and that the unique
+ # part is at the end of the name.
+ atf_check_equal $(ls -1 $TEST_MOUNT_DIR | sort | uniq | wc -l) 3
+ atf_check_equal $(ls -1 $TEST_MOUNT_DIR | cut -c -29 | sort | uniq | wc -l) 1
+}
+duplicate_names_cleanup()
+{
+ common_cleanup
+}
+
atf_init_test_cases()
{
atf_add_test_case D_flag
@@ -395,4 +500,9 @@ atf_init_test_cases()
atf_add_test_case o_flag_publisher
atf_add_test_case o_flag_rockridge
atf_add_test_case o_flag_rockridge_dev_nodes
+ atf_add_test_case T_flag_dir
+ atf_add_test_case T_flag_F_flag
+ atf_add_test_case T_flag_mtree
+
+ atf_add_test_case duplicate_names
}
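
(Illustration only, not part of the patch.) The new cd9660 -T cases reduce to the following manual sequence; ./inputs and /mnt are placeholder paths:

    makefs -t cd9660 -T 1742574909 -o rockridge image.iso ./inputs
    md=$(mdconfig -a -f image.iso)      # prints e.g. md0
    mount_cd9660 /dev/$md /mnt
    stat -f '%a %m %c' /mnt/dir1        # expect: 1742574909 1742574909 1742574909
    umount /mnt && mdconfig -d -u $md

The duplicate_names case instead mounts with mount_image -r, i.e. without Rock Ridge, so that the shortened plain ISO names produced by makefs (the input directories share a 31-character prefix) are what end up being compared.
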
diff --git a/usr.sbin/makefs/tests/makefs_ffs_tests.sh b/usr.sbin/makefs/tests/makefs_ffs_tests.sh
index 1a415cb5f518..f828f632b06e 100644
--- a/usr.sbin/makefs/tests/makefs_ffs_tests.sh
+++ b/usr.sbin/makefs/tests/makefs_ffs_tests.sh
@@ -23,9 +23,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# $FreeBSD$
-#
MAKEFS="makefs -t ffs"
MOUNT="mount"
@@ -244,6 +241,80 @@ o_flag_version_2_cleanup()
common_cleanup
}
+
+atf_test_case T_flag_dir cleanup
+T_flag_dir_body()
+{
+ timestamp=1742574909
+ create_test_dirs
+
+ mkdir -p $TEST_INPUTS_DIR/dir1
+ atf_check -e empty -o not-empty -s exit:0 \
+ $MAKEFS -M 1m -T $timestamp $TEST_IMAGE $TEST_INPUTS_DIR
+
+ mount_image
+ eval $(stat -s $TEST_MOUNT_DIR/dir1)
+ atf_check_equal $st_atime $timestamp
+ atf_check_equal $st_mtime $timestamp
+ atf_check_equal $st_ctime $timestamp
+}
+
+T_flag_dir_cleanup()
+{
+ common_cleanup
+}
+
+atf_test_case T_flag_F_flag cleanup
+T_flag_F_flag_body()
+{
+ atf_expect_fail "-F doesn't take precedence over -T"
+ timestamp_F=1742574909
+ timestamp_T=1742574910
+ create_test_dirs
+ mkdir -p $TEST_INPUTS_DIR/dir1
+
+ atf_check -e empty -o save:$TEST_SPEC_FILE -s exit:0 \
+ mtree -c -k "type,time" -p $TEST_INPUTS_DIR
+ change_mtree_timestamp $TEST_SPEC_FILE $timestamp_F
+ atf_check -e empty -o not-empty -s exit:0 \
+ $MAKEFS -F $TEST_SPEC_FILE -T $timestamp_T -M 1m $TEST_IMAGE $TEST_INPUTS_DIR
+
+ mount_image
+ eval $(stat -s $TEST_MOUNT_DIR/dir1)
+ atf_check_equal $st_atime $timestamp_F
+ atf_check_equal $st_mtime $timestamp_F
+ atf_check_equal $st_ctime $timestamp_F
+}
+
+T_flag_F_flag_cleanup()
+{
+ common_cleanup
+}
+
+atf_test_case T_flag_mtree cleanup
+T_flag_mtree_body()
+{
+ timestamp=1742574909
+ create_test_dirs
+ mkdir -p $TEST_INPUTS_DIR/dir1
+
+ atf_check -e empty -o save:$TEST_SPEC_FILE -s exit:0 \
+ mtree -c -k "type" -p $TEST_INPUTS_DIR
+ atf_check -e empty -o not-empty -s exit:0 \
+ $MAKEFS -M 1m -T $timestamp $TEST_IMAGE $TEST_SPEC_FILE
+
+ mount_image
+ eval $(stat -s $TEST_MOUNT_DIR/dir1)
+ atf_check_equal $st_atime $timestamp
+ atf_check_equal $st_mtime $timestamp
+ atf_check_equal $st_ctime $timestamp
+}
+
+T_flag_mtree_cleanup()
+{
+ common_cleanup
+}
+
atf_init_test_cases()
{
@@ -258,4 +329,7 @@ atf_init_test_cases()
atf_add_test_case o_flag_version_1
atf_add_test_case o_flag_version_2
+ atf_add_test_case T_flag_dir
+ atf_add_test_case T_flag_F_flag
+ atf_add_test_case T_flag_mtree
}
diff --git a/usr.sbin/makefs/tests/makefs_msdos_tests.sh b/usr.sbin/makefs/tests/makefs_msdos_tests.sh
new file mode 100644
index 000000000000..fb94429b477b
--- /dev/null
+++ b/usr.sbin/makefs/tests/makefs_msdos_tests.sh
@@ -0,0 +1,136 @@
+#-
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (c) 2025 The FreeBSD Foundation
+#
+# This software was developed by Klara, Inc.
+# under sponsorship from the FreeBSD Foundation.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+
+MAKEFS="makefs -t msdos"
+MOUNT="mount_msdosfs"
+. "$(dirname "$0")/makefs_tests_common.sh"
+
+common_cleanup()
+{
+ if ! test_md_device=$(cat $TEST_MD_DEVICE_FILE); then
+ echo "$TEST_MD_DEVICE_FILE could not be opened; has an md(4) device been attached?"
+ return
+ fi
+
+ umount -f /dev/$test_md_device || :
+ mdconfig -d -u $test_md_device || :
+}
+
+check_msdosfs_support()
+{
+ kldstat -m msdosfs || \
+ atf_skip "Requires msdosfs filesystem support to be present in the kernel"
+}
+
+atf_test_case T_flag_dir cleanup
+T_flag_dir_body()
+{
+ atf_expect_fail \
+ "The msdos backend saves the wrong timestamp value" \
+ "(possibly due to the 2s resolution for FAT timestamp)"
+ timestamp=1742574909
+ check_msdosfs_support
+
+ create_test_dirs
+ mkdir -p $TEST_INPUTS_DIR/dir1
+ atf_check -e empty -o not-empty -s exit:0 \
+ $MAKEFS -T $timestamp -s 1m $TEST_IMAGE $TEST_INPUTS_DIR
+
+ mount_image
+ eval $(stat -s $TEST_MOUNT_DIR/dir1)
+ atf_check_equal $st_atime $timestamp
+ atf_check_equal $st_mtime $timestamp
+ atf_check_equal $st_ctime $timestamp
+}
+
+T_flag_dir_cleanup()
+{
+ common_cleanup
+}
+
+atf_test_case T_flag_F_flag cleanup
+T_flag_F_flag_body()
+{
+ atf_expect_fail "-F doesn't take precedence over -T"
+ timestamp_F=1742574909
+ timestamp_T=1742574910
+ create_test_dirs
+ mkdir -p $TEST_INPUTS_DIR/dir1
+
+ atf_check -e empty -o save:$TEST_SPEC_FILE -s exit:0 \
+ mtree -c -k "type,time" -p $TEST_INPUTS_DIR
+ change_mtree_timestamp $TEST_SPEC_FILE $timestamp_F
+ atf_check -e empty -o not-empty -s exit:0 \
+ $MAKEFS -F $TEST_SPEC_FILE -T $timestamp_T -s 1m $TEST_IMAGE $TEST_INPUTS_DIR
+
+ mount_image
+ eval $(stat -s $TEST_MOUNT_DIR/dir1)
+ atf_check_equal $st_atime $timestamp_F
+ atf_check_equal $st_mtime $timestamp_F
+ atf_check_equal $st_ctime $timestamp_F
+}
+
+T_flag_F_flag_cleanup()
+{
+ common_cleanup
+}
+
+atf_test_case T_flag_mtree cleanup
+T_flag_mtree_body()
+{
+ timestamp=1742574908 # Even value, timestamp precision is 2s.
+ check_msdosfs_support
+
+ create_test_dirs
+ mkdir -p $TEST_INPUTS_DIR/dir1
+ atf_check -e empty -o save:$TEST_SPEC_FILE -s exit:0 \
+ mtree -c -k "type" -p $TEST_INPUTS_DIR
+ atf_check -e empty -o not-empty -s exit:0 \
+ $MAKEFS -T $timestamp -s 1m $TEST_IMAGE $TEST_SPEC_FILE
+
+ mount_image
+ eval $(stat -s $TEST_MOUNT_DIR/dir1)
+ # FAT directory entries don't have an access time, just a date.
+ #atf_check_equal $st_atime $timestamp
+ atf_check_equal $st_mtime $timestamp
+ atf_check_equal $st_ctime $timestamp
+}
+
+T_flag_mtree_cleanup()
+{
+ common_cleanup
+}
+
+atf_init_test_cases()
+{
+ atf_add_test_case T_flag_dir
+ atf_add_test_case T_flag_F_flag
+ atf_add_test_case T_flag_mtree
+}
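
(Illustration only, not part of the patch.) The even timestamp in T_flag_mtree above matters because FAT keeps modification times with 2-second granularity, so only even epoch values survive the round trip exactly. A candidate value can be sanity-checked from the shell:

    timestamp=1742574908
    [ $((timestamp % 2)) -eq 0 ] && echo "representable with FAT's 2s mtime resolution"
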
diff --git a/usr.sbin/makefs/tests/makefs_tests_common.sh b/usr.sbin/makefs/tests/makefs_tests_common.sh
index 5eb4ee5bf9f5..edb79bc811e1 100644
--- a/usr.sbin/makefs/tests/makefs_tests_common.sh
+++ b/usr.sbin/makefs/tests/makefs_tests_common.sh
@@ -23,9 +23,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# $FreeBSD$
-#
KB=1024
: ${TMPDIR=/tmp}
@@ -141,6 +138,13 @@ mount_image()
atf_check -e empty -o save:$TEST_MD_DEVICE_FILE -s exit:0 \
mdconfig -a -f $TEST_IMAGE
atf_check -e empty -o empty -s exit:0 \
- $MOUNT /dev/$(cat $TEST_MD_DEVICE_FILE) $TEST_MOUNT_DIR
+ $MOUNT ${1} /dev/$(cat $TEST_MD_DEVICE_FILE) $TEST_MOUNT_DIR
}
+change_mtree_timestamp()
+{
+ filename="$1"
+ timestamp="$2"
+
+ sed -i "" "s/time=.*$/time=${timestamp}.0/g" "$filename"
+}
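
(Illustration only, not part of the patch.) The new change_mtree_timestamp helper rewrites every time keyword in a spec created with mtree -c -k "type,time". Given a made-up spec line such as

    dir1 type=dir time=1711111111.000000000

a call like

    change_mtree_timestamp ./input.mtree 1742574909

leaves that line reading

    dir1 type=dir time=1742574909.0

Note that the sed pattern replaces everything from "time=" to the end of the line, which is only safe as long as no other keywords follow time on the same line.
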
diff --git a/usr.sbin/makefs/tests/makefs_zfs_tests.sh b/usr.sbin/makefs/tests/makefs_zfs_tests.sh
new file mode 100644
index 000000000000..2fafce85b347
--- /dev/null
+++ b/usr.sbin/makefs/tests/makefs_zfs_tests.sh
@@ -0,0 +1,1060 @@
+#-
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (c) 2022-2023 The FreeBSD Foundation
+#
+# This software was developed by Mark Johnston under sponsorship from
+# the FreeBSD Foundation.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+
+MAKEFS="makefs -t zfs -o verify-txgs=true -o poolguid=$$"
+ZFS_POOL_NAME="makefstest$$"
+TEST_ZFS_POOL_NAME="$TMPDIR/poolname"
+
+. "$(dirname "$0")/makefs_tests_common.sh"
+
+common_cleanup()
+{
+ local pool md
+
+ # Try to force a TXG, this can help catch bugs by triggering a panic.
+ sync
+
+ pool=$(cat $TEST_ZFS_POOL_NAME)
+ if zpool list "$pool" >/dev/null; then
+ zpool destroy "$pool"
+ fi
+
+ md=$(cat $TEST_MD_DEVICE_FILE)
+ if [ -c /dev/"$md" ]; then
+ mdconfig -d -u "$md"
+ fi
+}
+
+import_image()
+{
+ atf_check -e empty -o save:$TEST_MD_DEVICE_FILE -s exit:0 \
+ mdconfig -a -f $TEST_IMAGE
+ atf_check -o ignore -e empty -s exit:0 \
+ zdb -e -p /dev/$(cat $TEST_MD_DEVICE_FILE) -mmm -ddddd $ZFS_POOL_NAME
+ atf_check zpool import -R $TEST_MOUNT_DIR $ZFS_POOL_NAME
+ echo "$ZFS_POOL_NAME" > $TEST_ZFS_POOL_NAME
+}
+
+#
+# Test autoexpansion of the vdev.
+#
+# The pool is initially 10GB, so we get 10GB minus one metaslab's worth of
+# usable space for data. Then the pool is expanded to 50GB, and the amount of
+# usable space is 50GB minus one metaslab.
+#
+atf_test_case autoexpand cleanup
+autoexpand_body()
+{
+ local mssize poolsize poolsize1 newpoolsize
+
+ create_test_inputs
+
+ mssize=$((128 * 1024 * 1024))
+ poolsize=$((10 * 1024 * 1024 * 1024))
+ atf_check $MAKEFS -s $poolsize -o mssize=$mssize -o rootpath=/ \
+ -o poolname=$ZFS_POOL_NAME \
+ $TEST_IMAGE $TEST_INPUTS_DIR
+
+ newpoolsize=$((50 * 1024 * 1024 * 1024))
+ truncate -s $newpoolsize $TEST_IMAGE
+
+ import_image
+
+ check_image_contents
+
+ poolsize1=$(zpool list -Hp -o size $ZFS_POOL_NAME)
+ atf_check [ $((poolsize1 + $mssize)) -eq $poolsize ]
+
+ atf_check zpool online -e $ZFS_POOL_NAME /dev/$(cat $TEST_MD_DEVICE_FILE)
+
+ check_image_contents
+
+ poolsize1=$(zpool list -Hp -o size $ZFS_POOL_NAME)
+ atf_check [ $((poolsize1 + $mssize)) -eq $newpoolsize ]
+}
+autoexpand_cleanup()
+{
+ common_cleanup
+}
+
+#
+# Test with some default layout defined by the common code.
+#
+atf_test_case basic cleanup
+basic_body()
+{
+ create_test_inputs
+
+ atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ $TEST_IMAGE $TEST_INPUTS_DIR
+
+ import_image
+
+ check_image_contents
+}
+basic_cleanup()
+{
+ common_cleanup
+}
+
+#
+# Try configuring various compression algorithms.
+#
+atf_test_case compression cleanup
+compression_body()
+{
+ create_test_inputs
+
+ cd $TEST_INPUTS_DIR
+ mkdir dir
+ mkdir dir2
+ cd -
+
+ for alg in off on lzjb gzip gzip-1 gzip-2 gzip-3 gzip-4 \
+ gzip-5 gzip-6 gzip-7 gzip-8 gzip-9 zle lz4 zstd; do
+ atf_check $MAKEFS -s 1g -o rootpath=/ \
+ -o poolname=$ZFS_POOL_NAME \
+ -o fs=${ZFS_POOL_NAME}\;compression=$alg \
+ -o fs=${ZFS_POOL_NAME}/dir \
+ -o fs=${ZFS_POOL_NAME}/dir2\;compression=off \
+ $TEST_IMAGE $TEST_INPUTS_DIR
+
+ import_image
+
+ check_image_contents
+
+ if [ $alg = gzip-6 ]; then
+ # ZFS reports gzip-6 as just gzip since it uses
+ # a default compression level of 6.
+ alg=gzip
+ fi
+ # The "dir" dataset's compression algorithm should be
+ # inherited from the root dataset.
+ atf_check -o inline:$alg\\n -e empty -s exit:0 \
+ zfs get -H -o value compression ${ZFS_POOL_NAME}
+ atf_check -o inline:$alg\\n -e empty -s exit:0 \
+ zfs get -H -o value compression ${ZFS_POOL_NAME}/dir
+ atf_check -o inline:off\\n -e empty -s exit:0 \
+ zfs get -H -o value compression ${ZFS_POOL_NAME}/dir2
+
+ atf_check -e ignore dd if=/dev/random \
+ of=${TEST_MOUNT_DIR}/dir/random bs=1M count=10
+ atf_check -e ignore dd if=/dev/zero \
+ of=${TEST_MOUNT_DIR}/dir/zero bs=1M count=10
+ atf_check -e ignore dd if=/dev/zero \
+ of=${TEST_MOUNT_DIR}/dir2/zero bs=1M count=10
+
+ # Export and reimport to ensure that everything is
+ # flushed to disk.
+ atf_check zpool export ${ZFS_POOL_NAME}
+ atf_check -o ignore -e empty -s exit:0 \
+ zdb -e -p /dev/$(cat $TEST_MD_DEVICE_FILE) -mmm -ddddd \
+ $ZFS_POOL_NAME
+ atf_check zpool import -R $TEST_MOUNT_DIR $ZFS_POOL_NAME
+
+ if [ $alg = off ]; then
+ # If compression is off, the files should be the
+ # same size as the input.
+ atf_check -o match:"^11[[:space:]]+${TEST_MOUNT_DIR}/dir/random" \
+ du -m ${TEST_MOUNT_DIR}/dir/random
+ atf_check -o match:"^11[[:space:]]+${TEST_MOUNT_DIR}/dir/zero" \
+ du -m ${TEST_MOUNT_DIR}/dir/zero
+ atf_check -o match:"^11[[:space:]]+${TEST_MOUNT_DIR}/dir2/zero" \
+ du -m ${TEST_MOUNT_DIR}/dir2/zero
+ else
+ # If compression is on, the dir/zero file ought
+ # to be smaller.
+ atf_check -o match:"^1[[:space:]]+${TEST_MOUNT_DIR}/dir/zero" \
+ du -m ${TEST_MOUNT_DIR}/dir/zero
+ atf_check -o match:"^11[[:space:]]+${TEST_MOUNT_DIR}/dir/random" \
+ du -m ${TEST_MOUNT_DIR}/dir/random
+ atf_check -o match:"^11[[:space:]]+${TEST_MOUNT_DIR}/dir2/zero" \
+ du -m ${TEST_MOUNT_DIR}/dir2/zero
+ fi
+
+ atf_check zpool destroy ${ZFS_POOL_NAME}
+ atf_check rm -f ${TEST_ZFS_POOL_NAME}
+ atf_check mdconfig -d -u $(cat ${TEST_MD_DEVICE_FILE})
+ atf_check rm -f ${TEST_MD_DEVICE_FILE}
+ done
+}
+compression_cleanup()
+{
+ common_cleanup
+}
+
+#
+# Try destroying a dataset that was created by makefs.
+#
+atf_test_case dataset_removal cleanup
+dataset_removal_body()
+{
+ create_test_dirs
+
+ cd $TEST_INPUTS_DIR
+ mkdir dir
+ cd -
+
+ atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ -o fs=${ZFS_POOL_NAME}/dir \
+ $TEST_IMAGE $TEST_INPUTS_DIR
+
+ import_image
+
+ check_image_contents
+
+ atf_check zfs destroy ${ZFS_POOL_NAME}/dir
+}
+dataset_removal_cleanup()
+{
+ common_cleanup
+}
+
+#
+# Make sure that we can handle some special file types. Anything other than
+# regular files, symlinks and directories is ignored.
+#
+atf_test_case devfs cleanup
+devfs_body()
+{
+ atf_check mkdir dev
+ atf_check mount -t devfs none ./dev
+
+ atf_check -e match:"skipping unhandled" $MAKEFS -s 1g -o rootpath=/ \
+ -o poolname=$ZFS_POOL_NAME $TEST_IMAGE ./dev
+
+ import_image
+}
+devfs_cleanup()
+{
+ common_cleanup
+ umount -f ./dev
+}
+
+#
+# Make sure that we can create and remove an empty directory.
+#
+atf_test_case empty_dir cleanup
+empty_dir_body()
+{
+ create_test_dirs
+
+ cd $TEST_INPUTS_DIR
+ mkdir dir
+ cd -
+
+ atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ $TEST_IMAGE $TEST_INPUTS_DIR
+
+ import_image
+
+ check_image_contents
+
+ atf_check rmdir ${TEST_MOUNT_DIR}/dir
+}
+empty_dir_cleanup()
+{
+ common_cleanup
+}
+
+atf_test_case empty_fs cleanup
+empty_fs_body()
+{
+ create_test_dirs
+
+ atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ $TEST_IMAGE $TEST_INPUTS_DIR
+
+ import_image
+
+ check_image_contents
+}
+empty_fs_cleanup()
+{
+ common_cleanup
+}
+
+atf_test_case file_extend cleanup
+file_extend_body()
+{
+ local i start
+
+ create_test_dirs
+
+ # Create a file slightly longer than the maximum block size.
+ start=132
+ dd if=/dev/random of=${TEST_INPUTS_DIR}/foo bs=1k count=$start
+ md5 -q ${TEST_INPUTS_DIR}/foo > foo.md5
+
+ atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ $TEST_IMAGE $TEST_INPUTS_DIR
+
+ import_image
+
+ check_image_contents
+
+ i=0
+ while [ $i -lt 1000 ]; do
+ dd if=/dev/random of=${TEST_MOUNT_DIR}/foo bs=1k count=1 \
+ seek=$(($i + $start)) conv=notrunc
+ # Make sure that the first $start blocks are unmodified.
+ dd if=${TEST_MOUNT_DIR}/foo bs=1k count=$start of=foo.copy
+ atf_check -o file:foo.md5 md5 -q foo.copy
+ i=$(($i + 1))
+ done
+}
+file_extend_cleanup()
+{
+ common_cleanup
+}
+
+atf_test_case file_sizes cleanup
+file_sizes_body()
+{
+ local i
+
+ create_test_dirs
+ cd $TEST_INPUTS_DIR
+
+ i=1
+ while [ $i -lt $((1 << 20)) ]; do
+ truncate -s $i ${i}.1
+ truncate -s $(($i - 1)) ${i}.2
+ truncate -s $(($i + 1)) ${i}.3
+ i=$(($i << 1))
+ done
+
+ cd -
+
+ # XXXMJ this creates sparse files, make sure makefs doesn't
+ # preserve the sparseness.
+ # XXXMJ need to test with larger files (at least 128MB for L2 indirs)
+ atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ $TEST_IMAGE $TEST_INPUTS_DIR
+
+ import_image
+
+ check_image_contents
+}
+file_sizes_cleanup()
+{
+ common_cleanup
+}
+
+atf_test_case hard_links cleanup
+hard_links_body()
+{
+ local f
+
+ create_test_dirs
+ cd $TEST_INPUTS_DIR
+
+ mkdir dir
+ echo "hello" > 1
+ ln 1 2
+ ln 1 dir/1
+
+ echo "goodbye" > dir/a
+ ln dir/a dir/b
+ ln dir/a a
+
+ cd -
+
+ atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ $TEST_IMAGE $TEST_INPUTS_DIR
+
+ import_image
+
+ check_image_contents
+
+ stat -f '%i' ${TEST_MOUNT_DIR}/1 > ./ino
+ stat -f '%l' ${TEST_MOUNT_DIR}/1 > ./nlink
+ for f in 1 2 dir/1; do
+ atf_check -o file:./nlink -e empty -s exit:0 \
+ stat -f '%l' ${TEST_MOUNT_DIR}/${f}
+ atf_check -o file:./ino -e empty -s exit:0 \
+ stat -f '%i' ${TEST_MOUNT_DIR}/${f}
+ atf_check cmp -s ${TEST_INPUTS_DIR}/1 ${TEST_MOUNT_DIR}/${f}
+ done
+
+ stat -f '%i' ${TEST_MOUNT_DIR}/dir/a > ./ino
+ stat -f '%l' ${TEST_MOUNT_DIR}/dir/a > ./nlink
+ for f in dir/a dir/b a; do
+ atf_check -o file:./nlink -e empty -s exit:0 \
+ stat -f '%l' ${TEST_MOUNT_DIR}/${f}
+ atf_check -o file:./ino -e empty -s exit:0 \
+ stat -f '%i' ${TEST_MOUNT_DIR}/${f}
+ atf_check cmp -s ${TEST_INPUTS_DIR}/dir/a ${TEST_MOUNT_DIR}/${f}
+ done
+}
+hard_links_cleanup()
+{
+ common_cleanup
+}
+
+# Allocate enough dnodes from an object set that the meta dnode needs to use
+# indirect blocks.
+atf_test_case indirect_dnode_array cleanup
+indirect_dnode_array_body()
+{
+ local count i
+
+ # How many dnodes do we need to allocate? Well, the data block size
+ # for meta dnodes is always 16KB, so with a dnode size of 512B we get
+ # 32 dnodes per direct block. The maximum indirect block size is 128KB
+ # and that can fit 1024 block pointers, so we need at least 32 * 1024
+ # files to force the use of two levels of indirection.
+ #
+ # Unfortunately that number of files makes the test run quite slowly,
+ # so we settle for a single indirect block for now...
+ count=$(jot -r 1 32 1024)
+
+ create_test_dirs
+ cd $TEST_INPUTS_DIR
+ for i in $(seq 1 $count); do
+ touch $i
+ done
+ cd -
+
+ atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ $TEST_IMAGE $TEST_INPUTS_DIR
+
+ import_image
+
+ check_image_contents
+}
+indirect_dnode_array_cleanup()
+{
+ common_cleanup
+}
+
+#
+# Create some files with long names, so as to test fat ZAP handling.
+#
+atf_test_case long_file_name cleanup
+long_file_name_body()
+{
+ local dir i
+
+ create_test_dirs
+ cd $TEST_INPUTS_DIR
+
+ # micro ZAP keys can be at most 50 bytes.
+ for i in $(seq 1 60); do
+ touch $(jot -s '' $i 1 1)
+ done
+ dir=$(jot -s '' 61 1 1)
+ mkdir $dir
+ for i in $(seq 1 60); do
+ touch ${dir}/$(jot -s '' $i 1 1)
+ done
+
+ cd -
+
+ atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ $TEST_IMAGE $TEST_INPUTS_DIR
+
+ import_image
+
+ check_image_contents
+
+ # Add a directory entry in the hope that OpenZFS might catch a bug
+ # in makefs' fat ZAP encoding.
+ touch ${TEST_MOUNT_DIR}/foo
+}
+long_file_name_cleanup()
+{
+ common_cleanup
+}
+
+#
+# Exercise handling of multiple datasets.
+#
+atf_test_case multi_dataset_1 cleanup
+multi_dataset_1_body()
+{
+ create_test_dirs
+ cd $TEST_INPUTS_DIR
+
+ mkdir dir1
+ echo a > dir1/a
+ mkdir dir2
+ echo b > dir2/b
+
+ cd -
+
+ atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ -o fs=${ZFS_POOL_NAME}/dir1 -o fs=${ZFS_POOL_NAME}/dir2 \
+ $TEST_IMAGE $TEST_INPUTS_DIR
+
+ import_image
+
+ check_image_contents
+
+ # Make sure that we have three datasets with the expected mount points.
+ atf_check -o inline:${ZFS_POOL_NAME}\\n -e empty -s exit:0 \
+ zfs list -H -o name ${ZFS_POOL_NAME}
+ atf_check -o inline:${TEST_MOUNT_DIR}\\n -e empty -s exit:0 \
+ zfs list -H -o mountpoint ${ZFS_POOL_NAME}
+
+ atf_check -o inline:${ZFS_POOL_NAME}/dir1\\n -e empty -s exit:0 \
+ zfs list -H -o name ${ZFS_POOL_NAME}/dir1
+ atf_check -o inline:${TEST_MOUNT_DIR}/dir1\\n -e empty -s exit:0 \
+ zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir1
+
+ atf_check -o inline:${ZFS_POOL_NAME}/dir2\\n -e empty -s exit:0 \
+ zfs list -H -o name ${ZFS_POOL_NAME}/dir2
+ atf_check -o inline:${TEST_MOUNT_DIR}/dir2\\n -e empty -s exit:0 \
+ zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir2
+}
+multi_dataset_1_cleanup()
+{
+ common_cleanup
+}
+
+#
+# Create a pool with two datasets, where the root dataset is mounted below
+# the child dataset.
+#
+atf_test_case multi_dataset_2 cleanup
+multi_dataset_2_body()
+{
+ create_test_dirs
+ cd $TEST_INPUTS_DIR
+
+ mkdir dir1
+ echo a > dir1/a
+ mkdir dir2
+ echo b > dir2/b
+
+ cd -
+
+ atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ -o fs=${ZFS_POOL_NAME}/dir1\;mountpoint=/ \
+ -o fs=${ZFS_POOL_NAME}\;mountpoint=/dir1 \
+ $TEST_IMAGE $TEST_INPUTS_DIR
+
+ import_image
+
+ check_image_contents
+}
+multi_dataset_2_cleanup()
+{
+ common_cleanup
+}
+
+#
+# Create a dataset with a non-existent mount point.
+#
+atf_test_case multi_dataset_3 cleanup
+multi_dataset_3_body()
+{
+ create_test_dirs
+ cd $TEST_INPUTS_DIR
+
+ mkdir dir1
+ echo a > dir1/a
+
+ cd -
+
+ atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ -o fs=${ZFS_POOL_NAME}/dir1 \
+ -o fs=${ZFS_POOL_NAME}/dir2 \
+ $TEST_IMAGE $TEST_INPUTS_DIR
+
+ import_image
+
+ atf_check -o inline:${TEST_MOUNT_DIR}/dir2\\n -e empty -s exit:0 \
+ zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir2
+
+ # Mounting dir2 should have created a directory called dir2. Go
+ # back and create it in the staging tree before comparing.
+ atf_check mkdir ${TEST_INPUTS_DIR}/dir2
+
+ check_image_contents
+}
+multi_dataset_3_cleanup()
+{
+ common_cleanup
+}
+
+#
+# Create an unmounted dataset.
+#
+atf_test_case multi_dataset_4 cleanup
+multi_dataset_4_body()
+{
+ create_test_dirs
+ cd $TEST_INPUTS_DIR
+
+ mkdir dir1
+ echo a > dir1/a
+
+ cd -
+
+ atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ -o fs=${ZFS_POOL_NAME}/dir1\;canmount=noauto\;mountpoint=none \
+ $TEST_IMAGE $TEST_INPUTS_DIR
+
+ import_image
+
+ atf_check -o inline:none\\n -e empty -s exit:0 \
+ zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir1
+
+ check_image_contents
+
+ atf_check zfs set mountpoint=/dir1 ${ZFS_POOL_NAME}/dir1
+ atf_check zfs mount ${ZFS_POOL_NAME}/dir1
+ atf_check -o inline:${TEST_MOUNT_DIR}/dir1\\n -e empty -s exit:0 \
+ zfs list -H -o mountpoint ${ZFS_POOL_NAME}/dir1
+
+ # dir1/a should be part of the root dataset, not dir1.
+ atf_check -s not-exit:0 -e not-empty stat ${TEST_MOUNT_DIR}/dir1/a
+}
+multi_dataset_4_cleanup()
+{
+ common_cleanup
+}
+
+#
+# Validate handling of multiple staging directories.
+#
+atf_test_case multi_staging_1 cleanup
+multi_staging_1_body()
+{
+ local tmpdir
+
+ create_test_dirs
+ cd $TEST_INPUTS_DIR
+
+ mkdir dir1
+ echo a > a
+ echo a > dir1/a
+ echo z > z
+
+ cd -
+
+ tmpdir=$(mktemp -d)
+ cd $tmpdir
+
+ mkdir dir2 dir2/dir3
+ echo b > dir2/b
+ echo c > dir2/dir3/c
+ ln -s dir2/dir3/c s
+
+ cd -
+
+ atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ $TEST_IMAGE ${TEST_INPUTS_DIR} $tmpdir
+
+ import_image
+
+ check_image_contents -d $tmpdir
+}
+multi_staging_1_cleanup()
+{
+ common_cleanup
+}
+
+atf_test_case multi_staging_2 cleanup
+multi_staging_2_body()
+{
+ local tmpdir
+
+ create_test_dirs
+ cd $TEST_INPUTS_DIR
+
+ mkdir dir
+ echo a > dir/foo
+ echo b > dir/bar
+
+ cd -
+
+ tmpdir=$(mktemp -d)
+ cd $tmpdir
+
+ mkdir dir
+ echo c > dir/baz
+
+ cd -
+
+ atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ $TEST_IMAGE ${TEST_INPUTS_DIR} $tmpdir
+
+ import_image
+
+ # check_image_contents can't easily handle merged directories, so
+ # just check that the merged directory contains the files we expect.
+ atf_check -o not-empty stat ${TEST_MOUNT_DIR}/dir/foo
+ atf_check -o not-empty stat ${TEST_MOUNT_DIR}/dir/bar
+ atf_check -o not-empty stat ${TEST_MOUNT_DIR}/dir/baz
+
+ if [ "$(ls ${TEST_MOUNT_DIR}/dir | wc -l)" -ne 3 ]; then
+ atf_fail "Expected 3 files in ${TEST_MOUNT_DIR}/dir"
+ fi
+}
+multi_staging_2_cleanup()
+{
+ common_cleanup
+}
+
+#
+# Rudimentary test to verify that two ZFS images created using the same
+# parameters and input hierarchy are byte-identical. In particular, makefs(1)
+# does not preserve file access times.
+#
+atf_test_case reproducible cleanup
+reproducible_body()
+{
+ create_test_inputs
+
+ atf_check $MAKEFS -s 512m -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ ${TEST_IMAGE}.1 $TEST_INPUTS_DIR
+
+ atf_check $MAKEFS -s 512m -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ ${TEST_IMAGE}.2 $TEST_INPUTS_DIR
+
+ # XXX-MJ cmp(1) is really slow
+ atf_check cmp ${TEST_IMAGE}.1 ${TEST_IMAGE}.2
+}
+reproducible_cleanup()
+{
+}
+
+#
+# Verify that we can take a snapshot of a generated dataset.
+#
+atf_test_case snapshot cleanup
+snapshot_body()
+{
+ create_test_dirs
+ cd $TEST_INPUTS_DIR
+
+ mkdir dir
+ echo "hello" > dir/hello
+ echo "goodbye" > goodbye
+
+ cd -
+
+ atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ $TEST_IMAGE $TEST_INPUTS_DIR
+
+ import_image
+
+ atf_check zfs snapshot ${ZFS_POOL_NAME}@1
+}
+snapshot_cleanup()
+{
+ common_cleanup
+}
+
+#
+# Check handling of symbolic links.
+#
+atf_test_case soft_links cleanup
+soft_links_body()
+{
+ create_test_dirs
+ cd $TEST_INPUTS_DIR
+
+ mkdir dir
+ ln -s a a
+ ln -s dir/../a a
+ ln -s dir/b b
+ echo 'c' > dir
+ ln -s dir/c c
+ # XXX-MJ overflows bonus buffer ln -s $(jot -s '' 320 1 1) 1
+
+ cd -
+
+ atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ $TEST_IMAGE $TEST_INPUTS_DIR
+
+ import_image
+
+ check_image_contents
+}
+soft_links_cleanup()
+{
+ common_cleanup
+}
+
+#
+# Verify that we can set properties on the root dataset.
+#
+atf_test_case root_props cleanup
+root_props_body()
+{
+ create_test_inputs
+
+ atf_check $MAKEFS -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ -o fs=${ZFS_POOL_NAME}\;atime=off\;setuid=off \
+ $TEST_IMAGE $TEST_INPUTS_DIR
+
+ import_image
+
+ check_image_contents
+
+ atf_check -o inline:off\\n -e empty -s exit:0 \
+ zfs get -H -o value atime $ZFS_POOL_NAME
+ atf_check -o inline:local\\n -e empty -s exit:0 \
+ zfs get -H -o source atime $ZFS_POOL_NAME
+ atf_check -o inline:off\\n -e empty -s exit:0 \
+ zfs get -H -o value setuid $ZFS_POOL_NAME
+ atf_check -o inline:local\\n -e empty -s exit:0 \
+ zfs get -H -o source setuid $ZFS_POOL_NAME
+}
+root_props_cleanup()
+{
+ common_cleanup
+}
+
+#
+# Verify that usedds and usedchild props are set properly.
+#
+atf_test_case used_space_props cleanup
+used_space_props_body()
+{
+ local used usedds usedchild
+ local rootmb childmb totalmb fudge
+ local status
+
+ create_test_dirs
+ cd $TEST_INPUTS_DIR
+ mkdir dir
+
+ rootmb=17
+ childmb=39
+ totalmb=$(($rootmb + $childmb))
+ fudge=$((2 * 1024 * 1024))
+
+ atf_check -e ignore dd if=/dev/random of=foo bs=1M count=$rootmb
+ atf_check -e ignore dd if=/dev/random of=dir/bar bs=1M count=$childmb
+
+ cd -
+
+ atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ -o fs=${ZFS_POOL_NAME}/dir \
+ $TEST_IMAGE $TEST_INPUTS_DIR
+
+ import_image
+
+ # Make sure that each dataset's space usage is no more than 2MB larger
+ # than its files. This number is magic and might need to change
+ # someday.
+ usedds=$(zfs list -o usedds -Hp ${ZFS_POOL_NAME})
+ atf_check test $usedds -gt $(($rootmb * 1024 * 1024)) -a \
+ $usedds -le $(($rootmb * 1024 * 1024 + $fudge))
+ usedds=$(zfs list -o usedds -Hp ${ZFS_POOL_NAME}/dir)
+ atf_check test $usedds -gt $(($childmb * 1024 * 1024)) -a \
+ $usedds -le $(($childmb * 1024 * 1024 + $fudge))
+
+ # Make sure that the usedchild property value makes sense: the parent's
+ # value corresponds to the size of the child, and the child has no
+ # children.
+ usedchild=$(zfs list -o usedchild -Hp ${ZFS_POOL_NAME})
+ atf_check test $usedchild -gt $(($childmb * 1024 * 1024)) -a \
+ $usedchild -le $(($childmb * 1024 * 1024 + $fudge))
+ atf_check -o inline:'0\n' \
+ zfs list -Hp -o usedchild ${ZFS_POOL_NAME}/dir
+
+ # Make sure that the used property value makes sense: the parent's
+ # value is the sum of the two sizes, and the child's value is the
+ # same as its usedds value, which has already been checked.
+ used=$(zfs list -o used -Hp ${ZFS_POOL_NAME})
+ atf_check test $used -gt $(($totalmb * 1024 * 1024)) -a \
+ $used -le $(($totalmb * 1024 * 1024 + 2 * $fudge))
+ used=$(zfs list -o used -Hp ${ZFS_POOL_NAME}/dir)
+ atf_check -o inline:$used'\n' \
+ zfs list -Hp -o usedds ${ZFS_POOL_NAME}/dir
+
+ # Neither dataset has snapshots.
+ atf_check -o inline:'0\n' zfs list -Hp -o usedsnap ${ZFS_POOL_NAME}
+ atf_check -o inline:'0\n' zfs list -Hp -o usedsnap ${ZFS_POOL_NAME}/dir
+}
+used_space_props_cleanup()
+{
+ common_cleanup
+}
+
+# Verify that file permissions are set properly. Make sure that non-executable
+# files can't be executed.
+atf_test_case perms cleanup
+perms_body()
+{
+ local mode
+
+ create_test_dirs
+ cd $TEST_INPUTS_DIR
+
+ for mode in $(seq 0 511); do
+ mode=$(printf "%04o\n" $mode)
+ echo 'echo a' > $mode
+ atf_check chmod $mode $mode
+ done
+
+ cd -
+
+ atf_check $MAKEFS -s 1g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ $TEST_IMAGE $TEST_INPUTS_DIR
+
+ import_image
+
+ check_image_contents
+
+ for mode in $(seq 0 511); do
+ mode=$(printf "%04o\n" $mode)
+ if [ $(($mode & 0111)) -eq 0 ]; then
+ atf_check -s not-exit:0 -e match:"Permission denied" \
+ ${TEST_INPUTS_DIR}/$mode
+ fi
+ if [ $(($mode & 0001)) -eq 0 ]; then
+ atf_check -s not-exit:0 -e match:"Permission denied" \
+ su -m tests -c ${TEST_INPUTS_DIR}/$mode
+ fi
+ done
+
+}
+perms_cleanup()
+{
+ common_cleanup
+}
+
+#
+# Verify that -T timestamps are honored.
+#
+atf_test_case T_flag_dir cleanup
+T_flag_dir_body()
+{
+ timestamp=1742574909
+ create_test_dirs
+ mkdir -p $TEST_INPUTS_DIR/dir1
+
+ atf_check $MAKEFS -T $timestamp -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ $TEST_IMAGE $TEST_INPUTS_DIR
+
+ import_image
+ eval $(stat -s $TEST_MOUNT_DIR/dir1)
+ atf_check_equal $st_atime $timestamp
+ atf_check_equal $st_mtime $timestamp
+ atf_check_equal $st_ctime $timestamp
+}
+
+T_flag_dir_cleanup()
+{
+ common_cleanup
+}
+
+atf_test_case T_flag_F_flag cleanup
+T_flag_F_flag_body()
+{
+ atf_expect_fail "-F doesn't take precedence over -T"
+ timestamp_F=1742574909
+ timestamp_T=1742574910
+ create_test_dirs
+ mkdir -p $TEST_INPUTS_DIR/dir1
+
+ atf_check -e empty -o save:$TEST_SPEC_FILE -s exit:0 \
+ mtree -c -k "type,time" -p $TEST_INPUTS_DIR
+ change_mtree_timestamp $TEST_SPEC_FILE $timestamp_F
+ atf_check -e empty -o not-empty -s exit:0 \
+ $MAKEFS -F $TEST_SPEC_FILE -T $timestamp_T -s 10g -o rootpath=/ \
+ -o poolname=$ZFS_POOL_NAME $TEST_IMAGE $TEST_INPUTS_DIR
+
+ import_image
+ eval $(stat -s $TEST_MOUNT_DIR/dir1)
+ atf_check_equal $st_atime $timestamp_F
+ atf_check_equal $st_mtime $timestamp_F
+ atf_check_equal $st_ctime $timestamp_F
+}
+
+T_flag_F_flag_cleanup()
+{
+ common_cleanup
+}
+
+atf_test_case T_flag_mtree cleanup
+T_flag_mtree_body()
+{
+ timestamp=1742574909
+ create_test_dirs
+ mkdir -p $TEST_INPUTS_DIR/dir1
+
+ atf_check -e empty -o save:$TEST_SPEC_FILE -s exit:0 \
+ mtree -c -k "type" -p $TEST_INPUTS_DIR
+ atf_check $MAKEFS -T $timestamp -s 10g -o rootpath=/ -o poolname=$ZFS_POOL_NAME \
+ $TEST_IMAGE $TEST_SPEC_FILE
+
+ import_image
+ eval $(stat -s $TEST_MOUNT_DIR/dir1)
+ atf_check_equal $st_atime $timestamp
+ atf_check_equal $st_mtime $timestamp
+ atf_check_equal $st_ctime $timestamp
+}
+
+T_flag_mtree_cleanup()
+{
+ common_cleanup
+}
+
+atf_init_test_cases()
+{
+ atf_add_test_case autoexpand
+ atf_add_test_case basic
+ atf_add_test_case compression
+ atf_add_test_case dataset_removal
+ atf_add_test_case devfs
+ atf_add_test_case empty_dir
+ atf_add_test_case empty_fs
+ atf_add_test_case file_extend
+ atf_add_test_case file_sizes
+ atf_add_test_case hard_links
+ atf_add_test_case indirect_dnode_array
+ atf_add_test_case long_file_name
+ atf_add_test_case multi_dataset_1
+ atf_add_test_case multi_dataset_2
+ atf_add_test_case multi_dataset_3
+ atf_add_test_case multi_dataset_4
+ atf_add_test_case multi_staging_1
+ atf_add_test_case multi_staging_2
+ atf_add_test_case reproducible
+ atf_add_test_case snapshot
+ atf_add_test_case soft_links
+ atf_add_test_case root_props
+ atf_add_test_case used_space_props
+ atf_add_test_case perms
+ atf_add_test_case T_flag_dir
+ atf_add_test_case T_flag_F_flag
+ atf_add_test_case T_flag_mtree
+
+ # XXXMJ tests:
+ # - test with different ashifts (at least, 9 and 12), different image sizes
+ # - create datasets in imported pool
+}
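
(Illustration only, not part of the patch.) The sizing argument in the indirect_dnode_array comment can be spelled out as shell arithmetic; the 128-byte block pointer size is implied by the comment's figure of 1024 pointers per 128KB indirect block:

    dnode_size=512                        # bytes per dnode
    meta_blksz=$((16 * 1024))             # meta dnode data block size
    indir_blksz=$((128 * 1024))           # maximum indirect block size
    blkptr_size=$((indir_blksz / 1024))   # 128 bytes per block pointer
    echo $((meta_blksz / dnode_size))     # 32 dnodes per direct block
    echo $((indir_blksz / blkptr_size))   # 1024 block pointers per indirect block
    echo $(( (meta_blksz / dnode_size) * (indir_blksz / blkptr_size) ))   # 32768 files for two levels

As the comment notes, the test settles for a random count between 32 and 1024 files, which keeps the runtime down while exercising at most a single level of indirection.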