diff options
Diffstat (limited to 'sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint')
23 files changed, 1734 insertions, 0 deletions
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/Makefile.am new file mode 100644 index 000000000000..cc1c1183db79 --- /dev/null +++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/Makefile.am @@ -0,0 +1,26 @@ +pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/pool_checkpoint +dist_pkgdata_SCRIPTS = \ + cleanup.ksh \ + setup.ksh \ + checkpoint_after_rewind.ksh \ + checkpoint_big_rewind.ksh \ + checkpoint_capacity.ksh \ + checkpoint_conf_change.ksh \ + checkpoint_discard_busy.ksh \ + checkpoint_discard.ksh \ + checkpoint_discard_many.ksh \ + checkpoint_indirect.ksh \ + checkpoint_invalid.ksh \ + checkpoint_lun_expsz.ksh \ + checkpoint_open.ksh \ + checkpoint_removal.ksh \ + checkpoint_rewind.ksh \ + checkpoint_ro_rewind.ksh \ + checkpoint_sm_scale.ksh \ + checkpoint_twice.ksh \ + checkpoint_vdev_add.ksh \ + checkpoint_zdb.ksh \ + checkpoint_zhack_feat.ksh + +dist_pkgdata_DATA = \ + pool_checkpoint.kshlib diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_after_rewind.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_after_rewind.ksh new file mode 100755 index 000000000000..c1dec30aa50d --- /dev/null +++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_after_rewind.ksh @@ -0,0 +1,55 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. 
$STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Ensure that we can checkpoint a pool that we just rewound. +# +# STRATEGY: +# 1. Create pool +# 2. Populate it +# 3. Take checkpoint +# 4. Modify data (include at least one destructive change) +# 5. Rewind to checkpoint +# 6. Verify that the data before the checkpoint are present +# and the data after the checkpoint is gone +# 7. Take another checkpoint +# 8. Change state again +# 9. Verify the state at that time +# + +verify_runnable "global" + +setup_test_pool +log_onexit cleanup_test_pool + +populate_test_pool +log_must zpool checkpoint $TESTPOOL +test_change_state_after_checkpoint + +log_must zpool export $TESTPOOL +log_must zpool import --rewind-to-checkpoint $TESTPOOL +test_verify_pre_checkpoint_state + +log_must zpool checkpoint $TESTPOOL +test_change_state_after_checkpoint + +test_verify_post_checkpoint_state + +log_pass "Checkpoint a pool that we just rewound." diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_big_rewind.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_big_rewind.ksh new file mode 100755 index 000000000000..f915d2ad418c --- /dev/null +++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_big_rewind.ksh @@ -0,0 +1,57 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017, 2018 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Rewind to checkpoint on a stressed pool. 
We basically try to +# fragment the pool before and after taking a checkpoint and +# see if zdb finds any checksum or other errors that imply that +# blocks from the checkpoint have been reused. +# +# STRATEGY: +# 1. Import pool that's slightly fragmented +# 2. Take checkpoint +# 3. Apply a destructive action and do more random writes +# 4. Run zdb on both current and checkpointed data and make +# sure that zdb returns with no errors +# 5. Rewind to checkpoint +# 6. Run zdb again +# + +verify_runnable "global" + +setup_nested_pool_state +log_onexit cleanup_nested_pools + +log_must zpool checkpoint $NESTEDPOOL + +# +# Destroy one dataset, modify an existing one and create a +# a new one. Do more random writes in an attempt to raise +# more fragmentation. Then verify both current and checkpointed +# states. +# +fragment_after_checkpoint_and_verify + +log_must zpool export $NESTEDPOOL +log_must zpool import -d $FILEDISKDIR --rewind-to-checkpoint $NESTEDPOOL + +log_must zdb $NESTEDPOOL + +log_pass "Rewind to checkpoint on a stressed pool." diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_capacity.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_capacity.ksh new file mode 100755 index 000000000000..15afc4adf4bc --- /dev/null +++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_capacity.ksh @@ -0,0 +1,92 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. 
$STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Ensure that we don't reuse checkpointed blocks when the +# pool hits ENOSPC errors because of the slop space limit. +# This test also ensures that the DSL layer correctly takes +# into account the space used by the checkpoint when deciding +# whether to allow operations based on the reserved slop +# space. +# +# STRATEGY: +# 1. Create pool with one disk of 1G size +# 2. Create a file with random data of 700M in size. +# leaving ~200M left in pool capacity. +# 3. Checkpoint the pool +# 4. Remove the file. All of its blocks should stay around +# in ZFS as they are part of the checkpoint. +# 5. Create a new empty file and attempt to write ~300M +# of data to it. This should fail, as the reserved +# SLOP space for the pool should be ~128M, and we should +# be hitting that limit getting ENOSPC. +# 6. Use zdb to traverse and checksum all the checkpointed +# data to ensure its integrity. +# 7. Export the pool and rewind to ensure that everything +# is actually there as expected. 
+# + +function test_cleanup +{ + poolexists $NESTEDPOOL && destroy_pool $NESTEDPOOL + set_tunable32 SPA_ASIZE_INFLATION 24 + cleanup_test_pool +} + +verify_runnable "global" + +setup_test_pool +log_onexit test_cleanup +log_must set_tunable32 SPA_ASIZE_INFLATION 4 + +log_must zfs create $DISKFS + +log_must mkfile $FILEDISKSIZE $FILEDISK1 +log_must zpool create $NESTEDPOOL $FILEDISK1 + +log_must zfs create -o compression=lz4 -o recordsize=8k $NESTEDFS0 +log_must dd if=/dev/urandom of=$NESTEDFS0FILE bs=1M count=700 +FILE0INTRO=$(head -c 100 $NESTEDFS0FILE) + +log_must zpool checkpoint $NESTEDPOOL +log_must rm $NESTEDFS0FILE + +# +# only for debugging purposes +# +log_must zpool list $NESTEDPOOL + +log_mustnot dd if=/dev/urandom of=$NESTEDFS0FILE bs=1M count=300 + +# +# only for debugging purposes +# +log_must zpool list $NESTEDPOOL + +log_must zdb -kc $NESTEDPOOL + +log_must zpool export $NESTEDPOOL +log_must zpool import -d $FILEDISKDIR --rewind-to-checkpoint $NESTEDPOOL + +log_must [ "$(head -c 100 $NESTEDFS0FILE)" = "$FILE0INTRO" ] + +log_must zdb $NESTEDPOOL + +log_pass "Do not reuse checkpointed space at low capacity." diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_conf_change.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_conf_change.ksh new file mode 100755 index 000000000000..4f783108a93a --- /dev/null +++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_conf_change.ksh @@ -0,0 +1,43 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. 
All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# It shouldn't be possible to change pool's vdev config when +# it has a checkpoint. +# +# STRATEGY: +# 1. Create pool and take checkpoint +# 2. Attempt to change guid +# 3. Attempt to attach/replace/remove device +# + +verify_runnable "global" + +setup_test_pool +log_onexit cleanup_test_pool + +log_must zpool checkpoint $TESTPOOL + +log_mustnot zpool reguid $TESTPOOL +log_mustnot zpool attach -f $TESTPOOL $TESTDISK $EXTRATESTDISK +log_mustnot zpool replace $TESTPOOL $TESTDISK $EXTRATESTDISK +log_mustnot zpool remove $TESTPOOL $TESTDISK + +log_pass "Cannot change pool's config when pool has checkpoint." diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard.ksh new file mode 100755 index 000000000000..efd46a69b9d7 --- /dev/null +++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard.ksh @@ -0,0 +1,53 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Ensure that we can discard the checkpoint from a pool. +# +# STRATEGY: +# 1. Create pool +# 2. Populate it +# 3. Take checkpoint +# 4. Modify data (include at least one destructive change) +# 5. Discard checkpoint +# 6. Export and attempt to rewind. Rewinding should fail +# 7. 
Import pool normally and verify state +# + +verify_runnable "global" + +setup_test_pool +log_onexit cleanup_test_pool + +populate_test_pool + +log_must zpool checkpoint $TESTPOOL + +test_change_state_after_checkpoint + +log_must zpool checkpoint -d $TESTPOOL + +log_must zpool export $TESTPOOL +log_mustnot zpool import --rewind-to-checkpoint $TESTPOOL + +log_must zpool import $TESTPOOL +test_verify_post_checkpoint_state + +log_pass "Discard checkpoint from pool." diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_busy.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_busy.ksh new file mode 100755 index 000000000000..ae099ff270f1 --- /dev/null +++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_busy.ksh @@ -0,0 +1,110 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017, 2018 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Discard checkpoint on a stressed pool. Ensure that we can +# export and import the pool while discarding but not run any +# operations that have to do with the checkpoint or change the +# pool's config. +# +# STRATEGY: +# 1. Import pools that's slightly fragmented +# 2. Take checkpoint +# 3. Do more random writes to "free" checkpointed blocks +# 4. Start discarding checkpoint +# 5. Export pool while discarding checkpoint +# 6. Attempt to rewind (should fail) +# 7. Import pool and ensure that discard is still running +# 8. 
Attempt to run checkpoint commands, or commands that +# change the pool's config (should fail) +# + +verify_runnable "global" + +function test_cleanup +{ + # reset memory limit to 16M + set_tunable64 SPA_DISCARD_MEMORY_LIMIT 1000000 + cleanup_nested_pools +} + +setup_nested_pool_state +log_onexit test_cleanup + +# +# Force discard to happen slower so it spans over +# multiple txgs. +# +# Set memory limit to 128 bytes. Assuming that we +# use 64-bit words for encoding space map entries, +# ZFS will discard 8 non-debug entries per txg +# (so at most 16 space map entries in debug-builds +# due to debug entries). +# +# That should give us more than enough txgs to be +# discarding the checkpoint for a long time as with +# the current setup the checkpoint space maps should +# have tens of thousands of entries. +# +# Note: If two-words entries are used in the space +# map, we should have even more time to +# verify this. +# +set_tunable64 SPA_DISCARD_MEMORY_LIMIT 128 + +log_must zpool checkpoint $NESTEDPOOL + +fragment_after_checkpoint_and_verify + +log_must zpool checkpoint -d $NESTEDPOOL + +log_must zpool export $NESTEDPOOL + +# +# Verify on-disk state while pool is exported +# +log_must zdb -e -p $FILEDISKDIR $NESTEDPOOL + +# +# Attempt to rewind on a pool that is discarding +# a checkpoint. +# +log_mustnot zpool import -d $FILEDISKDIR --rewind-to-checkpoint $NESTEDPOOL + +log_must zpool import -d $FILEDISKDIR $NESTEDPOOL + +# +# Discarding should continue after import, so +# all the following operations should fail. +# +log_mustnot zpool checkpoint $NESTEDPOOL +log_mustnot zpool checkpoint -d $NESTEDPOOL +log_mustnot zpool remove $NESTEDPOOL $FILEDISK1 +log_mustnot zpool reguid $NESTEDPOOL + +# reset memory limit to 16M +set_tunable64 SPA_DISCARD_MEMORY_LIMIT 16777216 + +nested_wait_discard_finish + +log_must zdb $NESTEDPOOL + +log_pass "Can export/import but not rewind/checkpoint/discard or " \ + "change pool's config while discarding." 
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_many.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_many.ksh new file mode 100755 index 000000000000..cf0cf6ce9eea --- /dev/null +++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_many.ksh @@ -0,0 +1,52 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Take a checkpoint and discard checkpointed data twice. The +# idea is to ensure that the background discard zfs thread is +# always running and works as expected. +# +# STRATEGY: +# 1. Create pool +# 2. Populate it and then take a checkpoint +# 3. Do some changes afterwards, and then discard checkpoint +# 4. Repeat steps 2 and 3 +# + +verify_runnable "global" + +setup_test_pool +log_onexit cleanup_test_pool + +populate_test_pool +log_must zpool checkpoint $TESTPOOL +test_change_state_after_checkpoint +log_must zpool checkpoint -d $TESTPOOL +test_wait_discard_finish + +log_must mkfile -n 100M $FS2FILE +log_must randwritecomp $FS2FILE 100 +log_must zpool checkpoint $TESTPOOL + +log_must randwritecomp $FS2FILE 100 +log_must zpool checkpoint -d $TESTPOOL +test_wait_discard_finish + +log_pass "Background discarding works as expected." 
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_indirect.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_indirect.ksh new file mode 100755 index 000000000000..aa14d8ed27fc --- /dev/null +++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_indirect.ksh @@ -0,0 +1,59 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017, 2018 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Ensure that checkpoint plays well with indirect mappings +# and blocks. +# +# STRATEGY: +# 1. Import pool that's slightly fragmented +# 2. Introduce indirection by removing and re-adding devices +# 3. Take checkpoint +# 4. Apply a destructive action and do more random writes +# 5. Run zdb on both current and checkpointed data and make +# sure that zdb returns with no errors +# + +verify_runnable "global" + +setup_nested_pool_state +log_onexit cleanup_nested_pools + +# +# Remove and re-add all disks. +# +introduce_indirection + +# +# Display fragmentation after removals +# +log_must zpool list -v + +log_must zpool checkpoint $NESTEDPOOL + +# +# Destroy one dataset, modify an existing one and create a +# a new one. Do more random writes in an attempt to raise +# more fragmentation. Then verify both current and checkpointed +# states. +# +fragment_after_checkpoint_and_verify + +log_pass "Running correctly on indirect setups with a checkpoint." 
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_invalid.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_invalid.ksh new file mode 100755 index 000000000000..c10f0550c6b8 --- /dev/null +++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_invalid.ksh @@ -0,0 +1,80 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Try each 'zpool checkpoint' and relevant 'zpool import' with +# invalid inputs to ensure it returns an error. That includes: +# * A non-existent pool name or no pool name at all is supplied +# * Pool supplied for discarding or rewinding but the pool +# does not have a checkpoint +# * A dataset or a file/directory are supplied instead of a pool +# +# STRATEGY: +# 1. Create an array of parameters for the different scenarios +# 2. For each parameter, execute the scenarios sub-command +# 3. Verify that an error was returned +# + +verify_runnable "global" + +setup_test_pool +log_onexit cleanup_test_pool +populate_test_pool + +# +# Argument groups below. Note that all_args also includes +# an empty string as "run command with no argument". +# +set -A all_args "" "-d" "--discard" + +# +# Target groups below. Note that invalid_targets includes +# an empty string as "do not supply a pool name". 
+# +set -A invalid_targets "" "iDontExist" "$FS0" "$FS0FILE" +non_checkpointed="$TESTPOOL" + +# +# Scenario 1 +# Trying all checkpoint args with all invalid targets +# +typeset -i i=0 +while (( i < ${#invalid_targets[*]} )); do + typeset -i j=0 + while (( j < ${#all_args[*]} )); do + log_mustnot zpool checkpoint ${all_args[j]} \ + ${invalid_targets[i]} + ((j = j + 1)) + done + ((i = i + 1)) +done + +# +# Scenario 2 +# If the pool does not have a checkpoint, -d nor import rewind +# should work with it. +# +log_mustnot zpool checkpoint -d $non_checkpointed +log_must zpool export $non_checkpointed +log_mustnot zpool import --rewind-to-checkpoint $non_checkpointed +log_must zpool import $non_checkpointed + +log_pass "Badly formed checkpoint related commands with " \ + "invalid inputs fail as expected." diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_lun_expsz.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_lun_expsz.ksh new file mode 100755 index 000000000000..59f64081a977 --- /dev/null +++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_lun_expsz.ksh @@ -0,0 +1,61 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Ensure that we can expand a device while the pool has a +# checkpoint but in the case of a rewind that device rewinds +# back to its previous size. +# +# STRATEGY: +# 1. Create pool +# 2. Populate it +# 3. 
Take checkpoint +# 4. Expand the device and modify some data +# (include at least one destructive change) +# 5. Rewind to checkpoint +# 6. Verify that we rewinded successfully and check if the +# device shows up expanded in the vdev list +# + +verify_runnable "global" + +EXPSZ=2G + +setup_nested_pools +log_onexit cleanup_nested_pools + +populate_nested_pool +INITSZ=$(zpool list -v | grep "$FILEDISK1" | awk '{print $2}') +log_must zpool checkpoint $NESTEDPOOL + +log_must truncate -s $EXPSZ $FILEDISK1 +log_must zpool online -e $NESTEDPOOL $FILEDISK1 +NEWSZ=$(zpool list -v | grep "$FILEDISK1" | awk '{print $2}') +nested_change_state_after_checkpoint +log_mustnot [ "$INITSZ" = "$NEWSZ" ] + +log_must zpool export $NESTEDPOOL +log_must zpool import -d $FILEDISKDIR --rewind-to-checkpoint $NESTEDPOOL + +nested_verify_pre_checkpoint_state +FINSZ=$(zpool list -v | grep "$FILEDISK1" | awk '{print $2}') +log_must [ "$INITSZ" = "$FINSZ" ] + +log_pass "LUN expansion rewinded correctly." diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_open.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_open.ksh new file mode 100755 index 000000000000..018478af8d61 --- /dev/null +++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_open.ksh @@ -0,0 +1,48 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Ensure that we can open a checkpointed pool. 
+# +# STRATEGY: +# 1. Create pool +# 2. Populate it +# 3. Take checkpoint +# 4. Modify data (include at least one destructive change) +# 5. Export and import pool +# 6. Verify that the pool was opened with the most current +# data and not the checkpointed state. +# + +verify_runnable "global" + +setup_test_pool +log_onexit cleanup_test_pool + +populate_test_pool +log_must zpool checkpoint $TESTPOOL +test_change_state_after_checkpoint + +log_must zpool export $TESTPOOL +log_must zpool import $TESTPOOL + +test_verify_post_checkpoint_state + +log_pass "Open a checkpointed pool." diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_removal.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_removal.ksh new file mode 100755 index 000000000000..514a05984160 --- /dev/null +++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_removal.ksh @@ -0,0 +1,72 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Attempt to take a checkpoint while a removal is +# in progress. The attempt should fail. +# +# STRATEGY: +# 1. Create pool with one disk +# 2. Create a big file in the pool, so when the disk +# is later removed, it will give us enough of a +# time window to attempt the checkpoint while the +# removal takes place +# 3. Add a second disk where all the data will be moved +# to when the first disk will be removed. +# 4. 
Start removal of first disk +# 5. Attempt to checkpoint (attempt should fail) +# + +verify_runnable "global" + +function callback +{ + log_mustnot zpool checkpoint $TESTPOOL + return 0 +} + +# +# Create pool +# +setup_test_pool +log_onexit cleanup_test_pool +populate_test_pool + +# +# Create big empty file and do some writes at random +# offsets to ensure that it takes up space. Note that +# the implicitly created filesystem ($FS0) does not +# have compression enabled. +# +log_must mkfile $BIGFILESIZE $FS0FILE +log_must randwritecomp $FS0FILE 1000 + +# +# Add second disk +# +log_must zpool add $TESTPOOL $EXTRATESTDISK + +# +# Remove disk and attempt to take checkpoint +# +log_must attempt_during_removal $TESTPOOL $TESTDISK callback +log_must zpool status $TESTPOOL + +log_pass "Attempting to checkpoint during removal fails as expected." diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_rewind.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_rewind.ksh new file mode 100755 index 000000000000..2a2bb2deed80 --- /dev/null +++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_rewind.ksh @@ -0,0 +1,49 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Ensure that we can rewind on a checkpointed pool. +# +# STRATEGY: +# 1. Create pool +# 2. Populate it +# 3. Take checkpoint +# 4. 
Modify data (include at least one destructive change) +# 5. Rewind to checkpoint +# 6. Verify that the data before the checkpoint are present +# and the data after the checkpoint is gone. +# + +verify_runnable "global" + +setup_test_pool +log_onexit cleanup_test_pool +populate_test_pool + +log_must zpool checkpoint $TESTPOOL + +test_change_state_after_checkpoint + +log_must zpool export $TESTPOOL +log_must zpool import --rewind-to-checkpoint $TESTPOOL + +test_verify_pre_checkpoint_state + +log_pass "Rewind on a checkpointed pool." diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_ro_rewind.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_ro_rewind.ksh new file mode 100755 index 000000000000..fd7416612b7c --- /dev/null +++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_ro_rewind.ksh @@ -0,0 +1,57 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Ensure that we can open the checkpointed state of a pool +# as read-only. +# +# STRATEGY: +# 1. Create pool +# 2. Populate it +# 3. Take checkpoint +# 4. Modify data (include at least one destructive change) +# 5. Export and import the checkpointed state as readonly +# 6. Verify that we can see the checkpointed state and not +# the actual current state. +# 7. Export and import the current state +# 8. Verify that we can see the current state and not the +# checkpointed state. 
+# + +verify_runnable "global" + +setup_test_pool +log_onexit cleanup_test_pool + +populate_test_pool +log_must zpool checkpoint $TESTPOOL +test_change_state_after_checkpoint + +log_must zpool export $TESTPOOL +log_must zpool import -o readonly=on --rewind-to-checkpoint $TESTPOOL + +test_verify_pre_checkpoint_state "ro-check" + +log_must zpool export $TESTPOOL +log_must zpool import $TESTPOOL + +test_verify_post_checkpoint_state + +log_pass "Open checkpointed state of the pool as read-only pool." diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_sm_scale.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_sm_scale.ksh new file mode 100755 index 000000000000..e24c4eb55252 --- /dev/null +++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_sm_scale.ksh @@ -0,0 +1,92 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017, 2018 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# The maximum address that can be described by a single-word +# space map entry limits the maximum allocatable space of any +# top-level vdev to 64PB whenever a vdev-wide space map is used. +# +# Since a vdev-wide space map is introduced for the checkpoint +# we want to ensure that we cannot checkpoint a pool that does +# not use the new space map encoding (V2) and has a top-level +# vdev with more than 64PB of allocatable space. 
+# +# Note: Since this is a pool created from file-based vdevs we +# are guaranteed that vdev_ashift is SPA_MINBLOCKSHIFT +# [which is currently 9 and (1 << 9) = 512], so the numbers +# work out for this test. +# +# STRATEGY: +# 1. Create pool with a disk of exactly 64PB +# (so ~63.5PB of allocatable space) and +# ensure that has the checkpoint feature +# enabled but not space map V2 +# 2. Ensure that you can checkpoint it +# 3. Create pool with a disk of exactly 65PB +# (so ~64.5PB of allocatable space) with +# the same setup +# 4. Ensure we fail trying to checkpoint it +# +# Note: +# This test used to create the two pools and attempt to checkpoint +# them at the same time, then destroy them. We later had to change +# this to test one pool at a time as the metaslabs (even though empty) +# consumed a lot of memory, especially on a machine that has been +# running with debug enabled. To give an example, each metaslab +# structure is ~1712 bytes (at the time of this writing), and each +# vdev has 128K metaslabs, which means that just the structures +# consume 131071 * 1712 = ~224M. 
+# + +verify_runnable "global" + +TESTPOOL1=testpool1 +TESTPOOL2=testpool2 + +DISK64PB=/$DISKFS/disk64PB +DISK65PB=/$DISKFS/disk65PB + +function test_cleanup +{ + poolexists $TESTPOOL1 && destroy_pool $TESTPOOL1 + poolexists $TESTPOOL2 && destroy_pool $TESTPOOL2 + log_must rm -f $DISK64PB $DISK65PB + cleanup_test_pool +} + +setup_test_pool +log_onexit test_cleanup + +log_must zfs create $DISKFS +log_must mkfile -n $((64 * 1024 * 1024))g $DISK64PB +log_must mkfile -n $((65 * 1024 * 1024))g $DISK65PB + +log_must zpool create -d $TESTPOOL1 $DISK64PB +log_must zpool set feature@zpool_checkpoint=enabled $TESTPOOL1 +log_must zpool checkpoint $TESTPOOL1 +destroy_pool $TESTPOOL1 + +log_must zpool create -d $TESTPOOL2 $DISK65PB +log_must zpool set feature@zpool_checkpoint=enabled $TESTPOOL2 +log_mustnot zpool checkpoint $TESTPOOL2 +destroy_pool $TESTPOOL2 + +log_pass "Fail to checkpoint pool with old spacemap encoding" \ + " and a vdev that's more than 64PB." diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_twice.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_twice.ksh new file mode 100755 index 000000000000..3f1076b94c4e --- /dev/null +++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_twice.ksh @@ -0,0 +1,40 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Attempt to take a checkpoint for an already +# checkpointed pool. 
The attempt should fail. +# +# STRATEGY: +# 1. Create pool +# 2. Checkpoint it +# 3. Attempt to checkpoint it again (should fail). +# + +verify_runnable "global" + +setup_test_pool +log_onexit cleanup_test_pool + +log_must zpool checkpoint $TESTPOOL +log_mustnot zpool checkpoint $TESTPOOL + +log_pass "Attempting to checkpoint an already checkpointed " \ + "pool fails as expected." diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_vdev_add.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_vdev_add.ksh new file mode 100755 index 000000000000..efb69b7c0ee9 --- /dev/null +++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_vdev_add.ksh @@ -0,0 +1,63 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Ensure that we can add a device while the pool has a +# checkpoint but in the case of a rewind that device does +# not show up. +# +# STRATEGY: +# 1. Create pool +# 2. Populate it +# 3. Take checkpoint +# 4. Add device and modify data +# (include at least one destructive change) +# 5. Rewind to checkpoint +# 6. 
Verify that we rewound successfully
Verify zdb finds old dataset when run on checkpointed +# state +# 7. Export pool, and verify the same things with zdb to +# test the -e option. +# 8. Import pool and discard checkpoint +# 9. Verify zdb does not find the checkpoint anymore in the +# current state. +# 10.Verify that zdb cannot find the checkpointed state +# anymore when trying to open it for verification. +# + +verify_runnable "global" + +# +# zdb does this thing where it imports the checkpointed state of the +# pool under a new pool with a different name, alongside the pool +# with the current state. The name of this temporary pool is the +# name of the actual pool with the suffix below appended to it. +# +CHECKPOINT_SUFFIX="_CHECKPOINTED_UNIVERSE" +CHECKPOINTED_FS1=$TESTPOOL$CHECKPOINT_SUFFIX/$TESTFS1 + +setup_test_pool +log_onexit cleanup_test_pool + +populate_test_pool +log_must zpool checkpoint $TESTPOOL + +test_change_state_after_checkpoint + +zdb $TESTPOOL | grep "Checkpointed uberblock found" || \ + log_fail "zdb could not find checkpointed uberblock" + +zdb -k $TESTPOOL | grep "Checkpointed uberblock found" && \ + log_fail "zdb found checkpointed uberblock in checkpointed state" + +zdb $TESTPOOL | grep "Dataset $FS1" && \ + log_fail "zdb found destroyed dataset in current state" + +zdb -k $TESTPOOL | grep "Dataset $CHECKPOINTED_FS1" || \ + log_fail "zdb could not find destroyed dataset in checkpoint" + +log_must zpool export $TESTPOOL + +zdb -e $TESTPOOL | grep "Checkpointed uberblock found" || \ + log_fail "zdb could not find checkpointed uberblock" + +zdb -k -e $TESTPOOL | grep "Checkpointed uberblock found" && \ + log_fail "zdb found checkpointed uberblock in checkpointed state" + +zdb -e $TESTPOOL | grep "Dataset $FS1" && \ + log_fail "zdb found destroyed dataset in current state" + +zdb -k -e $TESTPOOL | grep "Dataset $CHECKPOINTED_FS1" || \ + log_fail "zdb could not find destroyed dataset in checkpoint" + +log_must zpool import $TESTPOOL + +log_must zpool checkpoint -d $TESTPOOL + +zdb 
$TESTPOOL | grep "Checkpointed uberblock found" && \ + log_fail "zdb found checkpointed uberblock after discarding " \ + "the checkpoint" + +zdb -k $TESTPOOL && \ + log_fail "zdb opened checkpointed state that was discarded" + +log_pass "zdb can analyze checkpointed pools." diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_zhack_feat.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_zhack_feat.ksh new file mode 100755 index 000000000000..815fc8573987 --- /dev/null +++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_zhack_feat.ksh @@ -0,0 +1,66 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +# +# DESCRIPTION: +# Ensure that we can rewind to a checkpointed state that was +# before a readonly-compatible feature was introduced. +# +# STRATEGY: +# 1. Create pool +# 2. Populate it +# 3. Take checkpoint +# 4. Modify data (include at least one destructive change) +# 5. Export pool +# 6. Introduce a new feature in the pool which is unsupported +# but readonly-compatible and increment its reference +# number so it is marked active. +# 7. Verify that the pool can't be opened writeable, but we +# can rewind to the checkpoint (before the feature was +# introduced) if we want to. 
+# + +verify_runnable "global" + +# +# Clear all labels from all vdevs so zhack +# doesn't get confused +# +for disk in ${DISKS[@]}; do + zpool labelclear -f $disk +done + +setup_test_pool +log_onexit cleanup_test_pool + +populate_test_pool +log_must zpool checkpoint $TESTPOOL +test_change_state_after_checkpoint + +log_must zpool export $TESTPOOL + +log_must zhack feature enable -r $TESTPOOL 'com.company:future_feature' +log_must zhack feature ref $TESTPOOL 'com.company:future_feature' + +log_mustnot zpool import $TESTPOOL +log_must zpool import --rewind-to-checkpoint $TESTPOOL + +test_verify_pre_checkpoint_state + +log_pass "Rewind to checkpoint from unsupported pool feature." diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/cleanup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/cleanup.ksh new file mode 100755 index 000000000000..5fa03d74f6b6 --- /dev/null +++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/cleanup.ksh @@ -0,0 +1,23 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2018 by Delphix. All rights reserved. +# + +. 
$STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +verify_runnable "global" + +test_group_destroy_saved_pool +log_pass diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/pool_checkpoint.kshlib b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/pool_checkpoint.kshlib new file mode 100644 index 000000000000..ea6c03e9d59d --- /dev/null +++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/pool_checkpoint.kshlib @@ -0,0 +1,413 @@ +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2017, 2018 by Delphix. All rights reserved. +# + +. $STF_SUITE/include/libtest.shlib +. $STF_SUITE/tests/functional/removal/removal.kshlib + +# +# In general all the tests related to the pool checkpoint can +# be divided into two categories. TESTS that verify features +# provided by the checkpoint (e.g. checkpoint_rewind) and tests +# that stress-test the checkpoint (e.g. checkpoint_big_rewind). +# +# For the first group we don't really care about the size of +# the pool or the individual file sizes within the filesystems. +# This is why these tests run directly on pools that use a +# "real disk vdev" (meaning not a file based one). These tests +# use the $TESTPOOL pool that is created on top of $TESTDISK. +# This pool is referred to as the "test pool" and thus all +# the tests of this group use the testpool-related functions of +# this file (not the nested_pools ones). 
+# +# For the second group we generally try to bring the pool to its +# limits by increasing fragmentation, filling all allocatable +# space, attempting to use vdevs that the checkpoint spacemap +# cannot represent, etc. For these tests we need to control +# almost all parameters of the pool and the vdevs that back it +# so we create them based on file-based vdevs that we carefully +# create within the $TESTPOOL pool. So most of these tests, in +# order to create this nested pool sctructure, generally start +# like this: +# 1] We create the test pool ($TESTPOOL). +# 2] We create a filesystem and we populate it with files of +# some predetermined size. +# 3] We use those files as vdevs for the pool that the test +# will use ($NESTEDPOOL). +# 4] Go on and let the test run and operate on $NESTEDPOOL. +# + +# +# These disks are used to back $TESTPOOL +# +TESTDISK="$(echo $DISKS | cut -d' ' -f1)" +EXTRATESTDISK="$(echo $DISKS | cut -d' ' -f2)" + +FS0=$TESTPOOL/$TESTFS +FS1=$TESTPOOL/$TESTFS1 +FS2=$TESTPOOL/$TESTFS2 + +FS0FILE=/$FS0/$TESTFILE0 +FS1FILE=/$FS1/$TESTFILE1 +FS2FILE=/$FS2/$TESTFILE2 + +# +# The following are created within $TESTPOOL and +# will be used to back $NESTEDPOOL +# +DISKFS=$TESTPOOL/disks +FILEDISKDIR=/$DISKFS +FILEDISK1=/$DISKFS/dsk1 +FILEDISK2=/$DISKFS/dsk2 +FILEDISKS="$FILEDISK1 $FILEDISK2" + +# +# $NESTEDPOOL related variables +# +NESTEDPOOL=nestedpool +NESTEDFS0=$NESTEDPOOL/$TESTFS +NESTEDFS1=$NESTEDPOOL/$TESTFS1 +NESTEDFS2=$NESTEDPOOL/$TESTFS2 +NESTEDFS0FILE=/$NESTEDFS0/$TESTFILE0 +NESTEDFS1FILE=/$NESTEDFS1/$TESTFILE1 +NESTEDFS2FILE=/$NESTEDFS2/$TESTFILE2 + +# +# In the tests that stress-test the pool (second category +# mentioned above), there exist some that need to bring +# fragmentation at high percentages in a relatively short +# period of time. In order to do that we set the following +# parameters: +# +# * We use two disks of 1G each, to create a pool of size 2G. 
+# The point is that 2G is not small nor large, and we also +# want to have 2 disks to introduce indirect vdevs on our +# setup. +# * We enable compression and set the record size of all +# filesystems to 8K. The point of compression is to +# ensure that we are not filling up the whole pool (that's +# what checkpoint_capacity is for), and the specific +# record size is set to match the block size of randwritecomp +# which is used to increase fragmentation by writing on +# files. +# * We always have 2 big files present of 512M each, which +# should account for 40%~50% capacity by the end of each +# test with fragmentation around 50~60%. +# * At each file we attempt to do enough random writes to +# touch every offset twice on average. +# +# Note that the amount of random writes per files are based +# on the following calculation: +# +# ((512M / 8K) * 3) * 2 = ~400000 +# +# Given that the file is 512M and one write is 8K, we would +# need (512M / 8K) writes to go through the whole file. +# Assuming though that each write has a compression ratio of +# 3, then we want 3 times that to cover the same amount of +# space. Finally, we multiply that by 2 since our goal is to +# touch each offset twice on average. +# +# Examples of those tests are checkpoint_big_rewind and +# checkpoint_discard_busy. +# +FILEDISKSIZE=1g +DISKSIZE=1g +BIGFILESIZE=512M +RANDOMWRITES=400000 + + +# +# Assumes create_test_pool has been called beforehand. 
+# +function setup_nested_pool +{ + log_must zfs create $DISKFS + + log_must truncate -s $DISKSIZE $FILEDISK1 + log_must truncate -s $DISKSIZE $FILEDISK2 + + log_must zpool create -O sync=disabled $NESTEDPOOL $FILEDISKS +} + +function setup_test_pool +{ + log_must zpool create -O sync=disabled $TESTPOOL "$TESTDISK" +} + +function setup_nested_pools +{ + setup_test_pool + setup_nested_pool +} + +function cleanup_nested_pool +{ + log_must zpool destroy $NESTEDPOOL + log_must rm -f $FILEDISKS +} + +function cleanup_test_pool +{ + log_must zpool destroy $TESTPOOL + + # + # We always clear the labels of all disks + # between tests so imports from zpool or + # or zdb do not get confused with leftover + # data from old pools. + # + for disk in $DISKS; do + zpool labelclear -f $disk + done +} + +function cleanup_nested_pools +{ + cleanup_nested_pool + cleanup_test_pool +} + +# +# Remove and re-add each vdev to ensure that data is +# moved between disks and indirect mappings are created +# +function introduce_indirection +{ + for disk in ${FILEDISKS[@]}; do + log_must zpool remove $NESTEDPOOL $disk + log_must wait_for_removal $NESTEDPOOL + log_mustnot vdevs_in_pool $NESTEDPOOL $disk + log_must zpool add $NESTEDPOOL $disk + done +} + +FILECONTENTS0="Can't wait to be checkpointed!" +FILECONTENTS1="Can't wait to be checkpointed too!" +NEWFILECONTENTS0="I survived after the checkpoint!" +NEWFILECONTENTS2="I was born after the checkpoint!" 
+ +function populate_test_pool +{ + log_must zfs create -o compression=lz4 -o recordsize=8k $FS0 + log_must zfs create -o compression=lz4 -o recordsize=8k $FS1 + + echo $FILECONTENTS0 > $FS0FILE + echo $FILECONTENTS1 > $FS1FILE +} + +function populate_nested_pool +{ + log_must zfs create -o compression=lz4 -o recordsize=8k $NESTEDFS0 + log_must zfs create -o compression=lz4 -o recordsize=8k $NESTEDFS1 + + echo $FILECONTENTS0 > $NESTEDFS0FILE + echo $FILECONTENTS1 > $NESTEDFS1FILE +} + +function test_verify_pre_checkpoint_state +{ + log_must zfs list $FS0 + log_must zfs list $FS1 + log_must [ "$(<$FS0FILE)" = "$FILECONTENTS0" ] + log_must [ "$(<$FS1FILE)" = "$FILECONTENTS1" ] + + # + # If we've opened the checkpointed state of the + # pool as read-only without rewinding on-disk we + # can't really use zdb on it. + # + if [[ "$1" != "ro-check" ]] ; then + log_must zdb $TESTPOOL + fi + + # + # Ensure post-checkpoint state is not present + # + log_mustnot zfs list $FS2 + log_mustnot [ "$(<$FS0FILE)" = "$NEWFILECONTENTS0" ] +} + +function nested_verify_pre_checkpoint_state +{ + log_must zfs list $NESTEDFS0 + log_must zfs list $NESTEDFS1 + log_must [ "$(<$NESTEDFS0FILE)" = "$FILECONTENTS0" ] + log_must [ "$(<$NESTEDFS1FILE)" = "$FILECONTENTS1" ] + + # + # If we've opened the checkpointed state of the + # pool as read-only without rewinding on-disk we + # can't really use zdb on it. 
+ # + if [[ "$1" != "ro-check" ]] ; then + log_must zdb $NESTEDPOOL + fi + + # + # Ensure post-checkpoint state is not present + # + log_mustnot zfs list $NESTEDFS2 + log_mustnot [ "$(<$NESTEDFS0FILE)" = "$NEWFILECONTENTS0" ] +} + +function test_change_state_after_checkpoint +{ + log_must zfs destroy $FS1 + log_must zfs create -o compression=lz4 -o recordsize=8k $FS2 + + echo $NEWFILECONTENTS0 > $FS0FILE + echo $NEWFILECONTENTS2 > $FS2FILE +} + +function nested_change_state_after_checkpoint +{ + log_must zfs destroy $NESTEDFS1 + log_must zfs create -o compression=lz4 -o recordsize=8k $NESTEDFS2 + + echo $NEWFILECONTENTS0 > $NESTEDFS0FILE + echo $NEWFILECONTENTS2 > $NESTEDFS2FILE +} + +function test_verify_post_checkpoint_state +{ + log_must zfs list $FS0 + log_must zfs list $FS2 + log_must [ "$(<$FS0FILE)" = "$NEWFILECONTENTS0" ] + log_must [ "$(<$FS2FILE)" = "$NEWFILECONTENTS2" ] + + log_must zdb $TESTPOOL + + # + # Ensure pre-checkpointed state that was removed post-checkpoint + # is not present + # + log_mustnot zfs list $FS1 + log_mustnot [ "$(<$FS0FILE)" = "$FILECONTENTS0" ] +} + +function fragment_before_checkpoint +{ + populate_nested_pool + log_must mkfile -n $BIGFILESIZE $NESTEDFS0FILE + log_must mkfile -n $BIGFILESIZE $NESTEDFS1FILE + log_must randwritecomp $NESTEDFS0FILE $RANDOMWRITES + log_must randwritecomp $NESTEDFS1FILE $RANDOMWRITES + + # + # Display fragmentation on test log + # + log_must zpool list -v +} + +function fragment_after_checkpoint_and_verify +{ + log_must zfs destroy $NESTEDFS1 + log_must zfs create -o compression=lz4 -o recordsize=8k $NESTEDFS2 + log_must mkfile -n $BIGFILESIZE $NESTEDFS2FILE + log_must randwritecomp $NESTEDFS0FILE $RANDOMWRITES + log_must randwritecomp $NESTEDFS2FILE $RANDOMWRITES + + # + # Display fragmentation on test log + # + log_must zpool list -v + + # + # Typically we would just run zdb at this point and things + # would be fine. 
Unfortunately, if there is still any + # background I/O in the pool the zdb command can fail with + # checksum errors temporarily. + # + # Export the pool when running zdb so the pool is idle and + # the verification results are consistent. + # + log_must zpool export $NESTEDPOOL + log_must zdb -e -p $FILEDISKDIR $NESTEDPOOL + log_must zdb -e -p $FILEDISKDIR -kc $NESTEDPOOL + log_must zpool import -d $FILEDISKDIR $NESTEDPOOL +} + +function wait_discard_finish +{ + typeset pool="$1" + + typeset status + status=$(zpool status $pool | grep "checkpoint:") + while [ "" != "$status" ]; do + sleep 5 + status=$(zpool status $pool | grep "checkpoint:") + done +} + +function test_wait_discard_finish +{ + wait_discard_finish $TESTPOOL +} + +function nested_wait_discard_finish +{ + wait_discard_finish $NESTEDPOOL +} + +# +# Creating the setup for the second group of tests mentioned in +# block comment of this file can take some time as we are doing +# random writes to raise capacity and fragmentation before taking +# the checkpoint. Thus we create this setup once and save the +# disks of the nested pool in a temporary directory where we can +# reuse it for each test that requires that setup. +# +SAVEDPOOLDIR="$TEST_BASE_DIR/ckpoint_saved_pool" + +function test_group_premake_nested_pools +{ + setup_nested_pools + + # + # Populate and fragment the pool. + # + fragment_before_checkpoint + + # + # Export and save the pool for other tests. + # + log_must zpool export $NESTEDPOOL + log_must mkdir $SAVEDPOOLDIR + log_must cp $FILEDISKS $SAVEDPOOLDIR + + # + # Reimport pool to be destroyed by + # cleanup_nested_pools function + # + log_must zpool import -d $FILEDISKDIR $NESTEDPOOL +} + +function test_group_destroy_saved_pool +{ + log_must rm -rf $SAVEDPOOLDIR +} + +# +# Recreate nested pool setup from saved pool. 
+# +function setup_nested_pool_state +{ + setup_test_pool + + log_must zfs create $DISKFS + log_must cp $SAVEDPOOLDIR/* $FILEDISKDIR + + log_must zpool import -d $FILEDISKDIR $NESTEDPOOL +} diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/setup.ksh new file mode 100755 index 000000000000..118400cb2a00 --- /dev/null +++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/setup.ksh @@ -0,0 +1,25 @@ +#!/bin/ksh -p + +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. +# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2018 by Delphix. All rights reserved. +# + +. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib + +verify_runnable "global" + +test_group_premake_nested_pools +log_onexit cleanup_nested_pools + +log_pass "Successfully saved pool to be reused for tests in the group." |