Diffstat (limited to 'cddl/usr.sbin/zfsd/tests/zfsd_unittest.cc')
-rw-r--r--  cddl/usr.sbin/zfsd/tests/zfsd_unittest.cc | 118
1 file changed, 112 insertions(+), 6 deletions(-)
diff --git a/cddl/usr.sbin/zfsd/tests/zfsd_unittest.cc b/cddl/usr.sbin/zfsd/tests/zfsd_unittest.cc
index e2833170f9ac..6c9a49a8ebbf 100644
--- a/cddl/usr.sbin/zfsd/tests/zfsd_unittest.cc
+++ b/cddl/usr.sbin/zfsd/tests/zfsd_unittest.cc
@@ -62,9 +62,6 @@
#include <zfsd/zpool_list.h>
#include "libmocks.h"
-
-__FBSDID("$FreeBSD$");
-
/*================================== Macros ==================================*/
#define NUM_ELEMENTS(x) (sizeof(x) / sizeof(*x))
@@ -85,6 +82,9 @@ struct zpool_handle
zpool_handle_t *zpool_next;
char zpool_name[ZFS_MAX_DATASET_NAME_LEN];
int zpool_state;
+ unsigned int zpool_n_propnames;
+#define ZHP_MAX_PROPNAMES 4
+ const char *zpool_propnames[ZHP_MAX_PROPNAMES];
size_t zpool_config_size;
nvlist_t *zpool_config;
nvlist_t *zpool_old_config;
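The two fields added above let the fake zpool_handle remember which pool property names were looked up. A minimal sketch of how a mocked property accessor could record them is below; the helper name is hypothetical and not part of this diff (the real mock implementation lives in libmocks and is not shown here):

static void
mock_record_propname(zpool_handle_t *zhp, const char *propname)
{
	/* Remember the requested property name so a test can assert on it. */
	if (zhp->zpool_n_propnames < ZHP_MAX_PROPNAMES)
		zhp->zpool_propnames[zhp->zpool_n_propnames++] = propname;
}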
@@ -137,6 +137,7 @@ public:
MOCK_CONST_METHOD0(PoolGUID, Guid());
MOCK_CONST_METHOD0(State, vdev_state());
MOCK_CONST_METHOD0(PhysicalPath, string());
+ MOCK_CONST_METHOD2(Name, string(zpool_handle_t *zhp, bool verbose));
};
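For readers unfamiliar with Google Mock, the new Name() mock can also be overridden per expectation with the usual EXPECT_CALL syntax on a MockVdev instance such as m_vdev below; the return value here is purely illustrative:

	EXPECT_CALL(*m_vdev, Name(::testing::_, false))
	    .WillOnce(::testing::Return(string("da999")));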
MockVdev::MockVdev(nvlist_t *vdevConfig)
@@ -400,7 +401,7 @@ TEST_F(ZfsEventTest, ProcessPoolEventGetsCalled)
{
string evString("!system=ZFS "
"subsystem=ZFS "
- "type=misc.fs.zfs.vdev_remove "
+ "type=sysevent.fs.zfs.vdev_remove "
"pool_name=foo "
"pool_guid=9756779504028057996 "
"vdev_guid=1631193447431603339 "
@@ -434,6 +435,8 @@ protected:
m_vdev = new MockVdev(m_vdevConfig);
ON_CALL(*m_vdev, GUID())
.WillByDefault(::testing::Return(Guid(123)));
+ ON_CALL(*m_vdev, Name(::testing::_, ::testing::_))
+ .WillByDefault(::testing::Return(string("/dev/da999")));
ON_CALL(*m_vdev, PoolGUID())
.WillByDefault(::testing::Return(Guid(456)));
ON_CALL(*m_vdev, State())
@@ -515,7 +518,7 @@ TEST_F(CaseFileTest, PoolDestroy)
"pool_guid=456 "
"subsystem=ZFS "
"timestamp=1348867914 "
- "type=misc.fs.zfs.pool_destroy ");
+ "type=sysevent.fs.zfs.pool_destroy ");
m_event = Event::CreateEvent(*m_eventFactory, evString);
ZfsEvent *zfs_event = static_cast<ZfsEvent*>(m_event);
EXPECT_CALL(*m_caseFile, Close());
@@ -523,6 +526,72 @@ TEST_F(CaseFileTest, PoolDestroy)
}
/*
+ * A Vdev with a very large number of Delay errors should fault
+ * For performance reasons, RefreshVdevState should be called at most once
+ */
+TEST_F(CaseFileTest, VeryManyDelayErrors)
+{
+ EXPECT_CALL(*m_caseFile, RefreshVdevState())
+ .Times(::testing::AtMost(1))
+ .WillRepeatedly(::testing::Return(true));
+
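+ /* Submit 100 distinct delay ereports for the same vdev (vdev_guid 123). */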
+ for(int i=0; i<100; i++) {
+ stringstream evStringStream;
+ evStringStream <<
+ "!system=ZFS "
+ "class=ereport.fs.zfs.delay "
+ "ena=12091638756982918145 "
+ "parent_guid=13237004955564865395 "
+ "parent_type=raidz "
+ "pool=testpool.4415 "
+ "pool_context=0 "
+ "pool_failmode=wait "
+ "pool_guid=456 "
+ "pool_state= 0"
+ "subsystem=ZFS "
+ "time=";
+ evStringStream << i << "0000000000000000 ";
+ evStringStream << "timestamp=" << i << " ";
+ evStringStream <<
+ "type=ereport.fs.zfs.delay "
+ "vdev_ashift=12 "
+ "vdev_cksum_errors=0 "
+ "vdev_complete_ts=948336226469 "
+ "vdev_delays=77 "
+ "vdev_delta_ts=123998485899 "
+ "vdev_guid=123 "
+ "vdev_path=/dev/da400 "
+ "vdev_read_errors=0 "
+ "vdev_spare_guids= "
+ "vdev_type=disk "
+ "vdev_write_errors=0 "
+ "zio_blkid=622 "
+ "zio_delay=31000041101 "
+ "zio_delta=123998485899 "
+ "zio_err=0 "
+ "zio_flags=1572992 "
+ "zio_level=-2 "
+ "zio_object=0 "
+ "zio_objset=37 "
+ "zio_offset=25598976 "
+ "zio_pipeline=48234496 "
+ "zio_priority=3 "
+ "zio_size=1024"
+ "zio_stage=33554432 "
+ "zio_timestamp=824337740570 ";
+ Event *event(Event::CreateEvent(*m_eventFactory,
+ evStringStream.str()));
+ ZfsEvent *zfs_event = static_cast<ZfsEvent*>(event);
+ EXPECT_TRUE(m_caseFile->ReEvaluate(*zfs_event));
+ delete event;
+ }
+
+ m_caseFile->SpliceEvents();
+ EXPECT_FALSE(m_caseFile->ShouldDegrade());
+ EXPECT_TRUE(m_caseFile->ShouldFault());
+}
+
+/*
* A Vdev with a very large number of IO errors should fault
* For performance reasons, RefreshVdevState should be called at most once
*/
@@ -685,7 +754,7 @@ string ReEvaluateByGuidTest::s_evString(
"pool_name=foo "
"subsystem=ZFS "
"timestamp=1360620391 "
- "type=misc.fs.zfs.config_sync");
+ "type=sysevent.fs.zfs.config_sync");
/*
@@ -769,3 +838,40 @@ TEST_F(ReEvaluateByGuidTest, ReEvaluateByGuid_five)
delete CaseFile4;
delete CaseFile5;
}
+
+/*
+ * Test VdevIterator
+ */
+class VdevIteratorTest : public ::testing::Test
+{
+};
+
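+/*
+ * A do-nothing iteration callback; the regression test below only checks
+ * that VdevIterator::Each terminates without crashing.
+ */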
+bool VdevIteratorTestCB(Vdev &vdev, void *cbArg) {
+ return (false);
+}
+
+/*
+ * VdevIterator::Next should not crash when run on a pool that has a previously
+ * removed vdev. Regression test for
+ * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=273663
+ */
+TEST_F(VdevIteratorTest, VdevRemoval)
+{
+ nvlist_t *poolConfig, *rootVdev;
+
+ ASSERT_EQ(0, nvlist_alloc(&rootVdev, NV_UNIQUE_NAME, 0));
+ ASSERT_EQ(0, nvlist_add_uint64(rootVdev, ZPOOL_CONFIG_GUID, 0x5678));
+ /*
+ * Note: pools with a previously-removed top-level vdev (TLV) will still
+ * contain an entry for it in their labels, and that entry has 0 children.
+ */
+ ASSERT_EQ(0, nvlist_add_nvlist_array(rootVdev, ZPOOL_CONFIG_CHILDREN,
+ NULL, 0));
+ ASSERT_EQ(0, nvlist_alloc(&poolConfig, NV_UNIQUE_NAME, 0));
+ ASSERT_EQ(0, nvlist_add_uint64(poolConfig,
+ ZPOOL_CONFIG_POOL_GUID, 0x1234));
+ ASSERT_EQ(0, nvlist_add_nvlist(poolConfig, ZPOOL_CONFIG_VDEV_TREE,
+ rootVdev));
+
+ VdevIterator(poolConfig).Each(VdevIteratorTestCB, NULL);
+}
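As a usage illustration only (not part of this change), the cbArg pointer can carry state between the caller and the callback; this sketch assumes, following VdevIteratorTestCB above, that returning false lets the iteration continue:

static bool
CountVdevsCB(Vdev &vdev, void *cbArg)
{
	unsigned *count = static_cast<unsigned *>(cbArg);

	(*count)++;
	return (false);	/* Assumed to mean: keep visiting vdevs. */
}

	/* e.g., inside a test body: */
	unsigned count = 0;
	VdevIterator(poolConfig).Each(CountVdevsCB, &count);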