aboutsummaryrefslogtreecommitdiff
path: root/sys/contrib/openzfs/module/zfs/vdev.c
diff options
context:
space:
mode:
Diffstat (limited to 'sys/contrib/openzfs/module/zfs/vdev.c')
-rw-r--r--sys/contrib/openzfs/module/zfs/vdev.c103
1 file changed, 68 insertions, 35 deletions
diff --git a/sys/contrib/openzfs/module/zfs/vdev.c b/sys/contrib/openzfs/module/zfs/vdev.c
index 01758b0c54c0..9cf35e379000 100644
--- a/sys/contrib/openzfs/module/zfs/vdev.c
+++ b/sys/contrib/openzfs/module/zfs/vdev.c
@@ -243,6 +243,25 @@ vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
}
+char *
+vdev_rt_name(vdev_t *vd, const char *name)
+{
+ return (kmem_asprintf("{spa=%s vdev_guid=%llu %s}",
+ spa_name(vd->vdev_spa),
+ (u_longlong_t)vd->vdev_guid,
+ name));
+}
+
+static char *
+vdev_rt_name_dtl(vdev_t *vd, const char *name, vdev_dtl_type_t dtl_type)
+{
+ return (kmem_asprintf("{spa=%s vdev_guid=%llu %s[%d]}",
+ spa_name(vd->vdev_spa),
+ (u_longlong_t)vd->vdev_guid,
+ name,
+ dtl_type));
+}
+
/*
* Virtual device management.
*/
@@ -282,12 +301,15 @@ vdev_getops(const char *type)
* Given a vdev and a metaslab class, find which metaslab group we're
* interested in. All vdevs may belong to two different metaslab classes.
* Dedicated slog devices use only the primary metaslab group, rather than a
- * separate log group. For embedded slogs, the vdev_log_mg will be non-NULL.
+ * separate log group. For embedded slogs, vdev_log_mg will be non-NULL and
+ * will point to a metaslab group of either embedded_log_class (for normal
+ * vdevs) or special_embedded_log_class (for special vdevs).
*/
metaslab_group_t *
vdev_get_mg(vdev_t *vd, metaslab_class_t *mc)
{
- if (mc == spa_embedded_log_class(vd->vdev_spa) &&
+ if ((mc == spa_embedded_log_class(vd->vdev_spa) ||
+ mc == spa_special_embedded_log_class(vd->vdev_spa)) &&
vd->vdev_log_mg != NULL)
return (vd->vdev_log_mg);
else
@@ -532,7 +554,7 @@ vdev_add_child(vdev_t *pvd, vdev_t *cvd)
vdev_t **newchild;
ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
- ASSERT(cvd->vdev_parent == NULL);
+ ASSERT0P(cvd->vdev_parent);
cvd->vdev_parent = pvd;
@@ -556,7 +578,7 @@ vdev_add_child(vdev_t *pvd, vdev_t *cvd)
pvd->vdev_nonrot &= cvd->vdev_nonrot;
cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd);
- ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);
+ ASSERT0P(cvd->vdev_top->vdev_parent->vdev_parent);
/*
* Walk up all ancestors to update guid sum.
@@ -692,8 +714,9 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
- vd->vdev_obsolete_segments = zfs_range_tree_create(NULL,
- ZFS_RANGE_SEG64, NULL, 0, 0);
+ vd->vdev_obsolete_segments = zfs_range_tree_create_flags(
+ NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+ ZFS_RT_F_DYN_NAME, vdev_rt_name(vd, "vdev_obsolete_segments"));
/*
* Initialize rate limit structs for events. We rate limit ZIO delay
@@ -747,8 +770,9 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
cv_init(&vd->vdev_rebuild_cv, NULL, CV_DEFAULT, NULL);
for (int t = 0; t < DTL_TYPES; t++) {
- vd->vdev_dtl[t] = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
- NULL, 0, 0);
+ vd->vdev_dtl[t] = zfs_range_tree_create_flags(
+ NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+ ZFS_RT_F_DYN_NAME, vdev_rt_name_dtl(vd, "vdev_dtl", t));
}
txg_list_create(&vd->vdev_ms_list, spa,
@@ -1077,10 +1101,10 @@ vdev_free(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
- ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
- ASSERT3P(vd->vdev_trim_thread, ==, NULL);
- ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
- ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
+ ASSERT0P(vd->vdev_initialize_thread);
+ ASSERT0P(vd->vdev_trim_thread);
+ ASSERT0P(vd->vdev_autotrim_thread);
+ ASSERT0P(vd->vdev_rebuild_thread);
/*
* Scan queues are normally destroyed at the end of a scan. If the
@@ -1109,7 +1133,7 @@ vdev_free(vdev_t *vd)
for (int c = 0; c < vd->vdev_children; c++)
vdev_free(vd->vdev_child[c]);
- ASSERT(vd->vdev_child == NULL);
+ ASSERT0P(vd->vdev_child);
ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
if (vd->vdev_ops->vdev_op_fini != NULL)
@@ -1138,7 +1162,7 @@ vdev_free(vdev_t *vd)
*/
vdev_remove_child(vd->vdev_parent, vd);
- ASSERT(vd->vdev_parent == NULL);
+ ASSERT0P(vd->vdev_parent);
ASSERT(!list_link_active(&vd->vdev_leaf_node));
/*
@@ -1285,9 +1309,9 @@ vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
ASSERT0(tvd->vdev_indirect_config.vic_births_object);
ASSERT0(tvd->vdev_indirect_config.vic_mapping_object);
ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL);
- ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL);
- ASSERT3P(tvd->vdev_indirect_births, ==, NULL);
- ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL);
+ ASSERT0P(tvd->vdev_indirect_mapping);
+ ASSERT0P(tvd->vdev_indirect_births);
+ ASSERT0P(tvd->vdev_obsolete_sm);
ASSERT0(tvd->vdev_noalloc);
ASSERT0(tvd->vdev_removing);
ASSERT0(tvd->vdev_rebuilding);
@@ -1440,7 +1464,7 @@ vdev_remove_parent(vdev_t *cvd)
if (cvd == cvd->vdev_top)
vdev_top_transfer(mvd, cvd);
- ASSERT(mvd->vdev_children == 0);
+ ASSERT0(mvd->vdev_children);
vdev_free(mvd);
}
@@ -1508,8 +1532,13 @@ vdev_metaslab_group_create(vdev_t *vd)
vd->vdev_mg = metaslab_group_create(mc, vd);
if (!vd->vdev_islog) {
- vd->vdev_log_mg = metaslab_group_create(
- spa_embedded_log_class(spa), vd);
+ if (mc == spa_special_class(spa)) {
+ vd->vdev_log_mg = metaslab_group_create(
+ spa_special_embedded_log_class(spa), vd);
+ } else {
+ vd->vdev_log_mg = metaslab_group_create(
+ spa_embedded_log_class(spa), vd);
+ }
}
/*
@@ -1624,9 +1653,10 @@ vdev_metaslab_init(vdev_t *vd, uint64_t txg)
/*
* Find the emptiest metaslab on the vdev and mark it for use for
* embedded slog by moving it from the regular to the log metaslab
- * group.
+ * group. This works for normal and special vdevs.
*/
- if (vd->vdev_mg->mg_class == spa_normal_class(spa) &&
+ if ((vd->vdev_mg->mg_class == spa_normal_class(spa) ||
+ vd->vdev_mg->mg_class == spa_special_class(spa)) &&
vd->vdev_ms_count > zfs_embedded_slog_min_ms &&
avl_is_empty(&vd->vdev_log_mg->mg_metaslab_tree)) {
uint64_t slog_msid = 0;
@@ -2104,14 +2134,14 @@ vdev_open(vdev_t *vd)
* faulted, bail out of the open.
*/
if (!vd->vdev_removed && vd->vdev_faulted) {
- ASSERT(vd->vdev_children == 0);
+ ASSERT0(vd->vdev_children);
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (SET_ERROR(ENXIO));
} else if (vd->vdev_offline) {
- ASSERT(vd->vdev_children == 0);
+ ASSERT0(vd->vdev_children);
vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
return (SET_ERROR(ENXIO));
}
@@ -2167,7 +2197,7 @@ vdev_open(vdev_t *vd)
* the vdev is accessible. If we're faulted, bail.
*/
if (vd->vdev_faulted) {
- ASSERT(vd->vdev_children == 0);
+ ASSERT0(vd->vdev_children);
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
@@ -2176,7 +2206,7 @@ vdev_open(vdev_t *vd)
}
if (vd->vdev_degraded) {
- ASSERT(vd->vdev_children == 0);
+ ASSERT0(vd->vdev_children);
vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
VDEV_AUX_ERR_EXCEEDED);
} else {
@@ -3449,7 +3479,9 @@ vdev_dtl_load(vdev_t *vd)
return (error);
ASSERT(vd->vdev_dtl_sm != NULL);
- rt = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, NULL, 0, 0);
+ rt = zfs_range_tree_create_flags(
+ NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+ ZFS_RT_F_DYN_NAME, vdev_rt_name(vd, "vdev_dtl_load:rt"));
error = space_map_load(vd->vdev_dtl_sm, rt, SM_ALLOC);
if (error == 0) {
mutex_enter(&vd->vdev_dtl_lock);
@@ -3597,7 +3629,8 @@ vdev_dtl_sync(vdev_t *vd, uint64_t txg)
ASSERT(vd->vdev_dtl_sm != NULL);
}
- rtsync = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, NULL, 0, 0);
+ rtsync = zfs_range_tree_create_flags(NULL, ZFS_RANGE_SEG64, NULL, 0, 0,
+ ZFS_RT_F_DYN_NAME, vdev_rt_name(vd, "rtsync"));
mutex_enter(&vd->vdev_dtl_lock);
zfs_range_tree_walk(rt, zfs_range_tree_add, rtsync);
@@ -3912,7 +3945,7 @@ vdev_load(vdev_t *vd)
if (error == 0 && checkpoint_sm_obj != 0) {
objset_t *mos = spa_meta_objset(vd->vdev_spa);
ASSERT(vd->vdev_asize != 0);
- ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL);
+ ASSERT0P(vd->vdev_checkpoint_sm);
error = space_map_open(&vd->vdev_checkpoint_sm,
mos, checkpoint_sm_obj, 0, vd->vdev_asize,
@@ -3960,7 +3993,7 @@ vdev_load(vdev_t *vd)
if (error == 0 && obsolete_sm_object != 0) {
objset_t *mos = vd->vdev_spa->spa_meta_objset;
ASSERT(vd->vdev_asize != 0);
- ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
+ ASSERT0P(vd->vdev_obsolete_sm);
if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
obsolete_sm_object, 0, vd->vdev_asize, 0))) {
@@ -4488,7 +4521,7 @@ top:
/*
* Prevent any future allocations.
*/
- ASSERT3P(tvd->vdev_log_mg, ==, NULL);
+ ASSERT0P(tvd->vdev_log_mg);
metaslab_group_passivate(mg);
(void) spa_vdev_state_exit(spa, vd, 0);
@@ -5161,7 +5194,7 @@ vdev_stat_update(zio_t *zio, uint64_t psize)
int64_t
vdev_deflated_space(vdev_t *vd, int64_t space)
{
- ASSERT((space & (SPA_MINBLOCKSIZE-1)) == 0);
+ ASSERT0((space & (SPA_MINBLOCKSIZE-1)));
ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
return ((space >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio);
@@ -5253,8 +5286,8 @@ vdev_config_dirty(vdev_t *vd)
if (nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
- VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
- ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
+ VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config,
+ ZPOOL_CONFIG_SPARES, &aux, &naux));
}
ASSERT(c < naux);
@@ -5642,7 +5675,7 @@ vdev_expand(vdev_t *vd, uint64_t txg)
(vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
vdev_is_concrete(vd)) {
vdev_metaslab_group_create(vd);
- VERIFY(vdev_metaslab_init(vd, txg) == 0);
+ VERIFY0(vdev_metaslab_init(vd, txg));
vdev_config_dirty(vd);
}
}