Diffstat (limited to 'sys/contrib/openzfs/module/zfs/spa.c')
-rw-r--r--  sys/contrib/openzfs/module/zfs/spa.c  260
1 file changed, 215 insertions(+), 45 deletions(-)
diff --git a/sys/contrib/openzfs/module/zfs/spa.c b/sys/contrib/openzfs/module/zfs/spa.c
index 6b52c6cb1f9e..b3bb46da263b 100644
--- a/sys/contrib/openzfs/module/zfs/spa.c
+++ b/sys/contrib/openzfs/module/zfs/spa.c
@@ -100,6 +100,7 @@
#include <sys/vmsystm.h>
#endif /* _KERNEL */
+#include "zfs_crrd.h"
#include "zfs_prop.h"
#include "zfs_comutil.h"
#include <cityhash.h>
@@ -311,6 +312,41 @@ static int zfs_livelist_condense_zthr_cancel = 0;
static int zfs_livelist_condense_new_alloc = 0;
/*
+ * How often (in seconds) the current txg should be recorded in the
+ * database.
+ * The smallest available resolution is in minutes, which means an update occurs
+ * each time we reach `spa_note_txg_time` and the txg has changed. We provide
+ * a 256-slot ring buffer for minute-level resolution. The number is limited by
+ * the size of the structure we use and the maximum number of bytes we can write
+ * into ZAP. Setting `spa_note_txg_time` to 10 minutes results in approximately
+ * 144 records per day. Given the 256 slots, this provides roughly 1.8 days of
+ * high-resolution data.
+ *
+ * The user can decrease `spa_note_txg_time` to increase resolution within
+ * a day, at the cost of retaining fewer days of data. Alternatively, increasing
+ * the interval allows storing data over a longer period, but with lower
+ * frequency.
+ *
+ * This parameter does not affect the daily or monthly databases, as those only
+ * store one record per day and per month, respectively.
+ */
+static uint_t spa_note_txg_time = 10 * 60;
+
+/*
+ * How often (in seconds) to flush the txg database to disk.
+ * We flush data every time we write to it, making it the most reliable option.
+ * Since this happens every 10 minutes, it shouldn't introduce any noticeable
+ * overhead for the system. In case of failure, we will always have an
+ * up-to-date version of the database.
+ *
+ * The user can adjust the flush interval to a lower value, but it probably
+ * doesn't make sense to flush more often than the database is updated.
+ * The user can also increase the interval if they're concerned about the
+ * performance of writing the entire database to disk.
+ */
+static uint_t spa_flush_txg_time = 10 * 60;
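(Editorial aside, not part of the commit: the retention arithmetic described in the two comments above can be reproduced with a minimal standalone sketch. The 256-slot minute ring and the 10-minute defaults are taken from the comment text; the MINUTE_RING_SLOTS constant and the program itself are illustrative only and are not symbols in the module.)

/* Hypothetical sketch: retention arithmetic for the minute-resolution ring. */
#include <stdio.h>

#define	MINUTE_RING_SLOTS	256	/* slot count stated in the comment above */

int
main(void)
{
	unsigned int note_interval = 10 * 60;	/* spa_note_txg_time default, seconds */
	unsigned int flush_interval = 10 * 60;	/* spa_flush_txg_time default, seconds */

	/* One record is noted per elapsed interval (when the txg has changed). */
	double records_per_day = 86400.0 / note_interval;	/* 144 at the default */
	double days_covered = MINUTE_RING_SLOTS / records_per_day;

	printf("note every %u s -> %.0f records/day, ring covers ~%.1f days\n",
	    note_interval, records_per_day, days_covered);
	printf("flush every %u s (at the defaults, once per database update)\n",
	    flush_interval);
	return (0);
}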
+
+/*
* ==========================================================================
* SPA properties routines
* ==========================================================================
@@ -390,10 +426,10 @@ spa_prop_add_user(nvlist_t *nvl, const char *propname, char *strval,
{
nvlist_t *propval;
- VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
- VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
- VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
- VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
+ VERIFY0(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP));
+ VERIFY0(nvlist_add_uint64(propval, ZPROP_SOURCE, src));
+ VERIFY0(nvlist_add_string(propval, ZPROP_VALUE, strval));
+ VERIFY0(nvlist_add_nvlist(nvl, propname, propval));
nvlist_free(propval);
}
@@ -417,11 +453,15 @@ spa_prop_get_config(spa_t *spa, nvlist_t *nv)
alloc += metaslab_class_get_alloc(spa_special_class(spa));
alloc += metaslab_class_get_alloc(spa_dedup_class(spa));
alloc += metaslab_class_get_alloc(spa_embedded_log_class(spa));
+ alloc += metaslab_class_get_alloc(
+ spa_special_embedded_log_class(spa));
size = metaslab_class_get_space(mc);
size += metaslab_class_get_space(spa_special_class(spa));
size += metaslab_class_get_space(spa_dedup_class(spa));
size += metaslab_class_get_space(spa_embedded_log_class(spa));
+ size += metaslab_class_get_space(
+ spa_special_embedded_log_class(spa));
spa_prop_add_list(nv, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
spa_prop_add_list(nv, ZPOOL_PROP_SIZE, NULL, size, src);
@@ -925,7 +965,7 @@ spa_prop_set(spa_t *spa, nvlist_t *nvp)
uint64_t ver = 0;
if (prop == ZPOOL_PROP_VERSION) {
- VERIFY(nvpair_value_uint64(elem, &ver) == 0);
+ VERIFY0(nvpair_value_uint64(elem, &ver));
} else {
ASSERT(zpool_prop_feature(nvpair_name(elem)));
ver = SPA_VERSION_FEATURES;
@@ -1255,7 +1295,7 @@ spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
if (tqs->stqs_taskq == NULL) {
- ASSERT3U(tqs->stqs_count, ==, 0);
+ ASSERT0(tqs->stqs_count);
return;
}
@@ -1679,6 +1719,8 @@ spa_activate(spa_t *spa, spa_mode_t mode)
"embedded_log", msp, B_TRUE);
spa->spa_special_class = metaslab_class_create(spa, "special",
msp, B_FALSE);
+ spa->spa_special_embedded_log_class = metaslab_class_create(spa,
+ "special_embedded_log", msp, B_TRUE);
spa->spa_dedup_class = metaslab_class_create(spa, "dedup",
msp, B_FALSE);
@@ -1794,9 +1836,9 @@ static void
spa_deactivate(spa_t *spa)
{
ASSERT(spa->spa_sync_on == B_FALSE);
- ASSERT(spa->spa_dsl_pool == NULL);
- ASSERT(spa->spa_root_vdev == NULL);
- ASSERT(spa->spa_async_zio_root == NULL);
+ ASSERT0P(spa->spa_dsl_pool);
+ ASSERT0P(spa->spa_root_vdev);
+ ASSERT0P(spa->spa_async_zio_root);
ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
spa_evicting_os_wait(spa);
@@ -1853,6 +1895,9 @@ spa_deactivate(spa_t *spa)
metaslab_class_destroy(spa->spa_special_class);
spa->spa_special_class = NULL;
+ metaslab_class_destroy(spa->spa_special_embedded_log_class);
+ spa->spa_special_embedded_log_class = NULL;
+
metaslab_class_destroy(spa->spa_dedup_class);
spa->spa_dedup_class = NULL;
@@ -1976,7 +2021,7 @@ spa_unload_log_sm_flush_all(spa_t *spa)
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
- ASSERT3U(spa->spa_log_flushall_txg, ==, 0);
+ ASSERT0(spa->spa_log_flushall_txg);
spa->spa_log_flushall_txg = dmu_tx_get_txg(tx);
dmu_tx_commit(tx);
@@ -2031,6 +2076,111 @@ spa_destroy_aux_threads(spa_t *spa)
}
}
+static void
+spa_sync_time_logger(spa_t *spa, uint64_t txg)
+{
+ uint64_t curtime;
+ dmu_tx_t *tx;
+
+ if (!spa_writeable(spa)) {
+ return;
+ }
+ curtime = gethrestime_sec();
+ if (curtime < spa->spa_last_noted_txg_time + spa_note_txg_time) {
+ return;
+ }
+
+ if (txg > spa->spa_last_noted_txg) {
+ spa->spa_last_noted_txg_time = curtime;
+ spa->spa_last_noted_txg = txg;
+
+ mutex_enter(&spa->spa_txg_log_time_lock);
+ dbrrd_add(&spa->spa_txg_log_time, curtime, txg);
+ mutex_exit(&spa->spa_txg_log_time_lock);
+ }
+
+ if (curtime < spa->spa_last_flush_txg_time + spa_flush_txg_time) {
+ return;
+ }
+ spa->spa_last_flush_txg_time = curtime;
+
+ tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
+
+ VERIFY0(zap_update(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_TXG_LOG_TIME_MINUTES, RRD_ENTRY_SIZE, RRD_STRUCT_ELEM,
+ &spa->spa_txg_log_time.dbr_minutes, tx));
+ VERIFY0(zap_update(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_TXG_LOG_TIME_DAYS, RRD_ENTRY_SIZE, RRD_STRUCT_ELEM,
+ &spa->spa_txg_log_time.dbr_days, tx));
+ VERIFY0(zap_update(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_TXG_LOG_TIME_MONTHS, RRD_ENTRY_SIZE, RRD_STRUCT_ELEM,
+ &spa->spa_txg_log_time.dbr_months, tx));
+ dmu_tx_commit(tx);
+}
+
+static void
+spa_unload_sync_time_logger(spa_t *spa)
+{
+ uint64_t txg;
+ dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
+ VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
+
+ txg = dmu_tx_get_txg(tx);
+ spa->spa_last_noted_txg_time = 0;
+ spa->spa_last_flush_txg_time = 0;
+ spa_sync_time_logger(spa, txg);
+
+ dmu_tx_commit(tx);
+}
+
+static void
+spa_load_txg_log_time(spa_t *spa)
+{
+ int error;
+
+ error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_TXG_LOG_TIME_MINUTES, RRD_ENTRY_SIZE, RRD_STRUCT_ELEM,
+ &spa->spa_txg_log_time.dbr_minutes);
+ if (error != 0 && error != ENOENT) {
+ spa_load_note(spa, "unable to load a txg time database with "
+ "minute resolution [error=%d]", error);
+ }
+ error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_TXG_LOG_TIME_DAYS, RRD_ENTRY_SIZE, RRD_STRUCT_ELEM,
+ &spa->spa_txg_log_time.dbr_days);
+ if (error != 0 && error != ENOENT) {
+ spa_load_note(spa, "unable to load a txg time database with "
+ "day resolution [error=%d]", error);
+ }
+ error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_TXG_LOG_TIME_MONTHS, RRD_ENTRY_SIZE, RRD_STRUCT_ELEM,
+ &spa->spa_txg_log_time.dbr_months);
+ if (error != 0 && error != ENOENT) {
+ spa_load_note(spa, "unable to load a txg time database with "
+ "month resolution [error=%d]", error);
+ }
+}
+
+static boolean_t
+spa_should_sync_time_logger_on_unload(spa_t *spa)
+{
+
+ if (!spa_writeable(spa))
+ return (B_FALSE);
+
+ if (!spa->spa_sync_on)
+ return (B_FALSE);
+
+ if (spa_state(spa) != POOL_STATE_EXPORTED)
+ return (B_FALSE);
+
+ if (spa->spa_last_noted_txg == 0)
+ return (B_FALSE);
+
+ return (B_TRUE);
+}
+
+
/*
* Opposite of spa_load().
*/
@@ -2052,6 +2202,9 @@ spa_unload(spa_t *spa)
* we delay the final TXGs beyond what spa_final_txg is set at.
*/
if (spa->spa_final_txg == UINT64_MAX) {
+ if (spa_should_sync_time_logger_on_unload(spa))
+ spa_unload_sync_time_logger(spa);
+
/*
* If the log space map feature is enabled and the pool is
* getting exported (but not destroyed), we want to spend some
@@ -2127,7 +2280,7 @@ spa_unload(spa_t *spa)
*/
if (spa->spa_root_vdev)
vdev_free(spa->spa_root_vdev);
- ASSERT(spa->spa_root_vdev == NULL);
+ ASSERT0P(spa->spa_root_vdev);
/*
* Close the dsl pool.
@@ -2265,8 +2418,8 @@ spa_load_spares(spa_t *spa)
spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
KM_SLEEP);
for (i = 0; i < spa->spa_spares.sav_count; i++) {
- VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
- VDEV_ALLOC_SPARE) == 0);
+ VERIFY0(spa_config_parse(spa, &vd, spares[i], NULL, 0,
+ VDEV_ALLOC_SPARE));
ASSERT(vd != NULL);
spa->spa_spares.sav_vdevs[i] = vd;
@@ -2393,8 +2546,8 @@ spa_load_l2cache(spa_t *spa)
/*
* Create new vdev
*/
- VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
- VDEV_ALLOC_L2CACHE) == 0);
+ VERIFY0(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
+ VDEV_ALLOC_L2CACHE));
ASSERT(vd != NULL);
newvdevs[i] = vd;
@@ -2646,7 +2799,7 @@ spa_passivate_log(spa_t *spa)
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog) {
- ASSERT3P(tvd->vdev_log_mg, ==, NULL);
+ ASSERT0P(tvd->vdev_log_mg);
metaslab_group_passivate(tvd->vdev_mg);
slog_found = B_TRUE;
}
@@ -2669,7 +2822,7 @@ spa_activate_log(spa_t *spa)
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog) {
- ASSERT3P(tvd->vdev_log_mg, ==, NULL);
+ ASSERT0P(tvd->vdev_log_mg);
metaslab_group_activate(tvd->vdev_mg);
}
}
@@ -2709,8 +2862,8 @@ spa_claim_notify(zio_t *zio)
return;
mutex_enter(&spa->spa_props_lock); /* any mutex will do */
- if (spa->spa_claim_max_txg < BP_GET_LOGICAL_BIRTH(zio->io_bp))
- spa->spa_claim_max_txg = BP_GET_LOGICAL_BIRTH(zio->io_bp);
+ if (spa->spa_claim_max_txg < BP_GET_BIRTH(zio->io_bp))
+ spa->spa_claim_max_txg = BP_GET_BIRTH(zio->io_bp);
mutex_exit(&spa->spa_props_lock);
}
@@ -3106,7 +3259,7 @@ spa_livelist_delete_cb(void *arg, zthr_t *z)
static void
spa_start_livelist_destroy_thread(spa_t *spa)
{
- ASSERT3P(spa->spa_livelist_delete_zthr, ==, NULL);
+ ASSERT0P(spa->spa_livelist_delete_zthr);
spa->spa_livelist_delete_zthr =
zthr_create("z_livelist_destroy",
spa_livelist_delete_cb_check, spa_livelist_delete_cb, spa,
@@ -3122,7 +3275,7 @@ static int
livelist_track_new_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
- ASSERT(tx == NULL);
+ ASSERT0P(tx);
livelist_new_arg_t *lna = arg;
if (bp_freed) {
bplist_append(lna->frees, bp);
@@ -3316,7 +3469,7 @@ spa_start_livelist_condensing_thread(spa_t *spa)
spa->spa_to_condense.syncing = B_FALSE;
spa->spa_to_condense.cancelled = B_FALSE;
- ASSERT3P(spa->spa_livelist_condense_zthr, ==, NULL);
+ ASSERT0P(spa->spa_livelist_condense_zthr);
spa->spa_livelist_condense_zthr =
zthr_create("z_livelist_condense",
spa_livelist_condense_cb_check,
@@ -3333,7 +3486,7 @@ spa_spawn_aux_threads(spa_t *spa)
spa_start_livelist_destroy_thread(spa);
spa_start_livelist_condensing_thread(spa);
- ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL);
+ ASSERT0P(spa->spa_checkpoint_discard_zthr);
spa->spa_checkpoint_discard_zthr =
zthr_create("z_checkpoint_discard",
spa_checkpoint_discard_thread_check,
@@ -3768,20 +3921,17 @@ out:
* ZPOOL_CONFIG_MMP_HOSTID - hostid from the active pool
*/
if (error == EREMOTEIO) {
- const char *hostname = "<unknown>";
- uint64_t hostid = 0;
-
if (mmp_label) {
if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) {
- hostname = fnvlist_lookup_string(mmp_label,
- ZPOOL_CONFIG_HOSTNAME);
+ const char *hostname = fnvlist_lookup_string(
+ mmp_label, ZPOOL_CONFIG_HOSTNAME);
fnvlist_add_string(spa->spa_load_info,
ZPOOL_CONFIG_MMP_HOSTNAME, hostname);
}
if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) {
- hostid = fnvlist_lookup_uint64(mmp_label,
- ZPOOL_CONFIG_HOSTID);
+ uint64_t hostid = fnvlist_lookup_uint64(
+ mmp_label, ZPOOL_CONFIG_HOSTID);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_HOSTID, hostid);
}
@@ -3941,11 +4091,11 @@ spa_ld_parse_config(spa_t *spa, spa_import_type_t type)
nvlist_free(spa->spa_load_info);
spa->spa_load_info = fnvlist_alloc();
- ASSERT(spa->spa_comment == NULL);
+ ASSERT0P(spa->spa_comment);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
spa->spa_comment = spa_strdup(comment);
- ASSERT(spa->spa_compatibility == NULL);
+ ASSERT0P(spa->spa_compatibility);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMPATIBILITY,
&compatibility) == 0)
spa->spa_compatibility = spa_strdup(compatibility);
@@ -4711,6 +4861,9 @@ spa_ld_get_props(spa_t *spa)
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
+ /* Load time log */
+ spa_load_txg_log_time(spa);
+
/*
* Load the persistent error log. If we have an older pool, this will
* not be present.
@@ -5760,7 +5913,7 @@ spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request,
nvlist_free(config);
if (state == SPA_LOAD_RECOVER) {
- ASSERT3P(loadinfo, ==, NULL);
+ ASSERT0P(loadinfo);
spa_import_progress_remove(spa_guid(spa));
return (rewind_error);
} else {
@@ -5899,7 +6052,7 @@ spa_open_common(const char *pool, spa_t **spapp, const void *tag,
}
if (firstopen)
- zvol_create_minors_recursive(spa_name(spa));
+ zvol_create_minors(spa_name(spa));
*spapp = spa;
@@ -6877,7 +7030,7 @@ spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
mutex_exit(&spa_namespace_lock);
- zvol_create_minors_recursive(pool);
+ zvol_create_minors(pool);
spa_import_os(spa);
@@ -7134,6 +7287,9 @@ spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
spa_config_exit(spa, SCL_ALL, FTAG);
}
+ if (spa_should_sync_time_logger_on_unload(spa))
+ spa_unload_sync_time_logger(spa);
+
/*
* If the log space map feature is enabled and the pool is
* getting exported (but not destroyed), we want to spend some
@@ -8935,7 +9091,7 @@ spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
int
spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
{
- ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
+ ASSERT0(spa_config_held(spa, SCL_ALL, RW_WRITER));
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (SET_ERROR(EBUSY));
@@ -8946,7 +9102,7 @@ spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
int
spa_scan_stop(spa_t *spa)
{
- ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
+ ASSERT0(spa_config_held(spa, SCL_ALL, RW_WRITER));
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (SET_ERROR(EBUSY));
@@ -8963,7 +9119,7 @@ int
spa_scan_range(spa_t *spa, pool_scan_func_t func, uint64_t txgstart,
uint64_t txgend)
{
- ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
+ ASSERT0(spa_config_held(spa, SCL_ALL, RW_WRITER));
if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
return (SET_ERROR(ENOTSUP));
@@ -9092,6 +9248,8 @@ spa_async_thread(void *arg)
old_space += metaslab_class_get_space(spa_dedup_class(spa));
old_space += metaslab_class_get_space(
spa_embedded_log_class(spa));
+ old_space += metaslab_class_get_space(
+ spa_special_embedded_log_class(spa));
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
@@ -9100,6 +9258,8 @@ spa_async_thread(void *arg)
new_space += metaslab_class_get_space(spa_dedup_class(spa));
new_space += metaslab_class_get_space(
spa_embedded_log_class(spa));
+ new_space += metaslab_class_get_space(
+ spa_special_embedded_log_class(spa));
mutex_exit(&spa_namespace_lock);
/*
@@ -9388,7 +9548,7 @@ spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
{
zio_t *zio = zio_root(spa, NULL, NULL, 0);
bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
- VERIFY(zio_wait(zio) == 0);
+ VERIFY0(zio_wait(zio));
}
/*
@@ -9427,7 +9587,7 @@ spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
size_t nvsize = 0;
dmu_buf_t *db;
- VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
+ VERIFY0(nvlist_size(nv, &nvsize, NV_ENCODE_XDR));
/*
* Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
@@ -9437,15 +9597,15 @@ spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
packed = vmem_alloc(bufsize, KM_SLEEP);
- VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
- KM_SLEEP) == 0);
+ VERIFY0(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
+ KM_SLEEP));
memset(packed + nvsize, 0, bufsize - nvsize);
dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
vmem_free(packed, bufsize);
- VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
+ VERIFY0(dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
dmu_buf_will_dirty(db, tx);
*(uint64_t *)db->db_data = nvsize;
dmu_buf_rele(db, FTAG);
@@ -10180,6 +10340,8 @@ spa_sync(spa_t *spa, uint64_t txg)
*/
brt_pending_apply(spa, txg);
+ spa_sync_time_logger(spa, txg);
+
/*
* Lock out configuration changes.
*/
@@ -10222,6 +10384,7 @@ spa_sync(spa_t *spa, uint64_t txg)
dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
spa->spa_sync_starttime = gethrtime();
+
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
@@ -10309,7 +10472,7 @@ spa_sync(spa_t *spa, uint64_t txg)
metaslab_class_evict_old(spa->spa_normal_class, txg);
metaslab_class_evict_old(spa->spa_log_class, txg);
- /* spa_embedded_log_class has only one metaslab per vdev. */
+ /* Embedded log classes have only one metaslab per vdev. */
metaslab_class_evict_old(spa->spa_special_class, txg);
metaslab_class_evict_old(spa->spa_dedup_class, txg);
@@ -10378,7 +10541,7 @@ spa_sync_tq_create(spa_t *spa, const char *name)
{
kthread_t **kthreads;
- ASSERT(spa->spa_sync_tq == NULL);
+ ASSERT0P(spa->spa_sync_tq);
ASSERT3S(spa->spa_alloc_count, <=, boot_ncpus);
/*
@@ -11095,6 +11258,13 @@ ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, new_alloc, INT,
"Whether extra ALLOC blkptrs were added to a livelist entry while it "
"was being condensed");
+ZFS_MODULE_PARAM(zfs_spa, spa_, note_txg_time, UINT, ZMOD_RW,
+ "How frequently TXG timestamps are stored internally (in seconds)");
+
+ZFS_MODULE_PARAM(zfs_spa, spa_, flush_txg_time, UINT, ZMOD_RW,
+ "How frequently the TXG timestamps database should be flushed "
+ "to disk (in seconds)");
+
#ifdef _KERNEL
ZFS_MODULE_VIRTUAL_PARAM_CALL(zfs_zio, zio_, taskq_read,
spa_taskq_read_param_set, spa_taskq_read_param_get, ZMOD_RW,