Diffstat (limited to 'sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_object.c')
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_object.c  261
1 file changed, 85 insertions, 176 deletions
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_object.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_object.c
index 2ee62504c081..f830076f767f 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_object.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_object.c
@@ -32,14 +32,6 @@
#include <sys/zfeature.h>
#include <sys/dsl_dataset.h>
-/*
- * Each of the concurrent object allocators will grab
- * 2^dmu_object_alloc_chunk_shift dnode slots at a time. The default is to
- * grab 128 slots, which is 4 blocks worth. This was experimentally
- * determined to be the lowest value that eliminates the measurable effect
- * of lock contention from this code path.
- */
-int dmu_object_alloc_chunk_shift = 7;
static uint64_t
dmu_object_alloc_impl(objset_t *os, dmu_object_type_t ot, int blocksize,
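For reference, the arithmetic behind the tunable removed above: a minimal, self-contained sketch (not part of the patch) of how the default chunk shift of 7 works out to 128 dnode slots, or 4 blocks' worth, and how the removed code clamped the chunk size between one block and one L1 block's worth of dnodes. A DNODE_SHIFT of 9 (512-byte dnodes), 16K dnode blocks, and the 2^12 dnodes per L1 range cited in the comments below are assumed; the local variable names are illustrative only.

/*
 * Illustrative sketch only -- not part of the diff.
 */
#include <stdio.h>

int
main(void)
{
	int chunk_shift = 7;			/* the removed default */
	int dnodes_per_block = 16384 >> 9;	/* 32 dnodes per 16K block */
	int l1_dnode_count = 1 << 12;		/* 2^12 dnodes per L1 range */
	int dnodes_per_chunk = 1 << chunk_shift;

	/* Clamp to [one block, one L1 range], as the removed code did. */
	if (dnodes_per_chunk < dnodes_per_block)
		dnodes_per_chunk = dnodes_per_block;
	if (dnodes_per_chunk > l1_dnode_count)
		dnodes_per_chunk = l1_dnode_count;

	/* Prints: 128 slots per chunk = 4 blocks */
	printf("%d slots per chunk = %d blocks\n",
	    dnodes_per_chunk, dnodes_per_chunk / dnodes_per_block);
	return (0);
}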
@@ -52,10 +44,6 @@ dmu_object_alloc_impl(objset_t *os, dmu_object_type_t ot, int blocksize,
dnode_t *dn = NULL;
int dn_slots = dnodesize >> DNODE_SHIFT;
boolean_t restarted = B_FALSE;
- uint64_t *cpuobj = &os->os_obj_next_percpu[CPU_SEQID %
- os->os_obj_next_percpu_len];
- int dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift;
- int error;
if (dn_slots == 0) {
dn_slots = DNODE_MIN_SLOTS;
@@ -63,99 +51,55 @@ dmu_object_alloc_impl(objset_t *os, dmu_object_type_t ot, int blocksize,
ASSERT3S(dn_slots, >=, DNODE_MIN_SLOTS);
ASSERT3S(dn_slots, <=, DNODE_MAX_SLOTS);
}
-
- /*
- * The "chunk" of dnodes that is assigned to a CPU-specific
- * allocator needs to be at least one block's worth, to avoid
- * lock contention on the dbuf. It can be at most one L1 block's
- * worth, so that the "rescan after polishing off a L1's worth"
- * logic below will be sure to kick in.
- */
- if (dnodes_per_chunk < DNODES_PER_BLOCK)
- dnodes_per_chunk = DNODES_PER_BLOCK;
- if (dnodes_per_chunk > L1_dnode_count)
- dnodes_per_chunk = L1_dnode_count;
-
- object = *cpuobj;
-
+
+ mutex_enter(&os->os_obj_lock);
for (;;) {
+ object = os->os_obj_next;
/*
- * If we finished a chunk of dnodes, get a new one from
- * the global allocator.
+ * Each time we polish off a L1 bp worth of dnodes (2^12
+ * objects), move to another L1 bp that's still
+ * reasonably sparse (at most 1/4 full). Look from the
+ * beginning at most once per txg. If we still can't
+ * allocate from that L1 block, search for an empty L0
+ * block, which will quickly skip to the end of the
+ * metadnode if no nearby L0 blocks are empty. This
+ * fallback avoids a pathology where full dnode blocks
+ * containing large dnodes appear sparse because they
+ * have a low blk_fill, leading to many failed
+ * allocation attempts. In the long term a better
+ * mechanism to search for sparse metadnode regions,
+ * such as spacemaps, could be implemented.
+ *
+ * os_scan_dnodes is set during txg sync if enough objects
+ * have been freed since the previous rescan to justify
+ * backfilling again.
+ *
+ * Note that dmu_traverse depends on the behavior that we use
+ * multiple blocks of the dnode object before going back to
+ * reuse objects. Any change to this algorithm should preserve
+ * that property or find another solution to the issues
+ * described in traverse_visitbp.
*/
- if ((P2PHASE(object, dnodes_per_chunk) == 0) ||
- (P2PHASE(object + dn_slots - 1, dnodes_per_chunk) <
- dn_slots)) {
- DNODE_STAT_BUMP(dnode_alloc_next_chunk);
- mutex_enter(&os->os_obj_lock);
- ASSERT0(P2PHASE(os->os_obj_next_chunk,
- dnodes_per_chunk));
- object = os->os_obj_next_chunk;
-
- /*
- * Each time we polish off a L1 bp worth of dnodes
- * (2^12 objects), move to another L1 bp that's
- * still reasonably sparse (at most 1/4 full). Look
- * from the beginning at most once per txg. If we
- * still can't allocate from that L1 block, search
- * for an empty L0 block, which will quickly skip
- * to the end of the metadnode if the no nearby L0
- * blocks are empty. This fallback avoids a
- * pathology where full dnode blocks containing
- * large dnodes appear sparse because they have a
- * low blk_fill, leading to many failed allocation
- * attempts. In the long term a better mechanism to
- * search for sparse metadnode regions, such as
- * spacemaps, could be implemented.
- *
- * os_scan_dnodes is set during txg sync if enough
- * objects have been freed since the previous
- * rescan to justify backfilling again.
- *
- * Note that dmu_traverse depends on the behavior
- * that we use multiple blocks of the dnode object
- * before going back to reuse objects. Any change
- * to this algorithm should preserve that property
- * or find another solution to the issues described
- * in traverse_visitbp.
- */
- if (P2PHASE(object, L1_dnode_count) == 0) {
- uint64_t offset;
- uint64_t blkfill;
- int minlvl;
- if (os->os_rescan_dnodes) {
- offset = 0;
- os->os_rescan_dnodes = B_FALSE;
- } else {
- offset = object << DNODE_SHIFT;
- }
- blkfill = restarted ? 1 : DNODES_PER_BLOCK >> 2;
- minlvl = restarted ? 1 : 2;
- restarted = B_TRUE;
- error = dnode_next_offset(DMU_META_DNODE(os),
- DNODE_FIND_HOLE, &offset, minlvl,
- blkfill, 0);
- if (error == 0) {
- object = offset >> DNODE_SHIFT;
- }
+ if (P2PHASE(object, L1_dnode_count) == 0) {
+ uint64_t offset;
+ uint64_t blkfill;
+ int minlvl;
+ int error;
+ if (os->os_rescan_dnodes) {
+ offset = 0;
+ os->os_rescan_dnodes = B_FALSE;
+ } else {
+ offset = object << DNODE_SHIFT;
}
- /*
- * Note: if "restarted", we may find a L0 that
- * is not suitably aligned.
- */
- os->os_obj_next_chunk =
- P2ALIGN(object, dnodes_per_chunk) +
- dnodes_per_chunk;
- (void) atomic_swap_64(cpuobj, object);
- mutex_exit(&os->os_obj_lock);
+ blkfill = restarted ? 1 : DNODES_PER_BLOCK >> 2;
+ minlvl = restarted ? 1 : 2;
+ restarted = B_TRUE;
+ error = dnode_next_offset(DMU_META_DNODE(os),
+ DNODE_FIND_HOLE, &offset, minlvl, blkfill, 0);
+ if (error == 0)
+ object = offset >> DNODE_SHIFT;
}
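The minlvl/blkfill pairing above is compact enough to miss on a first read. Here is a minimal sketch of the same two-pass policy, separated out for clarity; the struct and function names are invented for illustration, and a DNODES_PER_BLOCK of 32 (16K blocks of 512-byte dnodes) is assumed.

/*
 * Illustrative only: the hole-search parameters passed to
 * dnode_next_offset() above.  The first pass demands a hole at indirect
 * level 2, i.e. an L1 range that is at most ~1/4 full; once the search
 * has restarted from offset 0, any level-1 hole (an empty L0 dnode
 * block) is accepted.
 */
#include <stdbool.h>
#include <stdint.h>

#define	DNODES_PER_BLOCK	32	/* assumed: 16K block / 512-byte dnode */

struct hole_scan {
	int		minlvl;		/* lowest level that must hold the hole */
	uint64_t	blkfill;	/* fill threshold for "sparse enough" */
};

static struct hole_scan
choose_hole_scan(bool restarted)
{
	struct hole_scan hs;

	hs.minlvl = restarted ? 1 : 2;
	hs.blkfill = restarted ? 1 : (DNODES_PER_BLOCK >> 2);	/* 1 or 8 */
	return (hs);
}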
-
- /*
- * The value of (*cpuobj) before adding dn_slots is the object
- * ID assigned to us. The value afterwards is the object ID
- * assigned to whoever wants to do an allocation next.
- */
- object = atomic_add_64_nv(cpuobj, dn_slots) - dn_slots;
+ os->os_obj_next = object + dn_slots;
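A short aside on the line removed just above: atomic_add_64_nv() returns the new value of the counter, so subtracting dn_slots recovers the pre-add value, which is the object ID assigned to this caller. A hedged sketch of the equivalent using C11 atomics, where the fetch-add primitive returns the old value directly (the variable and function names are illustrative):

/*
 * Illustrative only: the same "claim dn_slots object IDs" step
 * expressed with C11 atomics.
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t cpuobj;		/* per-CPU allocation cursor */

static uint64_t
claim_object(uint64_t dn_slots)
{
	/*
	 * atomic_fetch_add() returns the value before the add: the object
	 * ID assigned to us.  The cursor now points at the ID that the
	 * next caller will receive.
	 */
	return (atomic_fetch_add(&cpuobj, dn_slots));
}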
/*
* XXX We should check for an i/o error here and return
@@ -163,45 +107,37 @@ dmu_object_alloc_impl(objset_t *os, dmu_object_type_t ot, int blocksize,
* dmu_tx_assign(), but there is currently no mechanism
* to do so.
*/
- error = dnode_hold_impl(os, object, DNODE_MUST_BE_FREE,
- dn_slots, FTAG, &dn);
- if (error == 0) {
- rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
+ (void) dnode_hold_impl(os, object, DNODE_MUST_BE_FREE, dn_slots,
+ FTAG, &dn);
+ if (dn)
+ break;
+
+ if (dmu_object_next(os, &object, B_TRUE, 0) == 0)
+ os->os_obj_next = object;
+ else
/*
- * Another thread could have allocated it; check
- * again now that we have the struct lock.
+ * Skip to next known valid starting point for a dnode.
*/
- if (dn->dn_type == DMU_OT_NONE) {
- dnode_allocate(dn, ot, blocksize, 0,
- bonustype, bonuslen, dn_slots, tx);
- rw_exit(&dn->dn_struct_rwlock);
- dmu_tx_add_new_object(tx, dn);
- dnode_rele(dn, FTAG);
- return (object);
- }
- rw_exit(&dn->dn_struct_rwlock);
- dnode_rele(dn, FTAG);
- DNODE_STAT_BUMP(dnode_alloc_race);
- }
-
- /*
- * Skip to next known valid starting point on error. This
- * is the start of the next block of dnodes.
- */
- if (dmu_object_next(os, &object, B_TRUE, 0) != 0) {
- object = P2ROUNDUP(object + 1, DNODES_PER_BLOCK);
- DNODE_STAT_BUMP(dnode_alloc_next_block);
- }
- (void) atomic_swap_64(cpuobj, object);
+ os->os_obj_next = P2ROUNDUP(object + 1,
+ DNODES_PER_BLOCK);
}
+
+ dnode_allocate(dn, ot, blocksize, indirect_blockshift,
+ bonustype, bonuslen, dn_slots, tx);
+ mutex_exit(&os->os_obj_lock);
+
+ dmu_tx_add_new_object(tx, dn);
+ dnode_rele(dn, FTAG);
+
+ return (object);
}
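To make the retry policy in the loop above easier to follow in isolation, here is a hedged sketch of just the cursor-advance step taken when dnode_hold_impl(DNODE_MUST_BE_FREE) fails for the current candidate. The helper name is invented; P2ROUNDUP mirrors the power-of-two round-up from sys/sysmacros.h, and a DNODES_PER_BLOCK of 32 is assumed.

/*
 * Illustrative only: advancing os_obj_next after a failed hold.
 */
#include <stdbool.h>
#include <stdint.h>

#define	DNODES_PER_BLOCK	32	/* assumed */
#define	P2ROUNDUP(x, align)	((((x) - 1) | ((align) - 1)) + 1)

static uint64_t
advance_cursor(uint64_t object, bool next_ok, uint64_t next_object)
{
	if (next_ok)			/* dmu_object_next() found a later hole */
		return (next_object);
	/*
	 * Otherwise skip to the next known valid starting point for a
	 * dnode: the beginning of the next block of dnodes.
	 */
	return (P2ROUNDUP(object + 1, DNODES_PER_BLOCK));
}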
uint64_t
dmu_object_alloc(objset_t *os, dmu_object_type_t ot, int blocksize,
dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
- return (dmu_object_alloc_impl(os, ot, blocksize, 0, bonustype,
- bonuslen, 0, tx));
+ return dmu_object_alloc_impl(os, ot, blocksize, 0, bonustype,
+ bonuslen, 0, tx);
}
uint64_t
@@ -209,8 +145,8 @@ dmu_object_alloc_ibs(objset_t *os, dmu_object_type_t ot, int blocksize,
int indirect_blockshift, dmu_object_type_t bonustype, int bonuslen,
dmu_tx_t *tx)
{
- return (dmu_object_alloc_impl(os, ot, blocksize, indirect_blockshift,
- bonustype, bonuslen, 0, tx));
+ return dmu_object_alloc_impl(os, ot, blocksize, indirect_blockshift,
+ bonustype, bonuslen, 0, tx);
}
uint64_t
@@ -242,7 +178,7 @@ dmu_object_claim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot,
dn_slots = DNODE_MIN_SLOTS;
ASSERT3S(dn_slots, >=, DNODE_MIN_SLOTS);
ASSERT3S(dn_slots, <=, DNODE_MAX_SLOTS);
-
+
if (object == DMU_META_DNODE_OBJECT && !dmu_tx_private_ok(tx))
return (SET_ERROR(EBADF));
@@ -275,9 +211,6 @@ dmu_object_reclaim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot,
int dn_slots = dnodesize >> DNODE_SHIFT;
int err;
- if (dn_slots == 0)
- dn_slots = DNODE_MIN_SLOTS;
-
if (object == DMU_META_DNODE_OBJECT)
return (SET_ERROR(EBADF));
@@ -327,52 +260,28 @@ int
dmu_object_next(objset_t *os, uint64_t *objectp, boolean_t hole, uint64_t txg)
{
uint64_t offset;
- uint64_t start_obj;
+ dmu_object_info_t doi;
struct dsl_dataset *ds = os->os_dsl_dataset;
+ int dnodesize;
int error;
- if (*objectp == 0) {
- start_obj = 1;
- } else if (ds && ds->ds_feature_inuse[SPA_FEATURE_LARGE_DNODE]) {
- uint64_t i = *objectp + 1;
- uint64_t last_obj = *objectp | (DNODES_PER_BLOCK - 1);
- dmu_object_info_t doi;
-
- /*
- * Scan through the remaining meta dnode block. The contents
- * of each slot in the block are known so it can be quickly
- * checked. If the block is exhausted without a match then
- * hand off to dnode_next_offset() for further scanning.
- */
- while (i <= last_obj) {
- error = dmu_object_info(os, i, &doi);
- if (error == ENOENT) {
- if (hole) {
- *objectp = i;
- return (0);
- } else {
- i++;
- }
- } else if (error == EEXIST) {
- i++;
- } else if (error == 0) {
- if (hole) {
- i += doi.doi_dnodesize >> DNODE_SHIFT;
- } else {
- *objectp = i;
- return (0);
- }
- } else {
- return (error);
- }
- }
-
- start_obj = i;
+ /*
+ * Avoid expensive dnode hold if this dataset doesn't use large dnodes.
+ */
+ if (ds && ds->ds_feature_inuse[SPA_FEATURE_LARGE_DNODE]) {
+ error = dmu_object_info(os, *objectp, &doi);
+ if (error && !(error == EINVAL && *objectp == 0))
+ return (SET_ERROR(error));
+ else
+ dnodesize = doi.doi_dnodesize;
} else {
- start_obj = *objectp + 1;
+ dnodesize = DNODE_MIN_SIZE;
}
- offset = start_obj << DNODE_SHIFT;
+ if (*objectp == 0)
+ offset = 1 << DNODE_SHIFT;
+ else
+ offset = (*objectp << DNODE_SHIFT) + dnodesize;
error = dnode_next_offset(DMU_META_DNODE(os),
(hole ? DNODE_FIND_HOLE : 0), &offset, 0, DNODES_PER_BLOCK, txg);
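For readers comparing the two versions of dmu_object_next(): the rewritten function replaces the slot-by-slot scan of the current metadnode block with a single offset computation followed by the dnode_next_offset() call above. A minimal sketch of that computation, assuming the standard DNODE_SHIFT of 9; the helper name is invented.

/*
 * Illustrative only: the starting offset for the metadnode scan.  For
 * example, object 10 backed by a 1024-byte dnode yields offset 6144,
 * i.e. object 12, correctly skipping the extra slot that the large
 * dnode occupies.
 */
#include <stdint.h>

#define	DNODE_SHIFT	9	/* 512-byte minimum dnode */

static uint64_t
next_scan_offset(uint64_t object, uint64_t dnodesize)
{
	if (object == 0)
		return ((uint64_t)1 << DNODE_SHIFT);	/* object 0 is reserved */
	return ((object << DNODE_SHIFT) + dnodesize);
}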