author     Mateusz Guzik <mjg@FreeBSD.org>    2023-10-11 09:42:12 +0000
committer  Mateusz Guzik <mjg@FreeBSD.org>    2023-10-13 23:41:47 +0000
commit     cc2d62daa33935489235a4ef253ae61676baf7c4
tree       8d8146a785c7daab26133b9a6a25eac8afd4e1a6
parent     e3b2372a76c18551529e1aa43d185f602dccdbf1
vfs: further speed up continuous free vnode recycle
The primary bottleneck *was* vnode_list mtx, which got artificially
worsened due to the following work done with the lock held:
1. the global heavily modified numvnodes counter was being read,
   inducing massive cache line ping pong
2. should the value fit limits (which it normally did) there would be
   an avoidable write to vn_alloc_cyclecount, which is being read
   outside of the lock, once more inducing traffic

But if vn_alloc_cyclecount is 0, which it normally is even when facing
vnode shortage, there is no need to check numvnodes nor set it to 0
again.

Another problem was numvnodes adjustment (which made the locked read
much worse). While it fundamentally does not scale as it is not
distributed in any fashion, it was avoidably slow. When bumping over
the vnode limit, it would be modified with atomics 3 times: inc + dec
to backpedal in vn_alloc, then final inc in vn_alloc_hard.

One can let some slop persist over calls to vnlru_free instead. In
principle each thread in the system could get here and bump it, so a
limit is put in place to keep things sane.

Bench setup same as in prior commits: zfs, 20 separate directory trees
each with 1 million files in total and 20 find(1) processes stating
them in parallel (one per each tree).

Total run time (in seconds) goes down as follows:

vnode limit    8388608    400000
before         ~20        ~35
after          ~8         ~15

With this in place the primary bottleneck is now ZFS.

Sponsored by:	Rubicon Communications, LLC ("Netgate")

(cherry picked from commit 054f45e026d898bdc8f974d33dd748937dee1d6b)
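To make the before/after concrete, here is a minimal, self-contained
userland sketch of the accounting change described above. C11 stdatomic
and plain globals stand in for the kernel's atomic(9) primitives and
tunables, the names mirror vfs_subr.c, and the limit checks plus the
recycle/sleep logic are heavily simplified, so treat this as an
illustration rather than the kernel code:

/*
 * Sketch of the numvnodes accounting change, not the kernel code itself.
 * desiredvnodes, VNLRU_COUNT_SLOP, vn_alloc and vn_alloc_hard mirror the
 * names in sys/kern/vfs_subr.c; everything but the counter handling is
 * elided and the over-limit check is reduced to a plain comparison.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define VNLRU_COUNT_SLOP	100

static _Atomic unsigned long numvnodes;
static unsigned long desiredvnodes = 1000;

/* Old over-limit path: inc in vn_alloc, dec to backpedal, inc again here. */
static void
vn_alloc_hard_old(void)
{
	/* ... recycle free vnodes or wait for vnlru ... */
	atomic_fetch_add(&numvnodes, 1);		/* third atomic */
}

static void
vn_alloc_old(void)
{
	unsigned long rnumvnodes;

	rnumvnodes = atomic_fetch_add(&numvnodes, 1) + 1;	/* first */
	if (rnumvnodes > desiredvnodes) {		/* simplified limit check */
		atomic_fetch_sub(&numvnodes, 1);	/* second: backpedal */
		vn_alloc_hard_old();
		return;
	}
	/* allocate the vnode */
}

/* New path: the caller's increment is kept unless well over the limit. */
static void
vn_alloc_hard_new(unsigned long rnumvnodes, bool bumped)
{
	if (bumped && rnumvnodes > desiredvnodes + VNLRU_COUNT_SLOP) {
		atomic_fetch_sub(&numvnodes, 1);
		bumped = false;
	}
	/* ... recycle free vnodes or wait for vnlru ... */
	if (!bumped)
		atomic_fetch_add(&numvnodes, 1);
	/* allocate the vnode */
}

static void
vn_alloc_new(void)
{
	unsigned long rnumvnodes;

	rnumvnodes = atomic_fetch_add(&numvnodes, 1) + 1;	/* often the only atomic */
	if (rnumvnodes > desiredvnodes) {		/* simplified limit check */
		vn_alloc_hard_new(rnumvnodes, true);
		return;
	}
	/* allocate the vnode */
}

int
main(void)
{
	vn_alloc_old();
	vn_alloc_new();
	printf("numvnodes: %lu\n", (unsigned long)atomic_load(&numvnodes));
	return (0);
}

When the bump stays within VNLRU_COUNT_SLOP of the limit, the single
fetch-add in vn_alloc_new is the only write to the shared counter, which
is the saving the commit message describes.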
-rw-r--r--    sys/kern/vfs_subr.c    46
1 file changed, 31 insertions, 15 deletions
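The locking half of the change can be sketched the same way. In this
illustrative userland rendition (pthread_mutex_t standing in for
vnode_list_mtx, plain globals for the counters, all recycling logic
elided), the read of numvnodes and the store to vn_alloc_cyclecount
happen under the lock only when vn_alloc_cyclecount is already nonzero,
so the common case no longer touches those cache lines while holding it:

/*
 * Illustrative sketch of the control-flow change around vnode_list_mtx,
 * not the kernel function; names mirror sys/kern/vfs_subr.c.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t vnode_list_mtx = PTHREAD_MUTEX_INITIALIZER;
static unsigned long numvnodes;
static unsigned long desiredvnodes = 1000;
static unsigned long vn_alloc_cyclecount;

static int
vn_alloc_hard_sketch(void)
{
	pthread_mutex_lock(&vnode_list_mtx);
	/*
	 * Only when a previous shortage left vn_alloc_cyclecount nonzero do
	 * we read the heavily modified numvnodes counter and possibly write
	 * vn_alloc_cyclecount back to 0 while holding the lock.
	 */
	if (vn_alloc_cyclecount != 0) {
		if (numvnodes + 1 < desiredvnodes) {
			vn_alloc_cyclecount = 0;
			pthread_mutex_unlock(&vnode_list_mtx);
			return (1);	/* go allocate */
		}
	}
	/* ... free-vnode recycling elided ... */
	pthread_mutex_unlock(&vnode_list_mtx);
	return (0);
}

int
main(void)
{
	printf("fast exit: %d\n", vn_alloc_hard_sketch());
	return (0);
}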
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 52c14b4a234a..99933a6b9c88 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -1490,6 +1490,8 @@ static u_long vnlruproc_kicks;
 SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, kicks, CTLFLAG_RD, &vnlruproc_kicks, 0,
     "Number of times vnlru got woken up due to vnode shortage");
 
+#define VNLRU_COUNT_SLOP 100
+
 /*
  * The main freevnodes counter is only updated when a counter local to CPU
  * diverges from 0 by more than VNLRU_FREEVNODES_SLOP. CPUs are conditionally
@@ -1632,7 +1634,8 @@ vnlru_proc_sleep(void)
  *
  * On a kernel with only stock machinery this needs anywhere between 60 and 120
  * seconds to execute (time varies *wildly* between runs). With the workaround
- * it consistently stays around 20 seconds.
+ * it consistently stays around 20 seconds [it got further down with later
+ * changes].
  *
  * That is to say the entire thing needs a fundamental redesign (most notably
  * to accommodate faster recycling), the above only tries to get it out of the way.
@@ -1661,7 +1664,7 @@ vnlru_proc_light_pick(void)
 	 * the limit for a short period, don't bother doing anything in
 	 * that case.
 	 */
-	if (rnumvnodes > desiredvnodes + 10) {
+	if (rnumvnodes > desiredvnodes + VNLRU_COUNT_SLOP + 10) {
 		if (rnumvnodes - rfreevnodes >= desiredvnodes ||
 		    rfreevnodes <= wantfreevnodes) {
 			return (-1);
@@ -1795,7 +1798,8 @@ vnlru_proc(void)
 		 * limit (see vn_alloc_hard), no need to call uma_reclaim if
 		 * this happens.
 		 */
-		if (onumvnodes + 1000 > desiredvnodes && numvnodes <= desiredvnodes)
+		if (onumvnodes + VNLRU_COUNT_SLOP + 1000 > desiredvnodes &&
+		    numvnodes <= desiredvnodes)
 			uma_reclaim(UMA_RECLAIM_DRAIN);
 		if (done == 0) {
 			if (force == 0 || force == 1) {
@@ -1912,19 +1916,27 @@ SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, alloc_sleeps, CTLFLAG_RD, &vn_alloc_sle
"Number of times vnode allocation blocked waiting on vnlru");
static struct vnode * __noinline
-vn_alloc_hard(struct mount *mp)
+vn_alloc_hard(struct mount *mp, u_long rnumvnodes, bool bumped)
{
- u_long rnumvnodes, rfreevnodes;
+ u_long rfreevnodes;
- mtx_lock(&vnode_list_mtx);
- rnumvnodes = atomic_load_long(&numvnodes);
- if (rnumvnodes + 1 < desiredvnodes) {
- vn_alloc_cyclecount = 0;
- mtx_unlock(&vnode_list_mtx);
- goto alloc;
+ if (bumped) {
+ if (rnumvnodes > desiredvnodes + VNLRU_COUNT_SLOP) {
+ atomic_subtract_long(&numvnodes, 1);
+ bumped = false;
+ }
}
+ mtx_lock(&vnode_list_mtx);
+
if (vn_alloc_cyclecount != 0) {
+ rnumvnodes = atomic_load_long(&numvnodes);
+ if (rnumvnodes + 1 < desiredvnodes) {
+ vn_alloc_cyclecount = 0;
+ mtx_unlock(&vnode_list_mtx);
+ goto alloc;
+ }
+
rfreevnodes = vnlru_read_freevnodes();
if (rfreevnodes < wantfreevnodes) {
if (vn_alloc_cyclecount++ >= rfreevnodes) {
@@ -1953,6 +1965,10 @@ vn_alloc_hard(struct mount *mp)
 	/*
 	 * Wait for space for a new vnode.
 	 */
+	if (bumped) {
+		atomic_subtract_long(&numvnodes, 1);
+		bumped = false;
+	}
 	mtx_lock(&vnode_list_mtx);
 	vnlru_kick_locked();
 	vn_alloc_sleeps++;
@@ -1965,7 +1981,8 @@ vn_alloc_hard(struct mount *mp)
 	}
 alloc:
 	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
-	atomic_add_long(&numvnodes, 1);
+	if (!bumped)
+		atomic_add_long(&numvnodes, 1);
 	vnlru_kick_cond();
 	return (uma_zalloc_smr(vnode_zone, M_WAITOK));
 }
@@ -1976,11 +1993,10 @@ vn_alloc(struct mount *mp)
 	u_long rnumvnodes;
 
 	if (__predict_false(vn_alloc_cyclecount != 0))
-		return (vn_alloc_hard(mp));
+		return (vn_alloc_hard(mp, 0, false));
 	rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
 	if (__predict_false(vnlru_under(rnumvnodes, vlowat))) {
-		atomic_subtract_long(&numvnodes, 1);
-		return (vn_alloc_hard(mp));
+		return (vn_alloc_hard(mp, rnumvnodes, true));
 	}
 	return (uma_zalloc_smr(vnode_zone, M_WAITOK));