author		Mateusz Guzik <mjg@FreeBSD.org>	2023-10-12 06:47:45 +0000
committer	Mateusz Guzik <mjg@FreeBSD.org>	2023-10-13 23:52:39 +0000
commit		a6a4a57ca3e8fe5ca539c31cb1fab7acf8a26dd7 (patch)
tree		f7da58b0ab5820ed260f6f1a50079400623b4220
parent		46d5611b8796328b4e326c319cd896f6cd1600f4 (diff)
vfs: count recycles by vnlru and by vn_alloc separately
Sponsored by:	Rubicon Communications, LLC ("Netgate")

(cherry picked from commit a92fc3122d2becfbf5a627af6eda5cedfac57c31)
-rw-r--r--	sys/kern/vfs_subr.c	52
1 file changed, 36 insertions, 16 deletions
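In short: free-vnode recycles were previously folded into one statistic regardless of who performed them. After this change, recycles driven by the vnlru kthread keep incrementing recycles_free, while recycles performed inline by vn_alloc callers are counted separately in the new direct_recycles_free. A minimal, hypothetical userland sketch of reading both counters via sysctlbyname(3) follows; the OID names come from the SYSCTL_COUNTER_U64() declarations in the diff below, but the program itself is illustrative and not part of the commit:

/*
 * read_recycle_counters.c -- illustrative only, not part of this commit.
 * Reads the two counters this change distinguishes; the OID names follow
 * from SYSCTL_COUNTER_U64(_vfs_vnode_vnlru, ...) in the diff below.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t
read_counter(const char *oid)
{
	uint64_t val;
	size_t len = sizeof(val);

	if (sysctlbyname(oid, &val, &len, NULL, 0) == -1)
		err(1, "sysctlbyname(%s)", oid);
	return (val);
}

int
main(void)
{

	/* Free vnodes recycled by the vnlru kthread. */
	printf("recycles_free:        %ju\n",
	    (uintmax_t)read_counter("vfs.vnode.vnlru.recycles_free"));
	/* Free vnodes recycled directly by vn_alloc callers (new here). */
	printf("direct_recycles_free: %ju\n",
	    (uintmax_t)read_counter("vfs.vnode.vnlru.direct_recycles_free"));
	return (0);
}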
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 2be83da5d73f..530afbe05b3e 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -109,7 +109,7 @@ static void delmntque(struct vnode *vp);
static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
int slpflag, int slptimeo);
static void syncer_shutdown(void *arg, int howto);
-static int vtryrecycle(struct vnode *vp);
+static int vtryrecycle(struct vnode *vp, bool isvnlru);
static void v_init_counters(struct vnode *);
static void vn_seqc_init(struct vnode *);
static void vn_seqc_write_end_free(struct vnode *vp);
@@ -210,6 +210,11 @@ SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_cou
SYSCTL_COUNTER_U64(_vfs_vnode_vnlru, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count,
"Number of free vnodes recycled to meet vnode cache targets");

+static counter_u64_t direct_recycles_free_count;
+SYSCTL_COUNTER_U64(_vfs_vnode_vnlru, OID_AUTO, direct_recycles_free, CTLFLAG_RD,
+ &direct_recycles_free_count,
+ "Number of free vnodes recycled by vn_alloc callers to meet vnode cache targets");
+
static counter_u64_t vnode_skipped_requeues;
SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, skipped_requeues, CTLFLAG_RD, &vnode_skipped_requeues,
"Number of times LRU requeue was skipped due to lock contention");
@@ -765,6 +770,7 @@ vntblinit(void *dummy __unused)
vnodes_created = counter_u64_alloc(M_WAITOK);
recycles_count = counter_u64_alloc(M_WAITOK);
recycles_free_count = counter_u64_alloc(M_WAITOK);
+ direct_recycles_free_count = counter_u64_alloc(M_WAITOK);
vnode_skipped_requeues = counter_u64_alloc(M_WAITOK);

/*
@@ -1307,7 +1313,7 @@ SYSCTL_INT(_vfs_vnode_vnlru, OID_AUTO, max_free_per_call, CTLFLAG_RW,
* Attempt to reduce the free list by the requested amount.
*/
static int
-vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp)
+vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp, bool isvnlru)
{
struct vnode *vp;
struct mount *mp;
@@ -1393,7 +1399,7 @@ vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp)
*
* Check nullfs for one example (null_getwritemount).
*/
- vtryrecycle(vp);
+ vtryrecycle(vp, isvnlru);
count--;
if (count == 0) {
break;
@@ -1409,22 +1415,33 @@ vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp)
* XXX: returns without vnode_list_mtx locked!
*/
static int
-vnlru_free_locked(int count)
+vnlru_free_locked_direct(int count)
{
int ret;

mtx_assert(&vnode_list_mtx, MA_OWNED);
- ret = vnlru_free_impl(count, NULL, vnode_list_free_marker);
+ ret = vnlru_free_impl(count, NULL, vnode_list_free_marker, false);
mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
return (ret);
}

static int
-vnlru_free(int count)
+vnlru_free_locked_vnlru(int count)
+{
+ int ret;
+
+ mtx_assert(&vnode_list_mtx, MA_OWNED);
+ ret = vnlru_free_impl(count, NULL, vnode_list_free_marker, true);
+ mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
+ return (ret);
+}
+
+static int
+vnlru_free_vnlru(int count)
{

mtx_lock(&vnode_list_mtx);
- return (vnlru_free_locked(count));
+ return (vnlru_free_locked_vnlru(count));
}

void
@@ -1435,7 +1452,7 @@ vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp)
MPASS(mvp != NULL);
VNPASS(mvp->v_type == VMARKER, mvp);
mtx_lock(&vnode_list_mtx);
- vnlru_free_impl(count, mnt_op, mvp);
+ vnlru_free_impl(count, mnt_op, mvp, true);
mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
}
@@ -1451,7 +1468,7 @@ vnlru_free(int count, struct vfsops *mnt_op)
return;
mtx_lock(&vnode_list_mtx);
mvp = vnode_list_free_marker;
- if (vnlru_free_impl(count, mnt_op, mvp) == 0) {
+ if (vnlru_free_impl(count, mnt_op, mvp, true) == 0) {
/*
* It is possible the marker was moved over eligible vnodes by
* callers which filtered by different ops. If so, start from
@@ -1461,7 +1478,7 @@ vnlru_free(int count, struct vfsops *mnt_op)
TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
}
- vnlru_free_impl(count, mnt_op, mvp);
+ vnlru_free_impl(count, mnt_op, mvp, true);
}
mtx_unlock(&vnode_list_mtx);
}
@@ -1719,7 +1736,7 @@ vnlru_proc_light(void)
return (false);

if (freecount != 0) {
- vnlru_free(freecount);
+ vnlru_free_vnlru(freecount);
}

mtx_lock(&vnode_list_mtx);
@@ -1765,7 +1782,7 @@ vnlru_proc(void)
* try to reduce it by discarding from the free list.
*/
if (rnumvnodes > desiredvnodes + 10) {
- vnlru_free_locked(rnumvnodes - desiredvnodes);
+ vnlru_free_locked_vnlru(rnumvnodes - desiredvnodes);
mtx_lock(&vnode_list_mtx);
rnumvnodes = atomic_load_long(&numvnodes);
}
@@ -1865,7 +1882,7 @@ SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
* through vgone().
*/
static int
-vtryrecycle(struct vnode *vp)
+vtryrecycle(struct vnode *vp, bool isvnlru)
{
struct mount *vnmp;
@@ -1911,7 +1928,10 @@ vtryrecycle(struct vnode *vp)
return (EBUSY);
}
if (!VN_IS_DOOMED(vp)) {
- counter_u64_add(recycles_free_count, 1);
+ if (isvnlru)
+ counter_u64_add(recycles_free_count, 1);
+ else
+ counter_u64_add(direct_recycles_free_count, 1);
vgonel(vp);
}
VOP_UNLOCK(vp);
@@ -1984,7 +2004,7 @@ vn_alloc_hard(struct mount *mp, u_long rnumvnodes, bool bumped)
* should be chosen so that we never wait or even reclaim from
* the free list to below its target minimum.
*/
- if (vnlru_free_locked(1) > 0)
+ if (vnlru_free_locked_direct(1) > 0)
goto alloc;
mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) {
@@ -2001,7 +2021,7 @@ vn_alloc_hard(struct mount *mp, u_long rnumvnodes, bool bumped)
msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz);
if (atomic_load_long(&numvnodes) + 1 > desiredvnodes &&
vnlru_read_freevnodes() > 1)
- vnlru_free_locked(1);
+ vnlru_free_locked_direct(1);
else
mtx_unlock(&vnode_list_mtx);
}
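Design note: rather than passing a counter to every call site, the change threads a single isvnlru flag from thin wrappers (vnlru_free_locked_direct()/vnlru_free_locked_vnlru()) through vnlru_free_impl() down to vtryrecycle(), where the flag selects which statistic to bump. A stand-alone sketch of that wrapper pattern, using illustrative names that are not the kernel's:

/*
 * wrapper_pattern.c -- stand-alone illustration of the technique above,
 * not kernel code.  One shared implementation takes a bool identifying
 * the caller class; thin wrappers keep every call site self-documenting.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long recycles_by_vnlru;
static unsigned long recycles_direct;

/* Shared implementation; the flag only picks the statistic to bump. */
static int
recycle_impl(int count, bool isvnlru)
{

	if (isvnlru)
		recycles_by_vnlru += count;
	else
		recycles_direct += count;
	return (count);
}

/* Wrappers encode the caller class, leaving call sites unchanged. */
static int
recycle_vnlru(int count)
{

	return (recycle_impl(count, true));
}

static int
recycle_direct(int count)
{

	return (recycle_impl(count, false));
}

int
main(void)
{

	recycle_vnlru(3);	/* e.g. the vnlru kthread trimming the list */
	recycle_direct(1);	/* e.g. a vn_alloc caller reclaiming inline */
	printf("vnlru=%lu direct=%lu\n", recycles_by_vnlru, recycles_direct);
	return (0);
}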