path: root/sys/sys/refcount.h
author		Mark Johnston <markj@FreeBSD.org>	2020-02-28 16:05:18 +0000
committer	Mark Johnston <markj@FreeBSD.org>	2020-02-28 16:05:18 +0000
commit		c99d0c5801ce22a9976e491d04ce7e1b8996dfdd (patch)
tree		a7e90d5c78ec00f8619edaaeabf706ab037b1c9c /sys/sys/refcount.h
parent		f37cc7137dc832aec317f0ba0bccde9436b5d9fb (diff)
Add a blocking counter KPI.
refcount(9) was recently extended to support waiting on a refcount to drop to
zero, as this was needed for a lockless VM object paging-in-progress counter.
However, this adds overhead to all uses of refcount(9) and doesn't really match
traditional refcounting semantics: once a counter has dropped to zero, the
protected object may be freed at any point and it is not safe to dereference
the counter.

This change removes that extension and instead adds a new set of KPIs,
blockcount_*, for use by VM object PIP and busy.

Reviewed by:	jeff, kib, mjg
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D23723
Notes:
	svn path=/head/; revision=358432
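
For context, here is a minimal sketch of how the new KPI is meant to be
consumed. The struct foo consumer, its helper names, and the wait channel
string are hypothetical; the blockcount_* names come from the commit message,
and the signatures are assumed to match the vm_object PIP conversion made by
this commit.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/priority.h>
#include <sys/blockcount.h>	/* assumed home of the KPI in current trees */

/* Hypothetical consumer tracking in-flight operations, not references. */
struct foo {
	blockcount_t	foo_busy;
};

static void
foo_init(struct foo *f)
{
	blockcount_init(&f->foo_busy);
}

static void
foo_io_start(struct foo *f)
{
	blockcount_acquire(&f->foo_busy, 1);
}

static void
foo_io_done(struct foo *f)
{
	/* A release that drops the count to zero wakes any waiters. */
	blockcount_release(&f->foo_busy, 1);
}

static void
foo_drain(struct foo *f)
{
	/* Sleep until every in-flight operation has called foo_io_done(). */
	blockcount_wait(&f->foo_busy, NULL, "foodrn", PRIBIO);
}

Unlike a refcount, a blockcount routinely hits zero and counts up again, and
it is always safe for the object's owner to sleep on it; that is exactly the
semantics VM object PIP and busy need, and why they no longer belong in
refcount(9).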
Diffstat (limited to 'sys/sys/refcount.h')
-rw-r--r--	sys/sys/refcount.h	107
1 file changed, 44 insertions(+), 63 deletions(-)
diff --git a/sys/sys/refcount.h b/sys/sys/refcount.h
index a3becb5558ab..28f0b20ee648 100644
--- a/sys/sys/refcount.h
+++ b/sys/sys/refcount.h
@@ -34,18 +34,14 @@
#ifdef _KERNEL
#include <sys/systm.h>
+#include <sys/_blockcount.h>
#else
#include <stdbool.h>
#define KASSERT(exp, msg) /* */
#endif
-#define REFCOUNT_WAITER (1U << 31) /* Refcount has waiter. */
-#define REFCOUNT_SATURATION_VALUE (3U << 29)
-
-#define REFCOUNT_SATURATED(val) (((val) & (1U << 30)) != 0)
-#define REFCOUNT_COUNT(x) ((x) & ~REFCOUNT_WAITER)
-
-bool refcount_release_last(volatile u_int *count, u_int n, u_int old);
+#define REFCOUNT_SATURATED(val) (((val) & (1U << 31)) != 0)
+#define REFCOUNT_SATURATION_VALUE (3U << 30)
/*
* Attempt to handle reference count overflow and underflow. Force the counter
@@ -111,56 +107,6 @@ refcount_acquire_checked(volatile u_int *count)
}
}
-static __inline bool
-refcount_releasen(volatile u_int *count, u_int n)
-{
- u_int old;
-
- KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
- ("refcount_releasen: n=%u too large", n));
-
- /*
- * Paired with acquire fence in refcount_release_last.
- */
- atomic_thread_fence_rel();
- old = atomic_fetchadd_int(count, -n);
- if (__predict_false(n >= REFCOUNT_COUNT(old) ||
- REFCOUNT_SATURATED(old)))
- return (refcount_release_last(count, n, old));
- return (false);
-}
-
-static __inline bool
-refcount_release(volatile u_int *count)
-{
-
- return (refcount_releasen(count, 1));
-}
-
-#ifdef _KERNEL
-struct lock_object;
-void _refcount_sleep(volatile u_int *count, struct lock_object *,
- const char *wmesg, int prio);
-
-static __inline void
-refcount_sleep(volatile u_int *count, const char *wmesg, int prio)
-{
-
- _refcount_sleep(count, NULL, wmesg, prio);
-}
-
-#define refcount_sleep_interlock(count, lock, wmesg, prio) \
- _refcount_sleep((count), (struct lock_object *)(lock), (wmesg), (prio))
-
-static __inline void
-refcount_wait(volatile u_int *count, const char *wmesg, int prio)
-{
-
- while (*count != 0)
- refcount_sleep(count, wmesg, prio);
-}
-#endif
-
/*
 * This function returns non-zero if the refcount was
 * incremented.  Otherwise zero is returned.
@@ -172,7 +118,7 @@ refcount_acquire_if_gt(volatile u_int *count, u_int n)
old = *count;
for (;;) {
- if (REFCOUNT_COUNT(old) <= n)
+ if (old <= n)
return (false);
if (__predict_false(REFCOUNT_SATURATED(old)))
return (true);
@@ -185,7 +131,41 @@ static __inline __result_use_check bool
refcount_acquire_if_not_zero(volatile u_int *count)
{
- return refcount_acquire_if_gt(count, 0);
+ return (refcount_acquire_if_gt(count, 0));
+}
+
+static __inline bool
+refcount_releasen(volatile u_int *count, u_int n)
+{
+ u_int old;
+
+ KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
+ ("refcount_releasen: n=%u too large", n));
+
+ atomic_thread_fence_rel();
+ old = atomic_fetchadd_int(count, -n);
+ if (__predict_false(old < n || REFCOUNT_SATURATED(old))) {
+ _refcount_update_saturated(count);
+ return (false);
+ }
+ if (old > n)
+ return (false);
+
+ /*
+ * Last reference. Signal the user to call the destructor.
+ *
+ * Ensure that the destructor sees all updates. This synchronizes with
+ * release fences from all routines which drop the count.
+ */
+ atomic_thread_fence_acq();
+ return (true);
+}
+
+static __inline bool
+refcount_release(volatile u_int *count)
+{
+
+ return (refcount_releasen(count, 1));
}
static __inline __result_use_check bool
@@ -197,12 +177,12 @@ refcount_release_if_gt(volatile u_int *count, u_int n)
("refcount_release_if_gt: Use refcount_release for final ref"));
old = *count;
for (;;) {
- if (REFCOUNT_COUNT(old) <= n)
+ if (old <= n)
return (false);
if (__predict_false(REFCOUNT_SATURATED(old)))
return (true);
/*
- * Paired with acquire fence in refcount_release_last.
+ * Paired with acquire fence in refcount_releasen().
*/
if (atomic_fcmpset_rel_int(count, &old, old - 1))
return (true);
@@ -213,6 +193,7 @@ static __inline __result_use_check bool
refcount_release_if_not_last(volatile u_int *count)
{
- return refcount_release_if_gt(count, 1);
+ return (refcount_release_if_gt(count, 1));
}
-#endif /* ! __SYS_REFCOUNT_H__ */
+
+#endif /* !__SYS_REFCOUNT_H__ */
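
With the waiter bit gone, refcount_release() alone carries the traditional
contract: it returns true exactly once, for the final release, and only then
may the object be destroyed. A minimal sketch of that pattern follows; struct
obj and its helpers are hypothetical, while refcount_init(),
refcount_acquire(), and refcount_release() are the KPI from the header above.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/refcount.h>

/* Hypothetical refcounted object. */
struct obj {
	u_int	obj_refs;
	/* ... object state ... */
};

static struct obj *
obj_alloc(void)
{
	struct obj *o;

	o = malloc(sizeof(*o), M_TEMP, M_WAITOK | M_ZERO);
	refcount_init(&o->obj_refs, 1);	/* the caller owns the first ref */
	return (o);
}

static void
obj_hold(struct obj *o)
{
	refcount_acquire(&o->obj_refs);
}

static void
obj_rele(struct obj *o)
{
	/*
	 * True is returned only for the final release; the acquire fence
	 * in refcount_releasen() guarantees the destructor sees every
	 * update made while references were held.
	 */
	if (refcount_release(&o->obj_refs))
		free(o, M_TEMP);
}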