aboutsummaryrefslogtreecommitdiff
path: root/sys/kern/kern_event.c
diff options
context:
space:
mode:
authorEd Schouten <ed@FreeBSD.org>2013-06-16 09:30:35 +0000
committerEd Schouten <ed@FreeBSD.org>2013-06-16 09:30:35 +0000
commit2381f6ef8c09a106cb27147c3ac7272883896d3d (patch)
tree867a068e99b951436672e1a245e4a93e7a637699 /sys/kern/kern_event.c
parentf814793b092639ba716f2b06cc068a7708d40a6a (diff)
downloadsrc-2381f6ef8c09a106cb27147c3ac7272883896d3d.tar.gz
src-2381f6ef8c09a106cb27147c3ac7272883896d3d.zip
Change callout use counter to use C11 atomics.
In order to get some coverage of C11 atomics in kernelspace, switch at least one piece of kernel code to use C11 atomics instead of <machine/atomic.h>. While there, slightly improve the code by adding an assertion to prevent the use count from going negative.
Notes
Notes: svn path=/head/; revision=251803
Diffstat (limited to 'sys/kern/kern_event.c')
-rw-r--r--sys/kern/kern_event.c25
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/sys/kern/kern_event.c b/sys/kern/kern_event.c
index 362f5eed4449..cd8b232f83f1 100644
--- a/sys/kern/kern_event.c
+++ b/sys/kern/kern_event.c
@@ -47,6 +47,7 @@ __FBSDID("$FreeBSD$");
#include <sys/fcntl.h>
#include <sys/kthread.h>
#include <sys/selinfo.h>
+#include <sys/stdatomic.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
@@ -182,9 +183,9 @@ static struct filterops user_filtops = {
};
static uma_zone_t knote_zone;
-static int kq_ncallouts = 0;
-static int kq_calloutmax = (4 * 1024);
-SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
+static atomic_uint kq_ncallouts = ATOMIC_VAR_INIT(0);
+static unsigned int kq_calloutmax = 4 * 1024;
+SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
&kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
/* XXX - ensure not KN_INFLUX?? */
@@ -549,13 +550,15 @@ static int
filt_timerattach(struct knote *kn)
{
struct callout *calloutp;
+ unsigned int ncallouts;
- atomic_add_int(&kq_ncallouts, 1);
-
- if (kq_ncallouts >= kq_calloutmax) {
- atomic_add_int(&kq_ncallouts, -1);
- return (ENOMEM);
- }
+ ncallouts = atomic_load_explicit(&kq_ncallouts, memory_order_relaxed);
+ do {
+ if (ncallouts >= kq_calloutmax)
+ return (ENOMEM);
+ } while (!atomic_compare_exchange_weak_explicit(&kq_ncallouts,
+ &ncallouts, ncallouts + 1, memory_order_relaxed,
+ memory_order_relaxed));
kn->kn_flags |= EV_CLEAR; /* automatically set */
kn->kn_status &= ~KN_DETACHED; /* knlist_add usually sets it */
@@ -573,11 +576,13 @@ static void
filt_timerdetach(struct knote *kn)
{
struct callout *calloutp;
+ unsigned int old;
calloutp = (struct callout *)kn->kn_hook;
callout_drain(calloutp);
free(calloutp, M_KQUEUE);
- atomic_add_int(&kq_ncallouts, -1);
+ old = atomic_fetch_sub_explicit(&kq_ncallouts, 1, memory_order_relaxed);
+ KASSERT(old > 0, ("Number of callouts cannot become negative"));
kn->kn_status |= KN_DETACHED; /* knlist_remove usually clears it */
}