path: root/sys/sys/vmmeter.h
author     Jeff Roberson <jeff@FreeBSD.org>  2007-05-18 07:10:50 +0000
committer  Jeff Roberson <jeff@FreeBSD.org>  2007-05-18 07:10:50 +0000
commit     222d01951f8677015e3e96c6950e809c0d983c09 (patch)
tree       c94b660d4b9246fed8cbeadf7851932258d8b72a /sys/sys/vmmeter.h
parent     2b7e2ee7a505adbd7f7355bcaadd8573174464e6 (diff)
- define and use VMCNT_{GET,SET,ADD,DEC,PTR} macros for manipulating
  vmcnts. These abstract away the per-CPU (pcpu) details and also convert
  all counters to atomic operations, so sched lock is no longer
  responsible for protecting the counts in the switch routines.

Contributed by: Attilio Rao <attilio@FreeBSD.org>
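As a sketch of what the conversion looks like at a call site (the counter
and the surrounding context are illustrative, not lines from this diff):

    /* Before: plain increment; the sched lock serialized this update. */
    cnt.v_swtch++;

    /* After: lock-free atomic update through the new macro. */
    VMCNT_ADD(swtch, 1);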
Notes:
    svn path=/head/; revision=169667
Diffstat (limited to 'sys/sys/vmmeter.h')
-rw-r--r--  sys/sys/vmmeter.h | 33 ++++++++++++++++++++++++---------
1 file changed, 24 insertions(+), 9 deletions(-)
diff --git a/sys/sys/vmmeter.h b/sys/sys/vmmeter.h
index 793f32d37738..375a2cedf3b2 100644
--- a/sys/sys/vmmeter.h
+++ b/sys/sys/vmmeter.h
@@ -102,7 +102,18 @@ struct vmmeter {
};
#ifdef _KERNEL
-extern struct vmmeter cnt;
+extern volatile struct vmmeter cnt;
+
+#define VMCNT __DEVOLATILE(struct vmmeter *, &cnt)
+#define VMCNT_SET(member, val) \
+ atomic_store_rel_int(__CONCAT(&cnt.v_, member), val)
+#define VMCNT_ADD(member, val) \
+ atomic_add_int(__CONCAT(&cnt.v_, member), val)
+#define VMCNT_DEC(member, val) \
+ atomic_subtract_int(__CONCAT(&cnt.v_, member), val)
+#define VMCNT_GET(member) (__CONCAT(cnt.v_, member))
+#define VMCNT_PTR(member) \
+ __DEVOLATILE(u_int *, __CONCAT(&cnt.v_, member))
/*
* Return TRUE if we are under our reserved low-free-pages threshold
@@ -112,7 +123,8 @@ static __inline
int
vm_page_count_reserved(void)
{
- return (cnt.v_free_reserved > (cnt.v_free_count + cnt.v_cache_count));
+ return (VMCNT_GET(free_reserved) > (VMCNT_GET(free_count) +
+ VMCNT_GET(cache_count)));
}
/*
@@ -126,7 +138,8 @@ static __inline
int
vm_page_count_severe(void)
{
- return (cnt.v_free_severe > (cnt.v_free_count + cnt.v_cache_count));
+ return (VMCNT_GET(free_severe) > (VMCNT_GET(free_count) +
+ VMCNT_GET(cache_count)));
}
/*
@@ -143,7 +156,8 @@ static __inline
int
vm_page_count_min(void)
{
- return (cnt.v_free_min > (cnt.v_free_count + cnt.v_cache_count));
+ return (VMCNT_GET(free_min) > (VMCNT_GET(free_count) +
+ VMCNT_GET(cache_count)));
}
/*
@@ -155,7 +169,8 @@ static __inline
int
vm_page_count_target(void)
{
- return (cnt.v_free_target > (cnt.v_free_count + cnt.v_cache_count));
+ return (VMCNT_GET(free_target) > (VMCNT_GET(free_count) +
+ VMCNT_GET(cache_count)));
}
/*
@@ -168,8 +183,8 @@ int
vm_paging_target(void)
{
return (
- (cnt.v_free_target + cnt.v_cache_min) -
- (cnt.v_free_count + cnt.v_cache_count)
+ (VMCNT_GET(free_target) + VMCNT_GET(cache_min)) -
+ (VMCNT_GET(free_count) + VMCNT_GET(cache_count))
);
}
@@ -182,8 +197,8 @@ int
vm_paging_needed(void)
{
return (
- (cnt.v_free_reserved + cnt.v_cache_min) >
- (cnt.v_free_count + cnt.v_cache_count)
+ (VMCNT_GET(free_reserved) + VMCNT_GET(cache_min)) >
+ (VMCNT_GET(free_count) + VMCNT_GET(cache_count))
);
}
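For context (illustrative only, not part of this commit), the remaining
macros would be used along these lines; the sysctl use case is an
assumption about typical consumers:

    u_int free_now, *freep;

    free_now = VMCNT_GET(free_count);  /* unlocked read of one counter */
    freep = VMCNT_PTR(free_count);     /* volatile stripped, e.g. for sysctl */
    VMCNT_SET(free_min, 1024);         /* store with release semantics */
    VMCNT_DEC(free_count, 1);          /* atomic subtract */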