aboutsummaryrefslogtreecommitdiff
path: root/sys/sys/vmmeter.h
diff options
context:
space:
mode:
authorAttilio Rao <attilio@FreeBSD.org>2007-05-31 22:52:15 +0000
committerAttilio Rao <attilio@FreeBSD.org>2007-05-31 22:52:15 +0000
commit2feb50bf7d6fc45ff13865dd9e480b816e1386a8 (patch)
tree10bf0f11ceeb18c6b03947eb85223abbbbf9cc67 /sys/sys/vmmeter.h
parent281660174a65a6ca30cf2079274cc6560357f6ad (diff)
downloadsrc-2feb50bf7d6fc45ff13865dd9e480b816e1386a8.tar.gz
src-2feb50bf7d6fc45ff13865dd9e480b816e1386a8.zip
Revert VMCNT_* operations introduction.
Probably, a general approach is not the best solution here, so we should solve the sched_lock protection problems separately. Requested by: alc Approved by: jeff (mentor)
Notes
Notes: svn path=/head/; revision=170170
Diffstat (limited to 'sys/sys/vmmeter.h')
-rw-r--r--sys/sys/vmmeter.h33
1 file changed, 9 insertions, 24 deletions
diff --git a/sys/sys/vmmeter.h b/sys/sys/vmmeter.h
index 7defd2d480b6..177986b34576 100644
--- a/sys/sys/vmmeter.h
+++ b/sys/sys/vmmeter.h
@@ -102,18 +102,7 @@ struct vmmeter {
};
#ifdef _KERNEL
-extern volatile struct vmmeter cnt;
-
-#define VMCNT __DEVOLATILE(struct vmmeter *, &cnt)
-#define VMCNT_SET(member, val) \
- atomic_store_rel_int(__CONCAT(&cnt.v_, member), val)
-#define VMCNT_ADD(member, val) \
- atomic_add_int(__CONCAT(&cnt.v_, member), val)
-#define VMCNT_SUB(member, val) \
- atomic_subtract_int(__CONCAT(&cnt.v_, member), val)
-#define VMCNT_GET(member) (__CONCAT(cnt.v_, member))
-#define VMCNT_PTR(member) \
- __DEVOLATILE(u_int *, __CONCAT(&cnt.v_, member))
+extern struct vmmeter cnt;
/*
* Return TRUE if we are under our reserved low-free-pages threshold
@@ -123,8 +112,7 @@ static __inline
int
vm_page_count_reserved(void)
{
- return (VMCNT_GET(free_reserved) > (VMCNT_GET(free_count) +
- VMCNT_GET(cache_count)));
+ return (cnt.v_free_reserved > (cnt.v_free_count + cnt.v_cache_count));
}
/*
@@ -138,8 +126,7 @@ static __inline
int
vm_page_count_severe(void)
{
- return (VMCNT_GET(free_severe) > (VMCNT_GET(free_count) +
- VMCNT_GET(cache_count)));
+ return (cnt.v_free_severe > (cnt.v_free_count + cnt.v_cache_count));
}
/*
@@ -156,8 +143,7 @@ static __inline
int
vm_page_count_min(void)
{
- return (VMCNT_GET(free_min) > (VMCNT_GET(free_count) +
- VMCNT_GET(cache_count)));
+ return (cnt.v_free_min > (cnt.v_free_count + cnt.v_cache_count));
}
/*
@@ -169,8 +155,7 @@ static __inline
int
vm_page_count_target(void)
{
- return (VMCNT_GET(free_target) > (VMCNT_GET(free_count) +
- VMCNT_GET(cache_count)));
+ return (cnt.v_free_target > (cnt.v_free_count + cnt.v_cache_count));
}
/*
@@ -183,8 +168,8 @@ int
vm_paging_target(void)
{
return (
- (VMCNT_GET(free_target) + VMCNT_GET(cache_min)) -
- (VMCNT_GET(free_count) + VMCNT_GET(cache_count))
+ (cnt.v_free_target + cnt.v_cache_min) -
+ (cnt.v_free_count + cnt.v_cache_count)
);
}
@@ -197,8 +182,8 @@ int
vm_paging_needed(void)
{
return (
- (VMCNT_GET(free_reserved) + VMCNT_GET(cache_min)) >
- (VMCNT_GET(free_count) + VMCNT_GET(cache_count))
+ (cnt.v_free_reserved + cnt.v_cache_min) >
+ (cnt.v_free_count + cnt.v_cache_count)
);
}