diff options
author | David Greenman <dg@FreeBSD.org> | 1995-03-01 23:30:04 +0000 |
---|---|---|
committer | David Greenman <dg@FreeBSD.org> | 1995-03-01 23:30:04 +0000 |
commit | f919ebde54deef1969167a0390e91f6e772d635f (patch) | |
tree | 939577bd1f960df71501392b5bf83109c6c968b5 /sys/vm/vm_page.h | |
parent | be7f0d04fed168c8a3156a9bcf06df3e90f38083 (diff) | |
download | src-f919ebde54deef1969167a0390e91f6e772d635f.tar.gz src-f919ebde54deef1969167a0390e91f6e772d635f.zip |
Various changes from John and myself that do the following:
New functions created - vm_object_pip_wakeup and pagedaemon_wakeup that
are used to reduce the actual number of wakeups.
New function vm_page_protect which is used in conjunction with some new
page flags to reduce the number of calls to pmap_page_protect.
Minor changes to reduce unnecessary spl nesting.
Rewrote vm_page_alloc() to improve readability.
Various other mostly cosmetic changes.
Notes
Notes:
svn path=/head/; revision=6816
Diffstat (limited to 'sys/vm/vm_page.h')
-rw-r--r-- | sys/vm/vm_page.h | 28 |
1 file changed, 23 insertions, 5 deletions
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h index 8d9d238600f8..08be85d9052a 100644 --- a/sys/vm/vm_page.h +++ b/sys/vm/vm_page.h @@ -61,7 +61,7 @@ * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. * - * $Id: vm_page.h,v 1.14 1995/02/14 06:10:24 phk Exp $ + * $Id: vm_page.h,v 1.15 1995/02/20 23:35:43 davidg Exp $ */ /* @@ -71,6 +71,7 @@ #ifndef _VM_PAGE_ #define _VM_PAGE_ +#include <vm/pmap.h> /* * Management of resident (logical) pages. * @@ -130,6 +131,9 @@ struct vm_page { #define PG_TABLED 0x0040 /* page is in VP table (O) */ #define PG_COPYONWRITE 0x0080 /* must copy page before changing (O) */ #define PG_FICTITIOUS 0x0100 /* physical page doesn't exist (O) */ +#define PG_WRITEABLE 0x0200 /* page is mapped writeable */ +#define PG_MAPPED 0x400 /* page is mapped */ + #define PG_DIRTY 0x0800 /* client flag to set when dirty */ #define PG_REFERENCED 0x1000 /* page has been referenced */ #define PG_CACHE 0x4000 /* On VMIO cache */ @@ -245,18 +249,17 @@ void vm_page_set_valid __P((vm_page_t, int, int)); void vm_page_set_invalid __P((vm_page_t, int, int)); int vm_page_is_valid __P((vm_page_t, int, int)); void vm_page_test_dirty __P((vm_page_t)); -int vm_page_unqueue __P((vm_page_t )); +void vm_page_unqueue __P((vm_page_t )); int vm_page_bits __P((int, int)); - /* * Keep page from being freed by the page daemon * much of the same effect as wiring, except much lower * overhead and should be used only for *very* temporary * holding ("wiring"). 
*/ -static __inline void +static inline void vm_page_hold(vm_page_t mem) { mem->hold_count++; @@ -266,7 +269,7 @@ vm_page_hold(vm_page_t mem) #include <sys/systm.h> /* make GCC shut up */ #endif -static __inline void +static inline void vm_page_unhold(vm_page_t mem) { #ifdef DIAGNOSTIC @@ -277,6 +280,21 @@ vm_page_unhold(vm_page_t mem) #endif } +static inline void +vm_page_protect(vm_page_t mem, int prot) +{ + if (prot == VM_PROT_NONE) { + if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) { + pmap_page_protect(VM_PAGE_TO_PHYS(mem), prot); + mem->flags &= ~(PG_WRITEABLE|PG_MAPPED); + } + } else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) { + pmap_page_protect(VM_PAGE_TO_PHYS(mem), prot); + mem->flags &= ~PG_WRITEABLE; + } +} + + #endif /* KERNEL */ #define ACT_DECLINE 1 |