| author | svn2git <svn2git@FreeBSD.org> | 1993-11-01 08:00:00 +0000 |
|---|---|---|
| committer | svn2git <svn2git@FreeBSD.org> | 1993-11-01 08:00:00 +0000 |
| commit | 8503f4f13f77abf7adc8f7e329c6f9c1d52b6a20 (patch) | |
| tree | c5b2ce776438e0a52b492a2ab6ab41360b8ba1f6 /sys/vm | |
Release FreeBSD 1.0 (tags: upstream/1.0.0_cvs, release/1.0.0_cvs)
This commit was manufactured to restore the state of the 1.0-RELEASE image.
Releases prior to 5.3-RELEASE omit the secure/ and crypto/ subdirs.
Diffstat (limited to 'sys/vm')
36 files changed, 13936 insertions, 0 deletions
diff --git a/sys/vm/device_pager.c b/sys/vm/device_pager.c
new file mode 100644
index 000000000000..a057e4e8726b
--- /dev/null
+++ b/sys/vm/device_pager.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 1990 University of Utah.
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)device_pager.c	7.2 (Berkeley) 4/20/91
+ * $Id: device_pager.c,v 1.4 1993/10/16 16:20:10 rgrimes Exp $
+ */
+
+/*
+ * Page to/from special files.
+ */
+
+#include "devpager.h"
+#if NDEVPAGER > 0
+
+#include "param.h"
+#include "conf.h"
+#include "mman.h"
+#include "malloc.h"
+
+#include "vm.h"
+#include "vm_page.h"
+#include "vm_kern.h"
+#include "device_pager.h"
+#include "vnode.h"
+#include "specdev.h"
+
+queue_head_t	dev_pager_list;	/* list of managed devices */
+
+#ifdef DEBUG
+int	dpagerdebug = 0;
+#define	DDB_FOLLOW	0x01
+#define	DDB_INIT	0x02
+#define	DDB_ALLOC	0x04
+#define	DDB_FAIL	0x08
+#endif
+
+void
+dev_pager_init()
+{
+#ifdef DEBUG
+	if (dpagerdebug & DDB_FOLLOW)
+		printf("dev_pager_init()\n");
+#endif
+	queue_init(&dev_pager_list);
+}
+
+vm_pager_t
+dev_pager_alloc(handle, size, prot)
+	caddr_t handle;
+	vm_size_t size;
+	vm_prot_t prot;
+{
+	dev_t dev;
+	vm_pager_t pager;
+	int (*mapfunc)(), nprot;
+	register vm_object_t object;
+	register vm_page_t page;
+	register dev_pager_t devp;
+	register int npages, off;
+	extern int nullop(), enodev();
+
+
+#ifdef DEBUG
+	if (dpagerdebug & DDB_FOLLOW)
+		printf("dev_pager_alloc(%x, %x, %x)\n", handle, size, prot);
+#endif
+	/*
+	 * Pageout to device, should never happen.
+	 */
+	if (handle == NULL)
+		panic("dev_pager_alloc called");
+
+	/*
+	 * Look it up, creating as necessary
+	 */
+	pager = vm_pager_lookup(&dev_pager_list, handle);
+	if (pager == NULL) {
+		/*
+		 * Validation.  Make sure this device can be mapped
+		 * and that range to map is acceptible to device.
+		 */
+		dev = ((struct vnode *) handle)->v_rdev;
+		mapfunc = cdevsw[major(dev)].d_mmap;
+		if (!mapfunc || mapfunc == enodev || mapfunc == nullop)
+			return(NULL);
+		nprot = 0;
+		if (prot & VM_PROT_READ)
+			nprot |= PROT_READ;
+		if (prot & VM_PROT_WRITE)
+			nprot |= PROT_WRITE;
+		if (prot & VM_PROT_EXECUTE)
+			nprot |= PROT_EXEC;
+		npages = atop(round_page(size));
+		for (off = 0; npages--; off += PAGE_SIZE)
+			if ((*mapfunc)(dev, off, nprot) == -1)
+				return(NULL);
+		/*
+		 * Allocate and initialize pager structs
+		 */
+		pager = (vm_pager_t)malloc(sizeof *pager, M_VMPAGER, M_WAITOK);
+		if (pager == NULL)
+			return(NULL);
+		devp = (dev_pager_t)malloc(sizeof *devp, M_VMPGDATA, M_WAITOK);
+		if (devp == NULL) {
+			free((caddr_t)pager, M_VMPAGER);
+			return(NULL);
+		}
+		devp->devp_dev = dev;
+		devp->devp_npages = atop(round_page(size));
+		pager->pg_handle = handle;
+		pager->pg_ops = &devicepagerops;
+		pager->pg_type = PG_DEVICE;
+		pager->pg_data = (caddr_t)devp;
+		/*
+		 * Allocate object and vm_page structures to describe memory
+		 */
+		npages = devp->devp_npages;
+		object = devp->devp_object = vm_object_allocate(ptoa(npages));
+		vm_object_enter(object, pager);
+		vm_object_setpager(object, pager, (vm_offset_t)0, FALSE);
+		devp->devp_pages = (vm_page_t)
+			kmem_alloc(kernel_map, npages*sizeof(struct vm_page));
+		off = 0;
+		for (page = devp->devp_pages;
+		     page < &devp->devp_pages[npages]; page++) {
+			vm_object_lock(object);
+			vm_page_init(page, object, off);
+			page->phys_addr =
+				pmap_phys_address((*mapfunc)(dev, off, nprot));
+			page->wire_count = 1;
+			page->fictitious = TRUE;
+			PAGE_WAKEUP(page);
+			vm_object_unlock(object);
+			off += PAGE_SIZE;
+		}
+		/*
+		 * Finally, put it on the managed list so other can find it.
+		 */
+		queue_enter(&dev_pager_list, pager, vm_pager_t, pg_list);
+#ifdef DEBUG
+		if (dpagerdebug & DDB_ALLOC)
+			printf("dev_pager_alloc: pages %d@%x\n",
+			       devp->devp_npages, devp->devp_pages);
+#endif
+	} else {
+		/*
+		 * vm_object_lookup() gains a reference and also
+		 * removes the object from the cache.
+		 */
+		devp = (dev_pager_t)pager->pg_data;
+		if (vm_object_lookup(pager) != devp->devp_object)
+			panic("dev_pager_setup: bad object");
+	}
+#ifdef DEBUG
+	if (dpagerdebug & DDB_ALLOC) {
+		printf("dev_pager_alloc: pager %x devp %x object %x\n",
+		       pager, devp, object);
+		vm_object_print(object, FALSE);
+	}
+#endif
+	return(pager);
+
+}
+
+void
+dev_pager_dealloc(pager)
+	vm_pager_t pager;
+{
+	dev_pager_t devp = (dev_pager_t)pager->pg_data;
+	register vm_object_t object;
+
+#ifdef DEBUG
+	if (dpagerdebug & DDB_FOLLOW)
+		printf("dev_pager_dealloc(%x)\n", pager);
+#endif
+	queue_remove(&dev_pager_list, pager, vm_pager_t, pg_list);
+	object = devp->devp_object;
+#ifdef DEBUG
+	if (dpagerdebug & DDB_ALLOC)
+		printf("dev_pager_dealloc: devp %x object %x pages %d@%x\n",
+		       devp, object, devp->devp_npages, devp->devp_pages);
+#endif
+	while (!queue_empty(&object->memq))
+		vm_page_remove((vm_page_t)queue_first(&object->memq));
+	kmem_free(kernel_map, devp->devp_pages,
+		  devp->devp_npages * sizeof(struct vm_page));
+	free((caddr_t)devp, M_VMPGDATA);
+	free((caddr_t)pager, M_VMPAGER);
+	pager->pg_data = 0;
+}
+
+dev_pager_getpage(pager, m, sync)
+	vm_pager_t pager;
+	vm_page_t m;
+	boolean_t sync;
+{
+#ifdef DEBUG
+	if (dpagerdebug & DDB_FOLLOW)
+		printf("dev_pager_getpage(%x, %x)\n", pager, m);
+#endif
+	return(VM_PAGER_BAD);
+}
+
+dev_pager_putpage(pager, m, sync)
+	vm_pager_t pager;
+	vm_page_t m;
+	boolean_t sync;
+{
+#ifdef DEBUG
+	if (dpagerdebug & DDB_FOLLOW)
+		printf("dev_pager_putpage(%x, %x)\n", pager, m);
+#endif
+	if (pager == NULL)
+		return;
+	panic("dev_pager_putpage called");
+}
+
+boolean_t
+dev_pager_haspage(pager, offset)
+	vm_pager_t pager;
+	vm_offset_t offset;
+{
+#ifdef DEBUG
+	if (dpagerdebug & DDB_FOLLOW)
+		printf("dev_pager_haspage(%x, %x)\n", pager, offset);
+#endif
+	return(TRUE);
+}
+#endif
diff --git a/sys/vm/device_pager.h b/sys/vm/device_pager.h
new file mode 100644
index 000000000000..eb60b3bd7ce9
--- /dev/null
+++ b/sys/vm/device_pager.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 1990 University of Utah.
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)device_pager.h	7.1 (Berkeley) 12/5/90
+ * $Id: device_pager.h,v 1.2 1993/10/16 16:20:12 rgrimes Exp $
+ */
+
+#ifndef	_DEVICE_PAGER_
+#define	_DEVICE_PAGER_	1
+
+/*
+ * Device pager private data.
+ */
+struct	devpager {
+	queue_head_t	devp_list;	/* list of managed devices */
+	dev_t		devp_dev;	/* devno of device */
+	vm_page_t	devp_pages;	/* page structs for device */
+	int		devp_npages;	/* size of device in pages */
+	int		devp_count;	/* reference count */
+	vm_object_t	devp_object;	/* object representing this device */
+};
+typedef struct devpager	*dev_pager_t;
+
+#define	DEV_PAGER_NULL	((dev_pager_t)0)
+
+#ifdef KERNEL
+
+void		dev_pager_init();
+vm_pager_t	dev_pager_alloc();
+void		dev_pager_dealloc();
+boolean_t	dev_pager_getpage(), dev_pager_putpage();
+boolean_t	dev_pager_haspage();
+
+struct pagerops devicepagerops = {
+	dev_pager_init,
+	dev_pager_alloc,
+	dev_pager_dealloc,
+	dev_pager_getpage,
+	dev_pager_putpage,
+	dev_pager_haspage
+};
+
+#endif
+
+#endif	/* _DEVICE_PAGER_ */
diff --git a/sys/vm/kern_lock.c b/sys/vm/kern_lock.c
new file mode 100644
index 000000000000..35f1327ae3c2
--- /dev/null
+++ b/sys/vm/kern_lock.c
@@ -0,0 +1,535 @@
+/*
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * The Mach Operating System project at Carnegie-Mellon University.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)kern_lock.c	7.4 (Berkeley) 4/21/91
+ * $Id: kern_lock.c,v 1.2 1993/10/16 16:20:13 rgrimes Exp $
+ */
+
+/*
+ * Copyright (c) 1987, 1990 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Authors: Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
+ *	School of Computer Science
+ *	Carnegie Mellon University
+ *	Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+/*
+ *	Locking primitives implementation
+ */
+
+#include "param.h"
+#include "vm_param.h"
+#include "lock.h"
+
+/* XXX */
+#include "proc.h"
+typedef int *thread_t;
+#define	current_thread()	((thread_t)&curproc->p_thread)
+/* XXX */
+
+#if	NCPUS > 1
+
+/*
+ *	Module:		lock
+ *	Function:
+ *		Provide reader/writer sychronization.
+ *	Implementation:
+ *		Simple interlock on a bit.  Readers first interlock
+ *		increment the reader count, then let go.  Writers hold
+ *		the interlock (thus preventing further readers), and
+ *		wait for already-accepted readers to go away.
+ */
+
+/*
+ *	The simple-lock routines are the primitives out of which
+ *	the lock package is built.  The implementation is left
+ *	to the machine-dependent code.
+ */
+
+#ifdef	notdef
+/*
+ *	A sample implementation of simple locks.
+ *	assumes:
+ *		boolean_t test_and_set(boolean_t *)
+ *			indivisibly sets the boolean to TRUE
+ *			and returns its old value
+ *		and that setting a boolean to FALSE is indivisible.
+ */
+/*
+ *	simple_lock_init initializes a simple lock.  A simple lock
+ *	may only be used for exclusive locks.
+ */
+
+void simple_lock_init(l)
+	simple_lock_t	l;
+{
+	*(boolean_t *)l = FALSE;
+}
+
+void simple_lock(l)
+	simple_lock_t	l;
+{
+	while (test_and_set((boolean_t *)l))
+		continue;
+}
+
+void simple_unlock(l)
+	simple_lock_t	l;
+{
+	*(boolean_t *)l = FALSE;
+}
+
+boolean_t simple_lock_try(l)
+	simple_lock_t	l;
+{
+	return (!test_and_set((boolean_t *)l));
+}
+#endif notdef
+#endif NCPUS > 1
+
+#if	NCPUS > 1
+int lock_wait_time = 100;
+#else	NCPUS > 1
+
+	/*
+	 * It is silly to spin on a uni-processor as if we
+	 * thought something magical would happen to the
+	 * want_write bit while we are executing.
+	 */
+int lock_wait_time = 0;
+#endif	NCPUS > 1
+
+
+/*
+ *	Routine:	lock_init
+ *	Function:
+ *		Initialize a lock; required before use.
+ *		Note that clients declare the "struct lock"
+ *		variables and then initialize them, rather
+ *		than getting a new one from this module.
+ */
+void lock_init(l, can_sleep)
+	lock_t		l;
+	boolean_t	can_sleep;
+{
+	bzero(l, sizeof(lock_data_t));
+	simple_lock_init(&l->interlock);
+	l->want_write = FALSE;
+	l->want_upgrade = FALSE;
+	l->read_count = 0;
+	l->can_sleep = can_sleep;
+	l->thread = (char *)-1;		/* XXX */
+	l->recursion_depth = 0;
+}
+
+void lock_sleepable(l, can_sleep)
+	lock_t		l;
+	boolean_t	can_sleep;
+{
+	simple_lock(&l->interlock);
+	l->can_sleep = can_sleep;
+	simple_unlock(&l->interlock);
+}
+
+
+/*
+ *	Sleep locks.  These use the same data structure and algorithm
+ *	as the spin locks, but the process sleeps while it is waiting
+ *	for the lock.  These work on uniprocessor systems.
+ */
+
+void lock_write(l)
+	register lock_t	l;
+{
+	register int	i;
+
+	simple_lock(&l->interlock);
+
+	if (((thread_t)l->thread) == current_thread()) {
+		/*
+		 *	Recursive lock.
+		 */
+		l->recursion_depth++;
+		simple_unlock(&l->interlock);
+		return;
+	}
+
+	/*
+	 *	Try to acquire the want_write bit.
+	 */
+	while (l->want_write) {
+		if ((i = lock_wait_time) > 0) {
+			simple_unlock(&l->interlock);
+			while (--i > 0 && l->want_write)
+				continue;
+			simple_lock(&l->interlock);
+		}
+
+		if (l->can_sleep && l->want_write) {
+			l->waiting = TRUE;
+			thread_sleep((int) l, &l->interlock, FALSE);
+			simple_lock(&l->interlock);
+		}
+	}
+	l->want_write = TRUE;
+
+	/* Wait for readers (and upgrades) to finish */
+
+	while ((l->read_count != 0) || l->want_upgrade) {
+		if ((i = lock_wait_time) > 0) {
+			simple_unlock(&l->interlock);
+			while (--i > 0 && (l->read_count != 0 ||
+					l->want_upgrade))
+				continue;
+			simple_lock(&l->interlock);
+		}
+
+		if (l->can_sleep && (l->read_count != 0 || l->want_upgrade)) {
+			l->waiting = TRUE;
+			thread_sleep((int) l, &l->interlock, FALSE);
+			simple_lock(&l->interlock);
+		}
+	}
+	simple_unlock(&l->interlock);
+}
+
+void lock_done(l)
+	register lock_t	l;
+{
+	simple_lock(&l->interlock);
+
+	if (l->read_count != 0)
+		l->read_count--;
+	else
+	if (l->recursion_depth != 0)
+		l->recursion_depth--;
+	else
+	if (l->want_upgrade)
+		l->want_upgrade = FALSE;
+	else
+		l->want_write = FALSE;
+
+	if (l->waiting) {
+		l->waiting = FALSE;
+		thread_wakeup((int) l);
+	}
+	simple_unlock(&l->interlock);
+}
+
+void lock_read(l)
+	register lock_t	l;
+{
+	register int	i;
+
+	simple_lock(&l->interlock);
+
+	if (((thread_t)l->thread) == current_thread()) {
+		/*
+		 *	Recursive lock.
+		 */
+		l->read_count++;
+		simple_unlock(&l->interlock);
+		return;
+	}
+
+	while (l->want_write || l->want_upgrade) {
+		if ((i = lock_wait_time) > 0) {
+			simple_unlock(&l->interlock);
+			while (--i > 0 && (l->want_write || l->want_upgrade))
+				continue;
+			simple_lock(&l->interlock);
+		}
+
+		if (l->can_sleep && (l->want_write || l->want_upgrade)) {
+			l->waiting = TRUE;
+			thread_sleep((int) l, &l->interlock, FALSE);
+			simple_lock(&l->interlock);
+		}
+	}
+
+	l->read_count++;
+	simple_unlock(&l->interlock);
+}
+
+/*
+ *	Routine:	lock_read_to_write
+ *	Function:
+ *		Improves a read-only lock to one with
+ *		write permission.  If another reader has
+ *		already requested an upgrade to a write lock,
+ *		no lock is held upon return.
+ *
+ *		Returns TRUE if the upgrade *failed*.
+ */
+boolean_t lock_read_to_write(l)
+	register lock_t	l;
+{
+	register int	i;
+
+	simple_lock(&l->interlock);
+
+	l->read_count--;
+
+	if (((thread_t)l->thread) == current_thread()) {
+		/*
+		 *	Recursive lock.
+		 */
+		l->recursion_depth++;
+		simple_unlock(&l->interlock);
+		return(FALSE);
+	}
+
+	if (l->want_upgrade) {
+		/*
+		 *	Someone else has requested upgrade.
+		 *	Since we've released a read lock, wake
+		 *	him up.
+		 */
+		if (l->waiting) {
+			l->waiting = FALSE;
+			thread_wakeup((int) l);
+		}
+
+		simple_unlock(&l->interlock);
+		return (TRUE);
+	}
+
+	l->want_upgrade = TRUE;
+
+	while (l->read_count != 0) {
+		if ((i = lock_wait_time) > 0) {
+			simple_unlock(&l->interlock);
+			while (--i > 0 && l->read_count != 0)
+				continue;
+			simple_lock(&l->interlock);
+		}
+
+		if (l->can_sleep && l->read_count != 0) {
+			l->waiting = TRUE;
+			thread_sleep((int) l, &l->interlock, FALSE);
+			simple_lock(&l->interlock);
+		}
+	}
+
+	simple_unlock(&l->interlock);
+	return (FALSE);
+}
+
+void lock_write_to_read(l)
+	register lock_t	l;
+{
+	simple_lock(&l->interlock);
+
+	l->read_count++;
+	if (l->recursion_depth != 0)
+		l->recursion_depth--;
+	else
+	if (l->want_upgrade)
+		l->want_upgrade = FALSE;
+	else
+		l->want_write = FALSE;
+
+	if (l->waiting) {
+		l->waiting = FALSE;
+		thread_wakeup((int) l);
+	}
+
+	simple_unlock(&l->interlock);
+}
+
+
+/*
+ *	Routine:	lock_try_write
+ *	Function:
+ *		Tries to get a write lock.
+ *
+ *		Returns FALSE if the lock is not held on return.
+ */
+
+boolean_t lock_try_write(l)
+	register lock_t	l;
+{
+
+	simple_lock(&l->interlock);
+
+	if (((thread_t)l->thread) == current_thread()) {
+		/*
+		 *	Recursive lock
+		 */
+		l->recursion_depth++;
+		simple_unlock(&l->interlock);
+		return(TRUE);
+	}
+
+	if (l->want_write || l->want_upgrade || l->read_count) {
+		/*
+		 *	Can't get lock.
+		 */
+		simple_unlock(&l->interlock);
+		return(FALSE);
+	}
+
+	/*
+	 *	Have lock.
+	 */
+
+	l->want_write = TRUE;
+	simple_unlock(&l->interlock);
+	return(TRUE);
+}
+
+/*
+ *	Routine:	lock_try_read
+ *	Function:
+ *		Tries to get a read lock.
+ *
+ *		Returns FALSE if the lock is not held on return.
+ */
+
+boolean_t lock_try_read(l)
+	register lock_t	l;
+{
+	simple_lock(&l->interlock);
+
+	if (((thread_t)l->thread) == current_thread()) {
+		/*
+		 *	Recursive lock
+		 */
+		l->read_count++;
+		simple_unlock(&l->interlock);
+		return(TRUE);
+	}
+
+	if (l->want_write || l->want_upgrade) {
+		simple_unlock(&l->interlock);
+		return(FALSE);
+	}
+
+	l->read_count++;
+	simple_unlock(&l->interlock);
+	return(TRUE);
+}
+
+/*
+ *	Routine:	lock_try_read_to_write
+ *	Function:
+ *		Improves a read-only lock to one with
+ *		write permission.  If another reader has
+ *		already requested an upgrade to a write lock,
+ *		the read lock is still held upon return.
+ *
+ *		Returns FALSE if the upgrade *failed*.
+ */
+boolean_t lock_try_read_to_write(l)
+	register lock_t	l;
+{
+
+	simple_lock(&l->interlock);
+
+	if (((thread_t)l->thread) == current_thread()) {
+		/*
+		 *	Recursive lock
+		 */
+		l->read_count--;
+		l->recursion_depth++;
+		simple_unlock(&l->interlock);
+		return(TRUE);
+	}
+
+	if (l->want_upgrade) {
+		simple_unlock(&l->interlock);
+		return(FALSE);
+	}
+	l->want_upgrade = TRUE;
+	l->read_count--;
+
+	while (l->read_count != 0) {
+		l->waiting = TRUE;
+		thread_sleep((int) l, &l->interlock, FALSE);
+		simple_lock(&l->interlock);
+	}
+
+	simple_unlock(&l->interlock);
+	return(TRUE);
+}
+
+/*
+ *	Allow a process that has a lock for write to acquire it
+ *	recursively (for read, write, or update).
+ */
+void lock_set_recursive(l)
+	lock_t		l;
+{
+	simple_lock(&l->interlock);
+	if (!l->want_write) {
+		panic("lock_set_recursive: don't have write lock");
+	}
+	l->thread = (char *) current_thread();
+	simple_unlock(&l->interlock);
+}
+
+/*
+ *	Prevent a lock from being re-acquired.
+ */
+void lock_clear_recursive(l)
+	lock_t		l;
+{
+	simple_lock(&l->interlock);
+	if (((thread_t) l->thread) != current_thread()) {
+		panic("lock_clear_recursive: wrong thread");
+	}
+	if (l->recursion_depth == 0)
+		l->thread = (char *)-1;		/* XXX */
+	simple_unlock(&l->interlock);
+}
diff --git a/sys/vm/lock.h b/sys/vm/lock.h
new file mode 100644
index 000000000000..aaf1738c360f
--- /dev/null
+++ b/sys/vm/lock.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * The Mach Operating System project at Carnegie-Mellon University.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)lock.h	7.3 (Berkeley) 4/21/91
+ * $Id
+ */
+
+/*
+ * Copyright (c) 1987, 1990 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Authors: Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
+ *	School of Computer Science
+ *	Carnegie Mellon University
+ *	Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+/*
+ *	Locking primitives definitions
+ */
+
+#ifndef	_LOCK_H_
+#define	_LOCK_H_
+
+#define	NCPUS	1	/* XXX */
+
+/*
+ *	A simple spin lock.
+ */
+
+struct slock {
+	int	lock_data;	/* in general 1 bit is sufficient */
+};
+
+typedef struct slock	simple_lock_data_t;
+typedef struct slock	*simple_lock_t;
+
+/*
+ *	The general lock structure.  Provides for multiple readers,
+ *	upgrading from read to write, and sleeping until the lock
+ *	can be gained.
+ */
+
+struct lock {
+#ifdef	vax
+	/*
+	 *	Efficient VAX implementation -- see field description below.
+	 */
+	unsigned int	read_count:16,
+			want_upgrade:1,
+			want_write:1,
+			waiting:1,
+			can_sleep:1,
+			:0;
+
+	simple_lock_data_t	interlock;
+#else	vax
+#ifdef	ns32000
+	/*
+	 *	Efficient ns32000 implementation --
+	 *	see field description below.
+	 */
+	simple_lock_data_t	interlock;
+	unsigned int	read_count:16,
+			want_upgrade:1,
+			want_write:1,
+			waiting:1,
+			can_sleep:1,
+			:0;
+
+#else	ns32000
+	/*	Only the "interlock" field is used for hardware exclusion;
+	 *	other fields are modified with normal instructions after
+	 *	acquiring the interlock bit.
+	 */
+	simple_lock_data_t
+			interlock;	/* Interlock for remaining fields */
+	boolean_t	want_write;	/* Writer is waiting, or locked for write */
+	boolean_t	want_upgrade;	/* Read-to-write upgrade waiting */
+	boolean_t	waiting;	/* Someone is sleeping on lock */
+	boolean_t	can_sleep;	/* Can attempts to lock go to sleep */
+	int		read_count;	/* Number of accepted readers */
+#endif	/* ns32000 */
+#endif	/* vax */
+	char		*thread;	/* Thread that has lock, if recursive locking allowed */
+					/* (should be thread_t, but but we then have mutually
+					   recursive definitions) */
+	int		recursion_depth;/* Depth of recursion */
+};
+
+typedef struct lock	lock_data_t;
+typedef struct lock	*lock_t;
+
+#if	NCPUS > 1
+void		simple_lock_init();
+void		simple_lock();
+void		simple_unlock();
+boolean_t	simple_lock_try();
+#else	NCPUS > 1
+/*
+ *	No multiprocessor locking is necessary.
+ */
+#define	simple_lock_init(l)
+#define	simple_lock(l)
+#define	simple_unlock(l)
+#define	simple_lock_try(l)	(1)	/* always succeeds */
+#endif	/* NCPUS > 1 */
+
+/* Sleep locks must work even if no multiprocessing */
+
+void		lock_init();
+void		lock_sleepable();
+void		lock_write();
+void		lock_read();
+void		lock_done();
+boolean_t	lock_read_to_write();
+void		lock_write_to_read();
+boolean_t	lock_try_write();
+boolean_t	lock_try_read();
+boolean_t	lock_try_read_to_write();
+
+#define	lock_read_done(l)	lock_done(l)
+#define	lock_write_done(l)	lock_done(l)
+
+void		lock_set_recursive();
+void		lock_clear_recursive();
+
+#endif	/* !_LOCK_H_ */
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
new file mode 100644
index 000000000000..b5745ebab9bf
--- /dev/null
+++ b/sys/vm/pmap.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * The Mach Operating System project at Carnegie-Mellon University.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)pmap.h	7.4 (Berkeley) 5/7/91
+ * $Id: pmap.h,v 1.2 1993/10/16 16:20:16 rgrimes Exp $
+ */
+
+/*
+ * Copyright (c) 1987, 1990 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Avadis Tevanian, Jr.
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
+ *	School of Computer Science
+ *	Carnegie Mellon University
+ *	Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+/*
+ *	Machine address mapping definitions -- machine-independent
+ *	section.  [For machine-dependent section, see "machine/pmap.h".]
+ */
+
+#ifndef	_PMAP_VM_
+#define	_PMAP_VM_
+
+#include <machine/pmap.h>
+
+#ifdef KERNEL
+void		pmap_bootstrap();
+void		pmap_init();
+void		pmap_pinit __P((struct pmap *pmap));
+void		pmap_release __P((struct pmap *pmap));
+vm_offset_t	pmap_map();
+pmap_t		pmap_create();
+void		pmap_destroy();
+void		pmap_reference();
+void		pmap_remove();
+void		pmap_page_protect();
+void		pmap_protect();
+void		pmap_enter();
+vm_offset_t	pmap_extract();
+void		pmap_update();
+void		pmap_collect();
+void		pmap_activate();
+void		pmap_deactivate();
+void		pmap_copy();
+void		pmap_statistics();
+void		pmap_clear_reference();
+boolean_t	pmap_is_referenced();
+#ifndef	pmap_kernel
+pmap_t		pmap_kernel();
+#endif
+
+void		pmap_redzone();
+boolean_t	pmap_access();
+
+extern pmap_t	kernel_pmap;
+#endif
+
+#endif	_PMAP_VM_
diff --git a/sys/vm/queue.h b/sys/vm/queue.h
new file mode 100644
index 000000000000..8eaa42a0328e
--- /dev/null
+++ b/sys/vm/queue.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * The Mach Operating System project at Carnegie-Mellon University.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h	7.3 (Berkeley) 4/21/91
+ * $Id: queue.h,v 1.2 1993/10/16 16:20:18 rgrimes Exp $
+ */
+
+/*
+ * Copyright (c) 1987, 1990 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Avadis Tevanian, Jr.
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
+ *	School of Computer Science
+ *	Carnegie Mellon University
+ *	Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+/*
+ *	Type definitions for generic queues.
+ */
+
+#ifndef	_QUEUE_H_
+#define	_QUEUE_H_
+
+struct queue_entry {
+	struct queue_entry	*next;		/* next element */
+	struct queue_entry	*prev;		/* previous element */
+};
+
+typedef struct queue_entry	*queue_t;
+typedef	struct queue_entry	queue_head_t;
+typedef	struct queue_entry	queue_chain_t;
+typedef	struct queue_entry	*queue_entry_t;
+
+#define	round_queue(size)	(((size)+7) & (~7))
+
+#define	enqueue(queue,elt)	enqueue_tail(queue, elt)
+#define	dequeue(queue)		dequeue_head(queue)
+
+#define	enqueue_head(queue,elt)	insque(elt,queue)
+#define	enqueue_tail(queue,elt)	insque(elt,(queue)->prev)
+#define	remqueue(queue,elt)	remque(elt)
+
+#define	queue_init(q)	((q)->next = (q)->prev = q)
+#define	queue_first(q)	((q)->next)
+#define	queue_next(qc)	((qc)->next)
+#define	queue_end(q, qe)	((q) == (qe))
+#define	queue_empty(q)	queue_end((q), queue_first(q))
+
+#define	queue_enter(head, elt, type, field) {			\
+	if (queue_empty((head))) {				\
+		(head)->next = (queue_entry_t) elt;		\
+		(head)->prev = (queue_entry_t) elt;		\
+		(elt)->field.next = head;			\
+		(elt)->field.prev = head;			\
+	} else {						\
+		register queue_entry_t prev = (head)->prev;	\
+		(elt)->field.prev = prev;			\
+		(elt)->field.next = head;			\
+		(head)->prev = (queue_entry_t)(elt);		\
+		((type)prev)->field.next = (queue_entry_t)(elt);\
+	}							\
+}
+
+#define	queue_field(head, thing, type, field)			\
+	(((head) == (thing)) ? (head) : &((type)(thing))->field)
+
+#define	queue_remove(head, elt, type, field) {			\
+	register queue_entry_t next = (elt)->field.next;	\
+	register queue_entry_t prev = (elt)->field.prev;	\
+	queue_field((head), next, type, field)->prev = prev;	\
+	queue_field((head), prev, type, field)->next = next;	\
+}
+
+#define	queue_assign(to, from, type, field) {			\
+	((type)((from)->prev))->field.next = (to);		\
+	((type)((from)->next))->field.prev = (to);		\
+	*to = *from;						\
+}
+
+#define	queue_remove_first(h, e, t, f) {			\
+	e = (t) queue_first((h));				\
+	queue_remove((h), (e), t, f);				\
+}
+
+#endif	/* !_QUEUE_H_ */
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
new file mode 100644
index 000000000000..0f4f088feb74
--- /dev/null
+++ b/sys/vm/swap_pager.c
@@ -0,0 +1,901 @@
+/*
+ * Copyright (c) 1990 University of Utah.
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
+ * from: @(#)swap_pager.c	7.4 (Berkeley) 5/7/91
+ * $Id: swap_pager.c,v 1.2 1993/10/16 16:20:19 rgrimes Exp $
+ */
+
+/*
+ * Quick hack to page to dedicated partition(s).
+ * TODO:
+ *	Add multiprocessor locks
+ *	Deal with async writes in a better fashion
+ */
+
+#include "swappager.h"
+#if NSWAPPAGER > 0
+
+#include "param.h"
+#include "proc.h"
+#include "buf.h"
+#include "systm.h"
+#include "specdev.h"
+#include "vnode.h"
+#include "malloc.h"
+#include "queue.h"
+#include "rlist.h"
+
+#include "vm_param.h"
+#include "queue.h"
+#include "lock.h"
+#include "vm_prot.h"
+#include "vm_object.h"
+#include "vm_page.h"
+#include "vm_pageout.h"
+#include "swap_pager.h"
+
+#define NSWSIZES	16	/* size of swtab */
+#define NPENDINGIO	64	/* max # of pending cleans */
+#define MAXDADDRS	64	/* max # of disk addrs for fixed allocations */
+
+#ifdef DEBUG
+int	swpagerdebug = 0 /*0x100*/;
+#define	SDB_FOLLOW	0x001
+#define	SDB_INIT	0x002
+#define	SDB_ALLOC	0x004
+#define	SDB_IO		0x008
+#define	SDB_WRITE	0x010
+#define	SDB_FAIL	0x020
+#define	SDB_ALLOCBLK	0x040
+#define	SDB_FULL	0x080
+#define	SDB_ANOM	0x100
+#define	SDB_ANOMPANIC	0x200
+#endif
+
+struct swpagerclean {
+	queue_head_t		spc_list;
+	int			spc_flags;
+	struct buf		*spc_bp;
+	sw_pager_t		spc_swp;
+	vm_offset_t		spc_kva;
+	vm_page_t		spc_m;
+} swcleanlist[NPENDINGIO];
+typedef	struct swpagerclean	*swp_clean_t;
+
+/* spc_flags values */
+#define	SPC_FREE	0x00
+#define	SPC_BUSY	0x01
+#define	SPC_DONE	0x02
+#define	SPC_ERROR	0x04
+#define	SPC_DIRTY	0x08
+
+struct swtab {
+	vm_size_t st_osize;	/* size of object (bytes) */
+	int	  st_bsize;	/* vs. size of swap block (DEV_BSIZE units) */
+#ifdef DEBUG
+	u_long	  st_inuse;	/* number in this range in use */
+	u_long	  st_usecnt;	/* total used of this size */
+#endif
+} swtab[NSWSIZES+1];
+
+#ifdef DEBUG
+int		swap_pager_pendingio;	/* max pending async "clean" ops */
+int		swap_pager_poip;	/* pageouts in progress */
+int		swap_pager_piip;	/* pageins in progress */
+#endif
+
+queue_head_t	swap_pager_inuse;	/* list of pending page cleans */
+queue_head_t	swap_pager_free;	/* list of free pager clean structs */
+queue_head_t	swap_pager_list;	/* list of "named" anon regions */
+
+void
+swap_pager_init()
+{
+	register swp_clean_t spc;
+	register int i, bsize;
+	extern int dmmin, dmmax;
+	int maxbsize;
+
+#ifdef DEBUG
+	if (swpagerdebug & (SDB_FOLLOW|SDB_INIT))
+		printf("swpg_init()\n");
+#endif
+	dfltpagerops = &swappagerops;
+	queue_init(&swap_pager_list);
+
+	/*
+	 * Initialize clean lists
+	 */
+	queue_init(&swap_pager_inuse);
+	queue_init(&swap_pager_free);
+	for (i = 0, spc = swcleanlist; i < NPENDINGIO; i++, spc++) {
+		queue_enter(&swap_pager_free, spc, swp_clean_t, spc_list);
+		spc->spc_flags = SPC_FREE;
+	}
+
+	/*
+	 * Calculate the swap allocation constants.
+	 */
+	if (dmmin == 0) {
+		dmmin = DMMIN;
+		if (dmmin < CLBYTES/DEV_BSIZE)
+			dmmin = CLBYTES/DEV_BSIZE;
+	}
+	if (dmmax == 0)
+		dmmax = DMMAX;
+
+	/*
+	 * Fill in our table of object size vs. allocation size
+	 */
+	bsize = btodb(PAGE_SIZE);
+	if (bsize < dmmin)
+		bsize = dmmin;
+	maxbsize = btodb(sizeof(sw_bm_t) * NBBY * PAGE_SIZE);
+	if (maxbsize > dmmax)
+		maxbsize = dmmax;
+	for (i = 0; i < NSWSIZES; i++) {
+		swtab[i].st_osize = (vm_size_t) (MAXDADDRS * dbtob(bsize));
+		swtab[i].st_bsize = bsize;
+#ifdef DEBUG
+		if (swpagerdebug & SDB_INIT)
+			printf("swpg_init: ix %d, size %x, bsize %x\n",
+			       i, swtab[i].st_osize, swtab[i].st_bsize);
+#endif
+		if (bsize >= maxbsize)
+			break;
+		bsize *= 2;
+	}
+	swtab[i].st_osize = 0;
+	swtab[i].st_bsize = bsize;
+}
+
+/*
+ * Allocate a pager structure and associated resources.
+ * Note that if we are called from the pageout daemon (handle == NULL)
+ * we should not wait for memory as it could resulting in deadlock.
+ */
+vm_pager_t
+swap_pager_alloc(handle, size, prot)
+	caddr_t handle;
+	register vm_size_t size;
+	vm_prot_t prot;
+{
+	register vm_pager_t pager;
+	register sw_pager_t swp;
+	struct swtab *swt;
+	int waitok;
+
+#ifdef DEBUG
+	if (swpagerdebug & (SDB_FOLLOW|SDB_ALLOC))
+		printf("swpg_alloc(%x, %x, %x)\n", handle, size, prot);
+#endif
+	/*
+	 * If this is a "named" anonymous region, look it up and
+	 * return the appropriate pager if it exists.
+	 */
+	if (handle) {
+		pager = vm_pager_lookup(&swap_pager_list, handle);
+		if (pager != NULL) {
+			/*
+			 * Use vm_object_lookup to gain a reference
+			 * to the object and also to remove from the
+			 * object cache.
+			 */
+			if (vm_object_lookup(pager) == NULL)
+				panic("swap_pager_alloc: bad object");
+			return(pager);
+		}
+	}
+	/*
+	 * Pager doesn't exist, allocate swap management resources
+	 * and initialize.
+	 */
+	waitok = handle ? M_WAITOK : M_NOWAIT;
+	pager = (vm_pager_t)malloc(sizeof *pager, M_VMPAGER, waitok);
+	if (pager == NULL)
+		return(NULL);
+	swp = (sw_pager_t)malloc(sizeof *swp, M_VMPGDATA, waitok);
+	if (swp == NULL) {
+#ifdef DEBUG
+		if (swpagerdebug & SDB_FAIL)
+			printf("swpg_alloc: swpager malloc failed\n");
+#endif
+		free((caddr_t)pager, M_VMPAGER);
+		return(NULL);
+	}
+	size = round_page(size);
+	for (swt = swtab; swt->st_osize; swt++)
+		if (size <= swt->st_osize)
+			break;
+#ifdef DEBUG
+	swt->st_inuse++;
+	swt->st_usecnt++;
+#endif
+	swp->sw_osize = size;
+	swp->sw_bsize = swt->st_bsize;
+	swp->sw_nblocks = (btodb(size) + swp->sw_bsize - 1) / swp->sw_bsize;
+	swp->sw_blocks = (sw_blk_t)
+		malloc(swp->sw_nblocks*sizeof(*swp->sw_blocks),
+		       M_VMPGDATA, M_NOWAIT);
+	if (swp->sw_blocks == NULL) {
+		free((caddr_t)swp, M_VMPGDATA);
+		free((caddr_t)pager, M_VMPAGER);
+#ifdef DEBUG
+		if (swpagerdebug & SDB_FAIL)
+			printf("swpg_alloc: sw_blocks malloc failed\n");
+		swt->st_inuse--;
+		swt->st_usecnt--;
+#endif
+		return(FALSE);
+	}
+	bzero((caddr_t)swp->sw_blocks,
+	      swp->sw_nblocks * sizeof(*swp->sw_blocks));
+	swp->sw_poip = 0;
+	if (handle) {
+		vm_object_t object;
+
+		swp->sw_flags = SW_NAMED;
+		queue_enter(&swap_pager_list, pager, vm_pager_t, pg_list);
+		/*
+		 * Consistant with other pagers: return with object
+		 * referenced.  Can't do this with handle == NULL
+		 * since it might be the pageout daemon calling.
+		 */
+		object = vm_object_allocate(size);
+		vm_object_enter(object, pager);
+		vm_object_setpager(object, pager, 0, FALSE);
+	} else {
+		swp->sw_flags = 0;
+		queue_init(&pager->pg_list);
+	}
+	pager->pg_handle = handle;
+	pager->pg_ops = &swappagerops;
+	pager->pg_type = PG_SWAP;
+	pager->pg_data = (caddr_t)swp;
+
+#ifdef DEBUG
+	if (swpagerdebug & SDB_ALLOC)
+		printf("swpg_alloc: pg_data %x, %x of %x at %x\n",
+		       swp, swp->sw_nblocks, swp->sw_bsize, swp->sw_blocks);
+#endif
+	return(pager);
+}
+
+void
+swap_pager_dealloc(pager)
+	vm_pager_t pager;
+{
+	register int i;
+	register sw_blk_t bp;
+	register sw_pager_t swp;
+	struct swtab *swt;
+	int s;
+
+#ifdef DEBUG
+	/* save panic time state */
+	if ((swpagerdebug & SDB_ANOMPANIC) && panicstr)
+		return;
+	if (swpagerdebug & (SDB_FOLLOW|SDB_ALLOC))
+		printf("swpg_dealloc(%x)\n", pager);
+#endif
+	/*
+	 * Remove from list right away so lookups will fail if we
+	 * block for pageout completion.
+	 */
+	swp = (sw_pager_t) pager->pg_data;
+	if (swp->sw_flags & SW_NAMED) {
+		queue_remove(&swap_pager_list, pager, vm_pager_t, pg_list);
+		swp->sw_flags &= ~SW_NAMED;
+	}
+#ifdef DEBUG
+	for (swt = swtab; swt->st_osize; swt++)
+		if (swp->sw_osize <= swt->st_osize)
+			break;
+	swt->st_inuse--;
+#endif
+
+	/*
+	 * Wait for all pageouts to finish and remove
+	 * all entries from cleaning list.
+	 */
+	s = splbio();
+	while (swp->sw_poip) {
+		swp->sw_flags |= SW_WANTED;
+		assert_wait((int)swp);
+		thread_block();
+	}
+	splx(s);
+	(void) swap_pager_clean(NULL, B_WRITE);
+
+	/*
+	 * Free left over swap blocks
+	 */
+	s = splbio();
+	for (i = 0, bp = swp->sw_blocks; i < swp->sw_nblocks; i++, bp++)
+		if (bp->swb_block) {
+#ifdef DEBUG
+			if (swpagerdebug & (SDB_ALLOCBLK|SDB_FULL))
+				printf("swpg_dealloc: blk %x\n",
+				       bp->swb_block);
+#endif
+			rlist_free(&swapmap, (unsigned)bp->swb_block,
+				(unsigned)bp->swb_block + swp->sw_bsize - 1);
+		}
+	splx(s);
+	/*
+	 * Free swap management resources
+	 */
+	free((caddr_t)swp->sw_blocks, M_VMPGDATA);
+	free((caddr_t)swp, M_VMPGDATA);
+	free((caddr_t)pager, M_VMPAGER);
+}
+
+swap_pager_getpage(pager, m, sync)
+	vm_pager_t pager;
+	vm_page_t m;
+	boolean_t sync;
+{
+#ifdef DEBUG
+	if (swpagerdebug & SDB_FOLLOW)
+		printf("swpg_getpage(%x, %x, %d)\n", pager, m, sync);
+#endif
+	return(swap_pager_io((sw_pager_t)pager->pg_data, m, B_READ));
+}
+
+swap_pager_putpage(pager, m, sync)
+	vm_pager_t pager;
+	vm_page_t m;
+	boolean_t sync;
+{
+	int flags;
+
+#ifdef DEBUG
+	if (swpagerdebug & SDB_FOLLOW)
+		printf("swpg_putpage(%x, %x, %d)\n", pager, m, sync);
+#endif
+	if (pager == NULL) {
+		(void) swap_pager_clean(NULL, B_WRITE);
+		return;
+	}
+	flags = B_WRITE;
+	if (!sync)
+		flags |= B_ASYNC;
+	return(swap_pager_io((sw_pager_t)pager->pg_data, m, flags));
+}
+
+boolean_t
+swap_pager_haspage(pager, offset)
+	vm_pager_t pager;
+	vm_offset_t offset;
+{
+	register sw_pager_t swp;
+	register sw_blk_t swb;
+	int ix;
+
+#ifdef DEBUG
+	if (swpagerdebug & (SDB_FOLLOW|SDB_ALLOCBLK))
+		printf("swpg_haspage(%x, %x) ", pager, offset);
+#endif
+	swp = (sw_pager_t) pager->pg_data;
+	ix = offset / dbtob(swp->sw_bsize);
+	if (swp->sw_blocks == NULL || ix >= swp->sw_nblocks) {
+#ifdef DEBUG
+		if (swpagerdebug & (SDB_FAIL|SDB_FOLLOW|SDB_ALLOCBLK))
+			printf("swpg_haspage: %x bad offset %x, ix %x\n",
+			       swp->sw_blocks, offset, ix);
+#endif
+		return(FALSE);
+	}
+	swb = &swp->sw_blocks[ix];
+	if (swb->swb_block)
+		ix = atop(offset % dbtob(swp->sw_bsize));
+#ifdef DEBUG
+	if (swpagerdebug & SDB_ALLOCBLK)
+		printf("%x blk %x+%x ", swp->sw_blocks, swb->swb_block, ix);
+	if (swpagerdebug & (SDB_FOLLOW|SDB_ALLOCBLK))
+		printf("-> %c\n",
+		       "FT"[swb->swb_block && (swb->swb_mask & (1 << ix))]);
+#endif
+	if (swb->swb_block && (swb->swb_mask & (1 << ix)))
+		return(TRUE);
+	return(FALSE);
+}
+
+/*
+ * Scaled down version of swap().
+ * Assumes that PAGE_SIZE < MAXPHYS; i.e. only one operation needed.
+ * BOGUS:	lower level IO routines expect a KVA so we have to map our
+ *		provided physical page into the KVA to keep them happy.
+ */
+swap_pager_io(swp, m, flags)
+	register sw_pager_t swp;
+	vm_page_t m;
+	int flags;
+{
+	register struct buf *bp;
+	register sw_blk_t swb;
+	register int s;
+	int ix;
+	boolean_t rv;
+	vm_offset_t kva, off;
+	swp_clean_t spc;
+
+#ifdef DEBUG
+	/* save panic time state */
+	if ((swpagerdebug & SDB_ANOMPANIC) && panicstr)
+		return;
+	if (swpagerdebug & (SDB_FOLLOW|SDB_IO))
+		printf("swpg_io(%x, %x, %x)\n", swp, m, flags);
+#endif
+
+	/*
+	 * For reads (pageins) and synchronous writes, we clean up
+	 * all completed async pageouts.
+	 */
+	if ((flags & B_ASYNC) == 0) {
+		s = splbio();
+#ifdef DEBUG
+		/*
+		 * Check to see if this page is currently being cleaned.
+		 * If it is, we just wait til the operation is done before
+		 * continuing.
+		 */
+		while (swap_pager_clean(m, flags&B_READ)) {
+			if (swpagerdebug & SDB_ANOM)
+				printf("swap_pager_io: page %x cleaning\n", m);
+
+			swp->sw_flags |= SW_WANTED;
+			assert_wait((int)swp);
+			thread_block();
+		}
+#else
+		(void) swap_pager_clean(m, flags&B_READ);
+#endif
+		splx(s);
+	}
+	/*
+	 * For async writes (pageouts), we cleanup completed pageouts so
+	 * that all available resources are freed.  Also tells us if this
+	 * page is already being cleaned.  If it is, or no resources
+	 * are available, we try again later.
+	 */
+	else if (swap_pager_clean(m, B_WRITE) ||
+		 queue_empty(&swap_pager_free)) {
+#ifdef DEBUG
+		if ((swpagerdebug & SDB_ANOM) &&
+		    !queue_empty(&swap_pager_free))
+			printf("swap_pager_io: page %x already cleaning\n", m);
+#endif
+		return(VM_PAGER_FAIL);
+	}
+
+	/*
+	 * Determine swap block and allocate as necessary.
+	 */
+	off = m->offset + m->object->paging_offset;
+	ix = off / dbtob(swp->sw_bsize);
+	if (swp->sw_blocks == NULL || ix >= swp->sw_nblocks) {
+#ifdef DEBUG
+		if (swpagerdebug & SDB_FAIL)
+			printf("swpg_io: bad offset %x+%x(%d) in %x\n",
+			       m->offset, m->object->paging_offset,
+			       ix, swp->sw_blocks);
+#endif
+		return(VM_PAGER_FAIL);
+	}
+	s = splbio();
+	swb = &swp->sw_blocks[ix];
+	off = off % dbtob(swp->sw_bsize);
+	if (flags & B_READ) {
+		if (swb->swb_block == 0 ||
+		    (swb->swb_mask & (1 << atop(off))) == 0) {
+#ifdef DEBUG
+			if (swpagerdebug & (SDB_ALLOCBLK|SDB_FAIL))
+				printf("swpg_io: %x bad read: blk %x+%x, mask %x, off %x+%x\n",
+				       swp->sw_blocks,
+				       swb->swb_block, atop(off),
+				       swb->swb_mask,
+				       m->offset, m->object->paging_offset);
+#endif
+			/* XXX: should we zero page here?? */
+			splx(s);
+			return(VM_PAGER_FAIL);
+		}
+	} else if (swb->swb_block == 0) {
+#ifdef old
+		swb->swb_block = rmalloc(swapmap, swp->sw_bsize);
+		if (swb->swb_block == 0) {
+#else
+		if (!rlist_alloc(&swapmap, (unsigned)swp->sw_bsize,
+				 (unsigned *)&swb->swb_block)) {
+#endif
+#ifdef DEBUG
+			if (swpagerdebug & SDB_FAIL)
+				printf("swpg_io: rmalloc of %x failed\n",
+				       swp->sw_bsize);
+#endif
+			splx(s);
+			return(VM_PAGER_FAIL);
+		}
+#ifdef DEBUG
+		if (swpagerdebug & (SDB_FULL|SDB_ALLOCBLK))
+			printf("swpg_io: %x alloc blk %x at ix %x\n",
+			       swp->sw_blocks, swb->swb_block, ix);
+#endif
+	}
+	splx(s);
+
+	/*
+	 * Allocate a kernel virtual address and initialize so that PTE
+	 * is available for lower level IO drivers.
+	 */
+	kva = vm_pager_map_page(m);
+
+	/*
+	 * Get a swap buffer header and perform the IO
+	 */
+	s = splbio();
+	while (bswlist.av_forw == NULL) {
+#ifdef DEBUG
+		if (swpagerdebug & SDB_ANOM)
+			printf("swap_pager_io: wait on swbuf for %x (%d)\n",
+			       m, flags);
+#endif
+		bswlist.b_flags |= B_WANTED;
+		sleep((caddr_t)&bswlist, PSWP+1);
+	}
+	bp = bswlist.av_forw;
+	bswlist.av_forw = bp->av_forw;
+	splx(s);
+	bp->b_flags = B_BUSY | (flags & B_READ);
+	bp->b_proc = &proc0;	/* XXX (but without B_PHYS set this is ok) */
+	bp->b_un.b_addr = (caddr_t)kva;
+	bp->b_blkno = swb->swb_block + btodb(off);
+	VHOLD(swapdev_vp);
+	bp->b_vp = swapdev_vp;
+	if (swapdev_vp->v_type == VBLK)
+		bp->b_dev = swapdev_vp->v_rdev;
+	bp->b_bcount = PAGE_SIZE;
+	if ((bp->b_flags & B_READ) == 0)
+		swapdev_vp->v_numoutput++;
+
+	/*
+	 * If this is an async write we set up additional buffer fields
+	 * and place a "cleaning" entry on the inuse queue.
+	 */
+	if ((flags & (B_READ|B_ASYNC)) == B_ASYNC) {
+#ifdef DEBUG
+		if (queue_empty(&swap_pager_free))
+			panic("swpg_io: lost spc");
+#endif
+		queue_remove_first(&swap_pager_free,
+				   spc, swp_clean_t, spc_list);
+#ifdef DEBUG
+		if (spc->spc_flags != SPC_FREE)
+			panic("swpg_io: bad free spc");
+#endif
+		spc->spc_flags = SPC_BUSY;
+		spc->spc_bp = bp;
+		spc->spc_swp = swp;
+		spc->spc_kva = kva;
+		spc->spc_m = m;
+		bp->b_flags |= B_CALL;
+		bp->b_iodone = swap_pager_iodone;
+		s = splbio();
+		swp->sw_poip++;
+		queue_enter(&swap_pager_inuse, spc, swp_clean_t, spc_list);
+
+#ifdef DEBUG
+		swap_pager_poip++;
+		if (swpagerdebug & SDB_WRITE)
+			printf("swpg_io: write: bp=%x swp=%x spc=%x poip=%d\n",
+			       bp, swp, spc, swp->sw_poip);
+		if ((swpagerdebug & SDB_ALLOCBLK) &&
+		    (swb->swb_mask & (1 << atop(off))) == 0)
+			printf("swpg_io: %x write blk %x+%x\n",
+			       swp->sw_blocks, swb->swb_block, atop(off));
+#endif
+		swb->swb_mask |= (1 << atop(off));
+		splx(s);
+	}
+#ifdef DEBUG
+	if (swpagerdebug & SDB_IO)
+		printf("swpg_io: IO start: bp %x, db %x, va %x, pa %x\n",
+		       bp, swb->swb_block+btodb(off), kva, VM_PAGE_TO_PHYS(m));
+#endif
+	VOP_STRATEGY(bp);
+	if ((flags & (B_READ|B_ASYNC)) == B_ASYNC) {
+#ifdef DEBUG
+		if (swpagerdebug & SDB_IO)
+			printf("swpg_io: IO started: bp %x\n", bp);
+#endif
+		return(VM_PAGER_PEND);
+	}
+	s = splbio();
+#ifdef DEBUG
+	if (flags & B_READ)
+		swap_pager_piip++;
+	else
+		swap_pager_poip++;
+#endif
+	while ((bp->b_flags & B_DONE) == 0) {
+		assert_wait((int)bp);
+		thread_block();
+	}
+#ifdef DEBUG
+	if (flags & B_READ)
+		--swap_pager_piip;
+	else
+		--swap_pager_poip;
+#endif
+	rv = (bp->b_flags & B_ERROR) ? VM_PAGER_FAIL : VM_PAGER_OK;
+	bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_DIRTY);
+	bp->av_forw = bswlist.av_forw;
+	bswlist.av_forw = bp;
+	if (bp->b_vp)
+		brelvp(bp);
+	if (bswlist.b_flags & B_WANTED) {
+		bswlist.b_flags &= ~B_WANTED;
+		thread_wakeup((int)&bswlist);
+	}
+	if ((flags & B_READ) == 0 && rv == VM_PAGER_OK) {
+		m->clean = TRUE;
+		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
+	}
+	splx(s);
+#ifdef DEBUG
+	if (swpagerdebug & SDB_IO)
+		printf("swpg_io: IO done: bp %x, rv %d\n", bp, rv);
+	if ((swpagerdebug & SDB_FAIL) && rv == VM_PAGER_FAIL)
+		printf("swpg_io: IO error\n");
+#endif
+	vm_pager_unmap_page(kva);
+	return(rv);
+}
+
+boolean_t
+swap_pager_clean(m, rw)
+	vm_page_t m;
+	int rw;
+{
+	register swp_clean_t spc, tspc;
+	register int s;
+
+#ifdef DEBUG
+	/* save panic time state */
+	if ((swpagerdebug & SDB_ANOMPANIC) && panicstr)
+		return;
+	if (swpagerdebug & SDB_FOLLOW)
+		printf("swpg_clean(%x, %d)\n", m, rw);
+#endif
+	tspc = NULL;
+	for (;;) {
+		/*
+		 * Look up and removal from inuse list must be done
+		 * at splbio() to avoid conflicts with swap_pager_iodone.
+		 */
+		s = splbio();
+		spc = (swp_clean_t) queue_first(&swap_pager_inuse);
+		while (!queue_end(&swap_pager_inuse, (queue_entry_t)spc)) {
+			if ((spc->spc_flags & SPC_DONE) &&
+			    swap_pager_finish(spc)) {
+				queue_remove(&swap_pager_inuse, spc,
+					     swp_clean_t, spc_list);
+				break;
+			}
+			if (m && m == spc->spc_m) {
+#ifdef DEBUG
+				if (swpagerdebug & SDB_ANOM)
+					printf("swap_pager_clean: page %x on list, flags %x\n",
+					       m, spc->spc_flags);
+#endif
+				tspc = spc;
+			}
+			spc = (swp_clean_t) queue_next(&spc->spc_list);
+		}
+
+		/*
+		 * No operations done, thats all we can do for now.
+		 */
+		if (queue_end(&swap_pager_inuse, (queue_entry_t)spc))
+			break;
+		splx(s);
+
+		/*
+		 * The desired page was found to be busy earlier in
+		 * the scan but has since completed.
+ */ + if (tspc && tspc == spc) { +#ifdef DEBUG + if (swpagerdebug & SDB_ANOM) + printf("swap_pager_clean: page %x done while looking\n", + m); +#endif + tspc = NULL; + } + spc->spc_flags = SPC_FREE; + vm_pager_unmap_page(spc->spc_kva); + queue_enter(&swap_pager_free, spc, swp_clean_t, spc_list); +#ifdef DEBUG + if (swpagerdebug & SDB_WRITE) + printf("swpg_clean: free spc %x\n", spc); +#endif + } +#ifdef DEBUG + /* + * If we found that the desired page is already being cleaned + * mark it so that swap_pager_iodone() will not set the clean + * flag before the pageout daemon has another chance to clean it. + */ + if (tspc && rw == B_WRITE) { + if (swpagerdebug & SDB_ANOM) + printf("swap_pager_clean: page %x on clean list\n", + tspc); + tspc->spc_flags |= SPC_DIRTY; + } +#endif + splx(s); + +#ifdef DEBUG + if (swpagerdebug & SDB_WRITE) + printf("swpg_clean: return %d\n", tspc ? TRUE : FALSE); + if ((swpagerdebug & SDB_ANOM) && tspc) + printf("swpg_clean: %s of cleaning page %x\n", + rw == B_READ ? "get" : "put", m); +#endif + return(tspc ? TRUE : FALSE); +} + +swap_pager_finish(spc) + register swp_clean_t spc; +{ + vm_object_t object = spc->spc_m->object; + + /* + * Mark the paging operation as done. + * (XXX) If we cannot get the lock, leave it til later. + * (XXX) Also we are assuming that an async write is a + * pageout operation that has incremented the counter. + */ + if (!vm_object_lock_try(object)) + return(0); + + if (--object->paging_in_progress == 0) + thread_wakeup((int) object); + +#ifdef DEBUG + /* + * XXX: this isn't even close to the right thing to do, + * introduces a variety of race conditions. + * + * If dirty, vm_pageout() has attempted to clean the page + * again. In this case we do not do anything as we will + * see the page again shortly. + */ + if (spc->spc_flags & SPC_DIRTY) { + if (swpagerdebug & SDB_ANOM) + printf("swap_pager_finish: page %x dirty again\n", + spc->spc_m); + spc->spc_m->busy = FALSE; + PAGE_WAKEUP(spc->spc_m); + vm_object_unlock(object); + return(1); + } +#endif + /* + * If no error mark as clean and inform the pmap system. + * If error, mark as dirty so we will try again. 
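+ *
+ * (A hypothetical bound, not in this code: a retry counter in the
+ * swp_clean entry --
+ *
+ *	if ((spc->spc_flags & SPC_ERROR) && ++spc->spc_retry > 3)
+ *		spc->spc_m->clean = TRUE;	-- give up on this page
+ *
+ * where spc_retry is invented purely for illustration.)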
+ * (XXX could get stuck doing this, should give up after awhile) + */ + if (spc->spc_flags & SPC_ERROR) { + printf("swap_pager_finish: clean of page %x failed\n", + VM_PAGE_TO_PHYS(spc->spc_m)); + spc->spc_m->laundry = TRUE; + } else { + spc->spc_m->clean = TRUE; + pmap_clear_modify(VM_PAGE_TO_PHYS(spc->spc_m)); + } + spc->spc_m->busy = FALSE; + PAGE_WAKEUP(spc->spc_m); + + vm_object_unlock(object); + return(1); +} + +swap_pager_iodone(bp) + register struct buf *bp; +{ + register swp_clean_t spc; + daddr_t blk; + int s; + +#ifdef DEBUG + /* save panic time state */ + if ((swpagerdebug & SDB_ANOMPANIC) && panicstr) + return; + if (swpagerdebug & SDB_FOLLOW) + printf("swpg_iodone(%x)\n", bp); +#endif + s = splbio(); + spc = (swp_clean_t) queue_first(&swap_pager_inuse); + while (!queue_end(&swap_pager_inuse, (queue_entry_t)spc)) { + if (spc->spc_bp == bp) + break; + spc = (swp_clean_t) queue_next(&spc->spc_list); + } +#ifdef DEBUG + if (queue_end(&swap_pager_inuse, (queue_entry_t)spc)) + panic("swap_pager_iodone: bp not found"); +#endif + + spc->spc_flags &= ~SPC_BUSY; + spc->spc_flags |= SPC_DONE; + if (bp->b_flags & B_ERROR) { + spc->spc_flags |= SPC_ERROR; +printf("error %d blkno %d sz %d ", bp->b_error, bp->b_blkno, bp->b_bcount); + } + spc->spc_bp = NULL; + blk = bp->b_blkno; + +#ifdef DEBUG + --swap_pager_poip; + if (swpagerdebug & SDB_WRITE) + printf("swpg_iodone: bp=%x swp=%x flags=%x spc=%x poip=%x\n", + bp, spc->spc_swp, spc->spc_swp->sw_flags, + spc, spc->spc_swp->sw_poip); +#endif + + spc->spc_swp->sw_poip--; + if (spc->spc_swp->sw_flags & SW_WANTED) { + spc->spc_swp->sw_flags &= ~SW_WANTED; + thread_wakeup((int)spc->spc_swp); + } + + bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_DIRTY); + bp->av_forw = bswlist.av_forw; + bswlist.av_forw = bp; + if (bp->b_vp) + brelvp(bp); + if (bswlist.b_flags & B_WANTED) { + bswlist.b_flags &= ~B_WANTED; + thread_wakeup((int)&bswlist); + } + thread_wakeup((int) &vm_pages_needed); + splx(s); +} +#endif diff --git a/sys/vm/swap_pager.h b/sys/vm/swap_pager.h new file mode 100644 index 000000000000..f49a03d121d6 --- /dev/null +++ b/sys/vm/swap_pager.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 1990 University of Utah. + * Copyright (c) 1991 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Systems Programming Group of the University of Utah Computer + * Science Department. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)swap_pager.h 7.1 (Berkeley) 12/5/90 + * $Id: swap_pager.h,v 1.2 1993/10/16 16:20:21 rgrimes Exp $ + */ + +#ifndef _SWAP_PAGER_ +#define _SWAP_PAGER_ 1 + +/* + * In the swap pager, the backing store for an object is organized as an + * array of some number of "swap blocks". A swap block consists of a bitmask + * and some number of contiguous DEV_BSIZE disk blocks. The minimum size + * of a swap block is: + * + * max(PAGE_SIZE, dmmin*DEV_BSIZE) [ 32k currently ] + * + * bytes (since the pager interface is page oriented), the maximum size is: + * + * min(#bits(swb_mask)*PAGE_SIZE, dmmax*DEV_BSIZE) [ 128k currently ] + * + * where dmmin and dmmax are left over from the old VM interface. The bitmask + * (swb_mask) is used by swap_pager_haspage() to determine if a particular + * page has actually been written; i.e. the pager copy of the page is valid. + * All swap blocks in the backing store of an object will be the same size. + * + * The reason for variable sized swap blocks is to reduce fragmentation of + * swap resources. Whenever possible we allocate smaller swap blocks to + * smaller objects. The swap block size is determined from a table of + * object-size vs. swap-block-size computed at boot time. + */ +typedef int sw_bm_t; /* pager bitmask */ + +struct swblock { + sw_bm_t swb_mask; /* bitmask of valid pages in this block */ + daddr_t swb_block; /* starting disk block for this block */ +}; +typedef struct swblock *sw_blk_t; + +/* + * Swap pager private data. + */ +struct swpager { + vm_size_t sw_osize; /* size of object we are backing (bytes) */ + int sw_bsize; /* size of swap blocks (DEV_BSIZE units) */ + int sw_nblocks;/* number of blocks in list (sw_blk_t units) */ + sw_blk_t sw_blocks; /* pointer to list of swap blocks */ + short sw_flags; /* flags */ + short sw_poip; /* pageouts in progress */ +}; +typedef struct swpager *sw_pager_t; + +#define SW_WANTED 0x01 +#define SW_NAMED 0x02 + +#ifdef KERNEL + +void swap_pager_init(); +vm_pager_t swap_pager_alloc(); +void swap_pager_dealloc(); +boolean_t swap_pager_getpage(), swap_pager_putpage(); +boolean_t swap_pager_haspage(); + +struct pagerops swappagerops = { + swap_pager_init, + swap_pager_alloc, + swap_pager_dealloc, + swap_pager_getpage, + swap_pager_putpage, + swap_pager_haspage +}; + +int swap_pager_iodone(); +boolean_t swap_pager_clean(); + +#endif + +#endif /* _SWAP_PAGER_ */ diff --git a/sys/vm/vm.h b/sys/vm/vm.h new file mode 100644 index 000000000000..ac04bd7092b0 --- /dev/null +++ b/sys/vm/vm.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 1991 Regents of the University of California. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm.h 7.1 (Berkeley) 5/5/91 + * $Id: vm.h,v 1.2 1993/10/16 16:20:22 rgrimes Exp $ + */ + +#ifndef VM_H +#define VM_H +#include <vm/vm_param.h> +#include <vm/lock.h> +#include <vm/queue.h> +#include <vm/vm_prot.h> +#include <vm/vm_inherit.h> +#include <vm/vm_map.h> +#include <vm/vm_object.h> +#include <vm/vm_statistics.h> +#include <vm/pmap.h> + +/* + * Shareable process virtual address space. + * May eventually be merged with vm_map. + * Several fields are temporary (text, data stuff). + */ +struct vmspace { + struct vm_map vm_map; /* VM address map */ + struct pmap vm_pmap; /* private physical map */ + int vm_refcnt; /* number of references */ + caddr_t vm_shm; /* SYS5 shared memory private data XXX */ +/* we copy from vm_startcopy to the end of the structure on fork */ +#define vm_startcopy vm_rssize + segsz_t vm_rssize; /* current resident set size in pages */ + segsz_t vm_swrss; /* resident set size before last swap */ + segsz_t vm_tsize; /* text size (pages) XXX */ + segsz_t vm_dsize; /* data size (pages) XXX */ + segsz_t vm_ssize; /* stack size (pages) */ + caddr_t vm_taddr; /* user virtual address of text XXX */ + caddr_t vm_daddr; /* user virtual address of data XXX */ + caddr_t vm_maxsaddr; /* user VA at max stack growth */ +}; + +struct vmspace *vmspace_alloc __P((vm_offset_t min, vm_offset_t max, + int pageable)); +struct vmspace *vmspace_fork __P((struct vmspace *)); +void vmspace_free __P((struct vmspace *)); +#endif /* VM_H */ diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c new file mode 100644 index 000000000000..2151a5fa50e8 --- /dev/null +++ b/sys/vm/vm_fault.c @@ -0,0 +1,1073 @@ +/* + * Copyright (c) 1991 Regents of the University of California. + * All rights reserved. 
+ * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_fault.c 7.6 (Berkeley) 5/7/91 + * $Id: vm_fault.c,v 1.5 1993/10/16 16:20:24 rgrimes Exp $ + */ + +/* + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + */ + +/* + * Page fault handling module. + */ + +#include "param.h" + +#include "vm.h" +#include "vm_page.h" +#include "vm_pageout.h" + +/* + * vm_fault: + * + * Handle a page fault occuring at the given address, + * requiring the given permissions, in the map specified. + * If successful, the page is inserted into the + * associated physical map. + * + * NOTE: the given address should be truncated to the + * proper page address. 
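+ * For example, a machine-dependent trap handler might invoke
+ * (illustrative only):
+ *
+ *	rv = vm_fault(&curproc->p_vmspace->vm_map, trunc_page(va),
+ *		VM_PROT_READ, FALSE);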
+ * + * KERN_SUCCESS is returned if the page fault is handled; otherwise, + * a standard error specifying why the fault is fatal is returned. + * + * + * The map in question must be referenced, and remains so. + * Caller may hold no locks. + */ +vm_fault(map, vaddr, fault_type, change_wiring) + vm_map_t map; + vm_offset_t vaddr; + vm_prot_t fault_type; + boolean_t change_wiring; +{ + vm_object_t first_object; + vm_offset_t first_offset; + vm_map_entry_t entry; + register vm_object_t object; + register vm_offset_t offset; + register vm_page_t m; + vm_page_t first_m; + vm_prot_t prot; + int result; + boolean_t wired; + boolean_t su; + boolean_t lookup_still_valid; + boolean_t page_exists; + vm_page_t old_m; + vm_object_t next_object; + + vm_stat.faults++; /* needs lock XXX */ +/* + * Recovery actions + */ +#define FREE_PAGE(m) { \ + PAGE_WAKEUP(m); \ + vm_page_lock_queues(); \ + vm_page_free(m); \ + vm_page_unlock_queues(); \ +} + +#define RELEASE_PAGE(m) { \ + PAGE_WAKEUP(m); \ + vm_page_lock_queues(); \ + vm_page_activate(m); \ + vm_page_unlock_queues(); \ +} + +#define UNLOCK_MAP { \ + if (lookup_still_valid) { \ + vm_map_lookup_done(map, entry); \ + lookup_still_valid = FALSE; \ + } \ +} + +#define UNLOCK_THINGS { \ + object->paging_in_progress--; \ + vm_object_unlock(object); \ + if (object != first_object) { \ + vm_object_lock(first_object); \ + FREE_PAGE(first_m); \ + first_object->paging_in_progress--; \ + vm_object_unlock(first_object); \ + } \ + UNLOCK_MAP; \ +} + +#define UNLOCK_AND_DEALLOCATE { \ + UNLOCK_THINGS; \ + vm_object_deallocate(first_object); \ +} + + RetryFault: ; + + /* + * Find the backing store object and offset into + * it to begin the search. + */ + + if ((result = vm_map_lookup(&map, vaddr, fault_type, &entry, + &first_object, &first_offset, + &prot, &wired, &su)) != KERN_SUCCESS) { + return(result); + } + lookup_still_valid = TRUE; + + if (wired) + fault_type = prot; + + first_m = NULL; + + /* + * Make a reference to this object to + * prevent its disposal while we are messing with + * it. Once we have the reference, the map is free + * to be diddled. Since objects reference their + * shadows (and copies), they will stay around as well. + */ + + vm_object_lock(first_object); + + first_object->ref_count++; + first_object->paging_in_progress++; + + /* + * INVARIANTS (through entire routine): + * + * 1) At all times, we must either have the object + * lock or a busy page in some object to prevent + * some other thread from trying to bring in + * the same page. + * + * Note that we cannot hold any locks during the + * pager access or when waiting for memory, so + * we use a busy page then. + * + * Note also that we aren't as concerned about + * more than one thead attempting to pager_data_unlock + * the same page at once, so we don't hold the page + * as busy then, but do record the highest unlock + * value so far. [Unlock requests may also be delivered + * out of order.] + * + * 2) Once we have a busy page, we must remove it from + * the pageout queues, so that the pageout daemon + * will not grab it away. + * + * 3) To prevent another thread from racing us down the + * shadow chain and entering a new page in the top + * object before we do, we must keep a busy page in + * the top object while following the shadow chain. + * + * 4) We must increment paging_in_progress on any object + * for which we have a busy page, to prevent + * vm_object_collapse from removing the busy page + * without our noticing. + */ + + /* + * Search for the page at object/offset. 
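+ *
+ * In outline (an illustrative sketch, not the code itself):
+ *
+ *	for (;;) {
+ *		m = vm_page_lookup(object, offset);
+ *		if (m != NULL)
+ *			break;			-- resident, use it
+ *		... otherwise consult object->pager, if any ...
+ *		offset += object->shadow_offset;
+ *		object = object->shadow;	-- next backing object
+ *	}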
+ */ + + object = first_object; + offset = first_offset; + + /* + * See whether this page is resident + */ + + while (TRUE) { + m = vm_page_lookup(object, offset); + if (m != NULL) { + /* + * If the page is being brought in, + * wait for it and then retry. + */ + if (m->busy) { +#ifdef DOTHREADS + int wait_result; + + PAGE_ASSERT_WAIT(m, !change_wiring); + UNLOCK_THINGS; + thread_block(); + wait_result = current_thread()->wait_result; + vm_object_deallocate(first_object); + if (wait_result != THREAD_AWAKENED) + return(KERN_SUCCESS); + goto RetryFault; +#else + PAGE_ASSERT_WAIT(m, !change_wiring); + UNLOCK_THINGS; +thread_wakeup(&vm_pages_needed); /* XXX! */ + thread_block(); + vm_object_deallocate(first_object); + goto RetryFault; +#endif + } + + if (m->absent) + panic("vm_fault: absent"); + + /* + * If the desired access to this page has + * been locked out, request that it be unlocked. + */ + + if (fault_type & m->page_lock) { +#ifdef DOTHREADS + int wait_result; + + if ((fault_type & m->unlock_request) != fault_type) + panic("vm_fault: pager_data_unlock"); + + PAGE_ASSERT_WAIT(m, !change_wiring); + UNLOCK_THINGS; + thread_block(); + wait_result = current_thread()->wait_result; + vm_object_deallocate(first_object); + if (wait_result != THREAD_AWAKENED) + return(KERN_SUCCESS); + goto RetryFault; +#else + if ((fault_type & m->unlock_request) != fault_type) + panic("vm_fault: pager_data_unlock"); + + PAGE_ASSERT_WAIT(m, !change_wiring); + UNLOCK_THINGS; +thread_wakeup(&vm_pages_needed); /* XXX */ + thread_block(); + vm_object_deallocate(first_object); + goto RetryFault; +#endif + } + + /* + * Remove the page from the pageout daemon's + * reach while we play with it. + */ + + vm_page_lock_queues(); + if (m->inactive) { + queue_remove(&vm_page_queue_inactive, m, + vm_page_t, pageq); + m->inactive = FALSE; + vm_page_inactive_count--; + vm_stat.reactivations++; + } + + if (m->active) { + queue_remove(&vm_page_queue_active, m, + vm_page_t, pageq); + m->active = FALSE; + vm_page_active_count--; + } + vm_page_unlock_queues(); + + /* + * Mark page busy for other threads. + */ + m->busy = TRUE; + m->absent = FALSE; + break; + } + + if (((object->pager != NULL) && + (!change_wiring || wired)) + || (object == first_object)) { + + /* + * Allocate a new page for this object/offset + * pair. + */ + + m = vm_page_alloc(object, offset); + + if (m == NULL) { + UNLOCK_AND_DEALLOCATE; + VM_WAIT; + goto RetryFault; + } + } + + if ((object->pager != NULL) && + (!change_wiring || wired)) { + int rv; + + /* + * Now that we have a busy page, we can + * release the object lock. + */ + vm_object_unlock(object); + + /* + * Call the pager to retrieve the data, if any, + * after releasing the lock on the map. + */ + UNLOCK_MAP; + + rv = vm_pager_get(object->pager, m, TRUE); + if (rv == VM_PAGER_OK) { + /* + * Found the page. + * Leave it busy while we play with it. + */ + vm_object_lock(object); + + /* + * Relookup in case pager changed page. + * Pager is responsible for disposition + * of old page if moved. + */ + m = vm_page_lookup(object, offset); + + vm_stat.pageins++; + m->fake = FALSE; + pmap_clear_modify(VM_PAGE_TO_PHYS(m)); + break; + } + + /* + * Remove the bogus page (which does not + * exist at this object/offset); before + * doing so, we must get back our object + * lock to preserve our invariant. + * + * Also wake up any other thread that may want + * to bring in this page. 
+ * + * If this is the top-level object, we must + * leave the busy page to prevent another + * thread from rushing past us, and inserting + * the page in that object at the same time + * that we are. + */ + + vm_object_lock(object); + /* + * Data outside the range of the pager; an error + */ + if (rv == VM_PAGER_BAD) { + FREE_PAGE(m); + UNLOCK_AND_DEALLOCATE; + return(KERN_PROTECTION_FAILURE); /* XXX */ + } + if (object != first_object) { + FREE_PAGE(m); + /* + * XXX - we cannot just fall out at this + * point, m has been freed and is invalid! + */ + } + } + + /* + * We get here if the object has no pager (or unwiring) + * or the pager doesn't have the page. + */ + if (object == first_object) + first_m = m; + + /* + * Move on to the next object. Lock the next + * object before unlocking the current one. + */ + + offset += object->shadow_offset; + next_object = object->shadow; + if (next_object == NULL) { + /* + * If there's no object left, fill the page + * in the top object with zeros. + */ + if (object != first_object) { + object->paging_in_progress--; + vm_object_unlock(object); + + object = first_object; + offset = first_offset; + m = first_m; + vm_object_lock(object); + } + first_m = NULL; + + vm_page_zero_fill(m); + vm_stat.zero_fill_count++; + m->fake = FALSE; + m->absent = FALSE; + break; + } + else { + vm_object_lock(next_object); + if (object != first_object) + object->paging_in_progress--; + vm_object_unlock(object); + object = next_object; + object->paging_in_progress++; + } + } + + if (m->absent || m->active || m->inactive || !m->busy) + panic("vm_fault: absent or active or inactive or not busy after main loop"); + + /* + * PAGE HAS BEEN FOUND. + * [Loop invariant still holds -- the object lock + * is held.] + */ + + old_m = m; /* save page that would be copied */ + + /* + * If the page is being written, but isn't + * already owned by the top-level object, + * we have to copy it into a new page owned + * by the top-level object. + */ + + if (object != first_object) { + /* + * We only really need to copy if we + * want to write it. + */ + + if (fault_type & VM_PROT_WRITE) { + + /* + * If we try to collapse first_object at this + * point, we may deadlock when we try to get + * the lock on an intermediate object (since we + * have the bottom object locked). We can't + * unlock the bottom object, because the page + * we found may move (by collapse) if we do. + * + * Instead, we first copy the page. Then, when + * we have no more use for the bottom object, + * we unlock it and try to collapse. + * + * Note that we copy the page even if we didn't + * need to... that's the breaks. + */ + + /* + * We already have an empty page in + * first_object - use it. + */ + + vm_page_copy(m, first_m); + first_m->fake = FALSE; + first_m->absent = FALSE; + + /* + * If another map is truly sharing this + * page with us, we have to flush all + * uses of the original page, since we + * can't distinguish those which want the + * original from those which need the + * new copy. + * + * XXX If we know that only one map has + * access to this page, then we could + * avoid the pmap_page_protect() call. + */ + + vm_page_lock_queues(); + vm_page_activate(m); + vm_page_deactivate(m); + pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE); + vm_page_unlock_queues(); + + /* + * We no longer need the old page or object. + */ + PAGE_WAKEUP(m); + object->paging_in_progress--; + vm_object_unlock(object); + + /* + * Only use the new page below... 
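+ *
+ * (The decision just taken, in outline -- illustrative only, and
+ * applying only when object != first_object:
+ *
+ *	if (fault_type & VM_PROT_WRITE)
+ *		vm_page_copy(m, first_m);	-- private copy, use it
+ *	else {
+ *		prot &= ~VM_PROT_WRITE;		-- share it read-only
+ *		m->copy_on_write = TRUE;
+ *	})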
+ */ + + vm_stat.cow_faults++; + m = first_m; + object = first_object; + offset = first_offset; + + /* + * Now that we've gotten the copy out of the + * way, let's try to collapse the top object. + */ + vm_object_lock(object); + /* + * But we have to play ugly games with + * paging_in_progress to do that... + */ + object->paging_in_progress--; + vm_object_collapse(object); + object->paging_in_progress++; + } + else { + prot &= (~VM_PROT_WRITE); + m->copy_on_write = TRUE; + } + } + + if (m->active || m->inactive) + panic("vm_fault: active or inactive before copy object handling"); + + /* + * If the page is being written, but hasn't been + * copied to the copy-object, we have to copy it there. + */ + RetryCopy: + if (first_object->copy != NULL) { + vm_object_t copy_object = first_object->copy; + vm_offset_t copy_offset; + vm_page_t copy_m; + + /* + * We only need to copy if we want to write it. + */ + if ((fault_type & VM_PROT_WRITE) == 0) { + prot &= ~VM_PROT_WRITE; + m->copy_on_write = TRUE; + } + else { + /* + * Try to get the lock on the copy_object. + */ + if (!vm_object_lock_try(copy_object)) { + vm_object_unlock(object); + /* should spin a bit here... */ + vm_object_lock(object); + goto RetryCopy; + } + + /* + * Make another reference to the copy-object, + * to keep it from disappearing during the + * copy. + */ + copy_object->ref_count++; + + /* + * Does the page exist in the copy? + */ + copy_offset = first_offset + - copy_object->shadow_offset; + copy_m = vm_page_lookup(copy_object, copy_offset); + if (page_exists = (copy_m != NULL)) { + if (copy_m->busy) { +#ifdef DOTHREADS + int wait_result; + + /* + * If the page is being brought + * in, wait for it and then retry. + */ + PAGE_ASSERT_WAIT(copy_m, !change_wiring); + RELEASE_PAGE(m); + copy_object->ref_count--; + vm_object_unlock(copy_object); + UNLOCK_THINGS; + thread_block(); + wait_result = current_thread()->wait_result; + vm_object_deallocate(first_object); + if (wait_result != THREAD_AWAKENED) + return(KERN_SUCCESS); + goto RetryFault; +#else + /* + * If the page is being brought + * in, wait for it and then retry. + */ + PAGE_ASSERT_WAIT(copy_m, !change_wiring); + RELEASE_PAGE(m); + copy_object->ref_count--; + vm_object_unlock(copy_object); + UNLOCK_THINGS; +thread_wakeup(&vm_pages_needed); /* XXX */ + thread_block(); + vm_object_deallocate(first_object); + goto RetryFault; +#endif + } + } + + /* + * If the page is not in memory (in the object) + * and the object has a pager, we have to check + * if the pager has the data in secondary + * storage. + */ + if (!page_exists) { + + /* + * If we don't allocate a (blank) page + * here... another thread could try + * to page it in, allocate a page, and + * then block on the busy page in its + * shadow (first_object). Then we'd + * trip over the busy page after we + * found that the copy_object's pager + * doesn't have the page... + */ + copy_m = vm_page_alloc(copy_object, + copy_offset); + if (copy_m == NULL) { + /* + * Wait for a page, then retry. + */ + RELEASE_PAGE(m); + copy_object->ref_count--; + vm_object_unlock(copy_object); + UNLOCK_AND_DEALLOCATE; + VM_WAIT; + goto RetryFault; + } + + if (copy_object->pager != NULL) { + vm_object_unlock(object); + vm_object_unlock(copy_object); + UNLOCK_MAP; + + page_exists = vm_pager_has_page( + copy_object->pager, + (copy_offset + copy_object->paging_offset)); + + vm_object_lock(copy_object); + + /* + * Since the map is unlocked, someone + * else could have copied this object + * and put a different copy_object + * between the two. 
Or, the last + * reference to the copy-object (other + * than the one we have) may have + * disappeared - if that has happened, + * we don't need to make the copy. + */ + if (copy_object->shadow != object || + copy_object->ref_count == 1) { + /* + * Gaah... start over! + */ + FREE_PAGE(copy_m); + vm_object_unlock(copy_object); + vm_object_deallocate(copy_object); + /* may block */ + vm_object_lock(object); + goto RetryCopy; + } + vm_object_lock(object); + + if (page_exists) { + /* + * We didn't need the page + */ + FREE_PAGE(copy_m); + } + } + } + if (!page_exists) { + /* + * Must copy page into copy-object. + */ + vm_page_copy(m, copy_m); + copy_m->fake = FALSE; + copy_m->absent = FALSE; + + /* + * Things to remember: + * 1. The copied page must be marked 'dirty' + * so it will be paged out to the copy + * object. + * 2. If the old page was in use by any users + * of the copy-object, it must be removed + * from all pmaps. (We can't know which + * pmaps use it.) + */ + vm_page_lock_queues(); + pmap_page_protect(VM_PAGE_TO_PHYS(old_m), + VM_PROT_NONE); + copy_m->clean = FALSE; + vm_page_activate(copy_m); /* XXX */ + vm_page_unlock_queues(); + + PAGE_WAKEUP(copy_m); + } + /* + * The reference count on copy_object must be + * at least 2: one for our extra reference, + * and at least one from the outside world + * (we checked that when we last locked + * copy_object). + */ + copy_object->ref_count--; + vm_object_unlock(copy_object); + m->copy_on_write = FALSE; + } + } + + if (m->active || m->inactive) + panic("vm_fault: active or inactive before retrying lookup"); + + /* + * We must verify that the maps have not changed + * since our last lookup. + */ + + if (!lookup_still_valid) { + vm_object_t retry_object; + vm_offset_t retry_offset; + vm_prot_t retry_prot; + + /* + * Since map entries may be pageable, make sure we can + * take a page fault on them. + */ + vm_object_unlock(object); + + /* + * To avoid trying to write_lock the map while another + * thread has it read_locked (in vm_map_pageable), we + * do not try for write permission. If the page is + * still writable, we will get write permission. If it + * is not, or has been marked needs_copy, we enter the + * mapping without write permission, and will merely + * take another fault. + */ + result = vm_map_lookup(&map, vaddr, + fault_type & ~VM_PROT_WRITE, &entry, + &retry_object, &retry_offset, &retry_prot, + &wired, &su); + + vm_object_lock(object); + + /* + * If we don't need the page any longer, put it on the + * active list (the easiest thing to do here). If no + * one needs it, pageout will grab it eventually. + */ + + if (result != KERN_SUCCESS) { + RELEASE_PAGE(m); + UNLOCK_AND_DEALLOCATE; + return(result); + } + + lookup_still_valid = TRUE; + + if ((retry_object != first_object) || + (retry_offset != first_offset)) { + RELEASE_PAGE(m); + UNLOCK_AND_DEALLOCATE; + goto RetryFault; + } + + /* + * Check whether the protection has changed or the object + * has been copied while we left the map unlocked. + * Changing from read to write permission is OK - we leave + * the page write-protected, and catch the write fault. + * Changing from write to read permission means that we + * can't mark the page write-enabled after all. 
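+ *
+ * For example: if the original lookup granted read/write but the
+ * retry returns read-only, "prot &= retry_prot" leaves read-only;
+ * the page is entered without write permission and a subsequent
+ * write simply takes another fault.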
+ */ + prot &= retry_prot; + if (m->copy_on_write) + prot &= ~VM_PROT_WRITE; + } + + /* + * (the various bits we're fiddling with here are locked by + * the object's lock) + */ + + /* XXX This distorts the meaning of the copy_on_write bit */ + + if (prot & VM_PROT_WRITE) + m->copy_on_write = FALSE; + + /* + * It's critically important that a wired-down page be faulted + * only once in each map for which it is wired. + */ + + if (m->active || m->inactive) + panic("vm_fault: active or inactive before pmap_enter"); + + vm_object_unlock(object); + + /* + * Put this page into the physical map. + * We had to do the unlock above because pmap_enter + * may cause other faults. We don't put the + * page back on the active queue until later so + * that the page-out daemon won't find us (yet). + */ + + pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m), + prot & ~(m->page_lock), wired); + + /* + * If the page is not wired down, then put it where the + * pageout daemon can find it. + */ + vm_object_lock(object); + vm_page_lock_queues(); + if (change_wiring) { + if (wired) + vm_page_wire(m); + else + vm_page_unwire(m); + } + else + vm_page_activate(m); + vm_page_unlock_queues(); + + /* + * Unlock everything, and return + */ + + PAGE_WAKEUP(m); + UNLOCK_AND_DEALLOCATE; + + return(KERN_SUCCESS); + +} + +/* + * vm_fault_wire: + * + * Wire down a range of virtual addresses in a map. + */ +void vm_fault_wire(map, start, end) + vm_map_t map; + vm_offset_t start, end; +{ + + register vm_offset_t va; + register pmap_t pmap; + + pmap = vm_map_pmap(map); + + /* + * Inform the physical mapping system that the + * range of addresses may not fault, so that + * page tables and such can be locked down as well. + */ + + pmap_pageable(pmap, start, end, FALSE); + + /* + * We simulate a fault to get the page and enter it + * in the physical map. + */ + + for (va = start; va < end; va += PAGE_SIZE) { + (void) vm_fault(map, va, VM_PROT_NONE, TRUE); + } +} + + +/* + * vm_fault_unwire: + * + * Unwire a range of virtual addresses in a map. + */ +void vm_fault_unwire(map, start, end) + vm_map_t map; + vm_offset_t start, end; +{ + + register vm_offset_t va, pa; + register pmap_t pmap; + + pmap = vm_map_pmap(map); + + /* + * Since the pages are wired down, we must be able to + * get their mappings from the physical map system. + */ + + vm_page_lock_queues(); + + for (va = start; va < end; va += PAGE_SIZE) { + pa = pmap_extract(pmap, va); + if (pa == (vm_offset_t) 0) { + panic("unwire: page not in pmap"); + } + pmap_change_wiring(pmap, va, FALSE); + vm_page_unwire(PHYS_TO_VM_PAGE(pa)); + } + vm_page_unlock_queues(); + + /* + * Inform the physical mapping system that the range + * of addresses may fault, so that page tables and + * such may be unwired themselves. + */ + + pmap_pageable(pmap, start, end, TRUE); + +} + +/* + * Routine: + * vm_fault_copy_entry + * Function: + * Copy all of the pages from a wired-down map entry to another. + * + * In/out conditions: + * The source and destination maps must be locked for write. + * The source map entry must be wired down (or be a sharing map + * entry corresponding to a main map entry that is wired down). 
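+ *
+ * In outline (illustrative), each page in the entry's range is
+ * handled as:
+ *
+ *	dst_m = vm_page_alloc(dst_object, dst_offset);	-- may wait
+ *	src_m = vm_page_lookup(src_object, dst_offset + src_offset);
+ *	vm_page_copy(src_m, dst_m);
+ *	pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
+ *	    prot, FALSE);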
+ */ + +void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry) + vm_map_t dst_map; + vm_map_t src_map; + vm_map_entry_t dst_entry; + vm_map_entry_t src_entry; +{ + + vm_object_t dst_object; + vm_object_t src_object; + vm_offset_t dst_offset; + vm_offset_t src_offset; + vm_prot_t prot; + vm_offset_t vaddr; + vm_page_t dst_m; + vm_page_t src_m; + +#ifdef lint + src_map++; +#endif lint + + src_object = src_entry->object.vm_object; + src_offset = src_entry->offset; + + /* + * Create the top-level object for the destination entry. + * (Doesn't actually shadow anything - we copy the pages + * directly.) + */ + dst_object = vm_object_allocate( + (vm_size_t) (dst_entry->end - dst_entry->start)); + + dst_entry->object.vm_object = dst_object; + dst_entry->offset = 0; + + prot = dst_entry->max_protection; + + /* + * Loop through all of the pages in the entry's range, copying + * each one from the source object (it should be there) to the + * destination object. + */ + for (vaddr = dst_entry->start, dst_offset = 0; + vaddr < dst_entry->end; + vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) { + + /* + * Allocate a page in the destination object + */ + vm_object_lock(dst_object); + do { + dst_m = vm_page_alloc(dst_object, dst_offset); + if (dst_m == NULL) { + vm_object_unlock(dst_object); + VM_WAIT; + vm_object_lock(dst_object); + } + } while (dst_m == NULL); + + /* + * Find the page in the source object, and copy it in. + * (Because the source is wired down, the page will be + * in memory.) + */ + vm_object_lock(src_object); + src_m = vm_page_lookup(src_object, dst_offset + src_offset); + if (src_m == NULL) + panic("vm_fault_copy_wired: page missing"); + + vm_page_copy(src_m, dst_m); + + /* + * Enter it in the pmap... + */ + vm_object_unlock(src_object); + vm_object_unlock(dst_object); + + pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m), + prot, FALSE); + + /* + * Mark it no longer busy, and put it on the active list. + */ + vm_object_lock(dst_object); + vm_page_lock_queues(); + vm_page_activate(dst_m); + vm_page_unlock_queues(); + PAGE_WAKEUP(dst_m); + vm_object_unlock(dst_object); + } + +} diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c new file mode 100644 index 000000000000..59e6b3f50a18 --- /dev/null +++ b/sys/vm/vm_glue.c @@ -0,0 +1,560 @@ +/* + * Copyright (c) 1991 Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_glue.c 7.8 (Berkeley) 5/15/91 + * $Id: vm_glue.c,v 1.9 1993/10/19 00:54:49 nate Exp $ + */ + +/* + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + */ + +#include "ddb.h" +#include "param.h" +#include "systm.h" +#include "proc.h" +#include "resourcevar.h" +#include "buf.h" +#include "user.h" + +#include "vm.h" +#include "vm_page.h" +#include "vm_kern.h" + +int avefree = 0; /* XXX */ +int readbuffers = 0; /* XXX allow kgdb to read kernel buffer pool */ + +kernacc(addr, len, rw) + caddr_t addr; + int len, rw; +{ + boolean_t rv; + vm_offset_t saddr, eaddr; + vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE; + + saddr = trunc_page(addr); + eaddr = round_page(addr+len); + rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot); + /* + * XXX there are still some things (e.g. the buffer cache) that + * are managed behind the VM system's back so even though an + * address is accessible in the mind of the VM system, there may + * not be physical pages where the VM thinks there is. This can + * lead to bogus allocation of pages in the kernel address space + * or worse, inconsistencies at the pmap level. We only worry + * about the buffer cache for now. + */ + if (!readbuffers && rv && (eaddr > (vm_offset_t)buffers && + saddr < (vm_offset_t)buffers + MAXBSIZE * nbuf)) + rv = FALSE; + return(rv == TRUE); +} + +useracc(addr, len, rw) + caddr_t addr; + int len, rw; +{ + boolean_t rv; + vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE; + + /* + * XXX - specially disallow access to user page tables - they are + * in the map. + * + * XXX - don't specially disallow access to the user area - treat + * it as incorrectly as elsewhere. + * + * XXX - VM_MAXUSER_ADDRESS is an end address, not a max. It was + * only used (as an end address) in trap.c. 
Use it as an end + * address here too. + */ + if ((vm_offset_t) addr >= VM_MAXUSER_ADDRESS + || (vm_offset_t) addr + len > VM_MAXUSER_ADDRESS + || (vm_offset_t) addr + len <= (vm_offset_t) addr) + return (FALSE); + + rv = vm_map_check_protection(&curproc->p_vmspace->vm_map, + trunc_page(addr), round_page(addr+len), prot); + return(rv == TRUE); +} + +#ifdef KGDB +/* + * Change protections on kernel pages from addr to addr+len + * (presumably so debugger can plant a breakpoint). + * All addresses are assumed to reside in the Sysmap, + */ +chgkprot(addr, len, rw) + register caddr_t addr; + int len, rw; +{ + vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE; + + vm_map_protect(kernel_map, trunc_page(addr), + round_page(addr+len), prot, FALSE); +} +#endif + +vslock(addr, len) + caddr_t addr; + u_int len; +{ + vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr), + round_page(addr+len), FALSE); +} + +vsunlock(addr, len, dirtied) + caddr_t addr; + u_int len; + int dirtied; +{ +#ifdef lint + dirtied++; +#endif lint + vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr), + round_page(addr+len), TRUE); +} + +/* + * Implement fork's actions on an address space. + * Here we arrange for the address space to be copied or referenced, + * allocate a user struct (pcb and kernel stack), then call the + * machine-dependent layer to fill those in and make the new process + * ready to run. + * NOTE: the kernel stack may be at a different location in the child + * process, and thus addresses of automatic variables may be invalid + * after cpu_fork returns in the child process. We do nothing here + * after cpu_fork returns. + */ +vm_fork(p1, p2, isvfork) + register struct proc *p1, *p2; + int isvfork; +{ + register struct user *up; + vm_offset_t addr; + +#ifdef i386 + /* + * avoid copying any of the parent's pagetables or other per-process + * objects that reside in the map by marking all of them non-inheritable + */ + (void)vm_map_inherit(&p1->p_vmspace->vm_map, + UPT_MIN_ADDRESS-UPAGES*NBPG, VM_MAX_ADDRESS, VM_INHERIT_NONE); +#endif + p2->p_vmspace = vmspace_fork(p1->p_vmspace); + +#ifdef SYSVSHM + if (p1->p_vmspace->vm_shm) + shmfork(p1, p2, isvfork); +#endif + + /* + * Allocate a wired-down (for now) pcb and kernel stack for the process + */ +#ifdef notyet + addr = kmem_alloc_pageable(kernel_map, ctob(UPAGES)); + vm_map_pageable(kernel_map, addr, addr + ctob(UPAGES), FALSE); +#else + addr = kmem_alloc(kernel_map, ctob(UPAGES)); +#endif + up = (struct user *)addr; + p2->p_addr = up; + + /* + * p_stats and p_sigacts currently point at fields + * in the user struct but not at &u, instead at p_addr. + * Copy p_sigacts and parts of p_stats; zero the rest + * of p_stats (statistics). + */ + p2->p_stats = &up->u_stats; + p2->p_sigacts = &up->u_sigacts; + up->u_sigacts = *p1->p_sigacts; + bzero(&up->u_stats.pstat_startzero, + (unsigned) ((caddr_t)&up->u_stats.pstat_endzero - + (caddr_t)&up->u_stats.pstat_startzero)); + bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy, + ((caddr_t)&up->u_stats.pstat_endcopy - + (caddr_t)&up->u_stats.pstat_startcopy)); + +#ifdef i386 + { u_int addr = UPT_MIN_ADDRESS - UPAGES*NBPG; struct vm_map *vp; + + vp = &p2->p_vmspace->vm_map; + + /* ream out old pagetables and kernel stack */ + (void)vm_deallocate(vp, addr, UPT_MAX_ADDRESS - addr); + (void)vm_allocate(vp, &addr, UPT_MAX_ADDRESS - addr, FALSE); + } +#endif + /* + * cpu_fork will copy and update the kernel stack and pcb, + * and make the child ready to run. 
It marks the child + * so that it can return differently than the parent. + * It returns twice, once in the parent process and + * once in the child. + */ + return (cpu_fork(p1, p2)); +} + +/* + * Set default limits for VM system. + * Called for proc 0, and then inherited by all others. + */ +vm_init_limits(p) + register struct proc *p; +{ + + /* + * Set up the initial limits on process VM. + * Set the maximum resident set size to be all + * of (reasonably) available memory. This causes + * any single, large process to start random page + * replacement once it fills memory. + */ + p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ; + p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ; + p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ; + p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ; + p->p_rlimit[RLIMIT_RSS].rlim_cur = p->p_rlimit[RLIMIT_RSS].rlim_max = + ptoa(vm_page_free_count); +} + +#include "../vm/vm_pageout.h" + +#ifdef DEBUG +int enableswap = 1; +int swapdebug = 0; +#define SDB_FOLLOW 1 +#define SDB_SWAPIN 2 +#define SDB_SWAPOUT 4 +#endif + +/* + * Brutally simple: + * 1. Attempt to swapin every swaped-out, runnable process in + * order of priority. + * 2. If not enough memory, wake the pageout daemon and let it + * clear some space. + */ +sched() +{ + register struct proc *p; + register int pri; + struct proc *pp; + int ppri; + vm_offset_t addr; + vm_size_t size; + +loop: +#ifdef DEBUG + if (!enableswap) { + pp = NULL; + goto noswap; + } +#endif + pp = NULL; + ppri = INT_MIN; + for (p = allproc; p != NULL; p = p->p_nxt) + if (p->p_stat == SRUN && (p->p_flag & SLOAD) == 0) { + pri = p->p_time + p->p_slptime - p->p_nice * 8; + if (pri > ppri) { + pp = p; + ppri = pri; + } + } +#ifdef DEBUG + if (swapdebug & SDB_FOLLOW) + printf("sched: running, procp %x pri %d\n", pp, ppri); +noswap: +#endif + /* + * Nothing to do, back to sleep + */ + if ((p = pp) == NULL) { + sleep((caddr_t)&proc0, PVM); + goto loop; + } + + /* + * We would like to bring someone in. + * This part is really bogus cuz we could deadlock on memory + * despite our feeble check. + */ + size = round_page(ctob(UPAGES)); + addr = (vm_offset_t) p->p_addr; + if (vm_page_free_count > atop(size)) { +#ifdef DEBUG + if (swapdebug & SDB_SWAPIN) + printf("swapin: pid %d(%s)@%x, pri %d free %d\n", + p->p_pid, p->p_comm, p->p_addr, + ppri, vm_page_free_count); +#endif + vm_map_pageable(kernel_map, addr, addr+size, FALSE); + (void) splclock(); + if (p->p_stat == SRUN) + setrq(p); + p->p_flag |= SLOAD; + (void) spl0(); + p->p_time = 0; + goto loop; + } + /* + * Not enough memory, jab the pageout daemon and wait til the + * coast is clear. + */ +#ifdef DEBUG + if (swapdebug & SDB_FOLLOW) + printf("sched: no room for pid %d(%s), free %d\n", + p->p_pid, p->p_comm, vm_page_free_count); +#endif + (void) splhigh(); + VM_WAIT; + (void) spl0(); +#ifdef DEBUG + if (swapdebug & SDB_FOLLOW) + printf("sched: room again, free %d\n", vm_page_free_count); +#endif + goto loop; +} + +#define swappable(p) \ + (((p)->p_flag & (SSYS|SLOAD|SKEEP|SWEXIT|SPHYSIO)) == SLOAD) + +/* + * Swapout is driven by the pageout daemon. Very simple, we find eligible + * procs and unwire their u-areas. We try to always "swap" at least one + * process in case we need the room for a swapin. + * If any procs have been sleeping/stopped for at least maxslp seconds, + * they are swapped. Else, we swap the longest-sleeping or stopped process, + * if any, otherwise the longest-resident process. 
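+ *
+ * For example (illustrative; say maxslp is 20): a process asleep 30
+ * seconds is swapped outright; otherwise one asleep 15 seconds is
+ * preferred over one asleep 5; failing any sleeper, the SRUN process
+ * with the largest p_time (longest resident) is the candidate.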
+ */ +swapout_threads() +{ + register struct proc *p; + struct proc *outp, *outp2; + int outpri, outpri2; + int didswap = 0; + extern int maxslp; + +#ifdef DEBUG + if (!enableswap) + return; +#endif + outp = outp2 = NULL; + outpri = outpri2 = 0; + for (p = allproc; p != NULL; p = p->p_nxt) { + if (!swappable(p)) + continue; + switch (p->p_stat) { + case SRUN: + if (p->p_time > outpri2) { + outp2 = p; + outpri2 = p->p_time; + } + continue; + + case SSLEEP: + case SSTOP: + if (p->p_slptime > maxslp) { + swapout(p); + didswap++; + } else if (p->p_slptime > outpri) { + outp = p; + outpri = p->p_slptime; + } + continue; + } + } + /* + * If we didn't get rid of any real duds, toss out the next most + * likely sleeping/stopped or running candidate. We only do this + * if we are real low on memory since we don't gain much by doing + * it (UPAGES pages). + */ + if (didswap == 0 && + vm_page_free_count <= atop(round_page(ctob(UPAGES)))) { + if ((p = outp) == 0) + p = outp2; +#ifdef DEBUG + if (swapdebug & SDB_SWAPOUT) + printf("swapout_threads: no duds, try procp %x\n", p); +#endif + if (p) + swapout(p); + } +} + +swapout(p) + register struct proc *p; +{ + vm_offset_t addr; + vm_size_t size; + +#ifdef DEBUG + if (swapdebug & SDB_SWAPOUT) + printf("swapout: pid %d(%s)@%x, stat %x pri %d free %d\n", + p->p_pid, p->p_comm, p->p_addr, p->p_stat, + p->p_slptime, vm_page_free_count); +#endif + size = round_page(ctob(UPAGES)); + addr = (vm_offset_t) p->p_addr; + p->p_stats->p_ru.ru_nswap++ ; /* record in resource stats */ +#ifdef notyet +#ifdef hp300 + /* + * Ugh! u-area is double mapped to a fixed address behind the + * back of the VM system and accesses are usually through that + * address rather than the per-process address. Hence reference + * and modify information are recorded at the fixed address and + * lost at context switch time. We assume the u-struct and + * kernel stack are always accessed/modified and force it to be so. 
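+ *
+ * (The loop below forces this by reading and rewriting one long in
+ * each u-area page, so the pmap records both a reference and a
+ * modification for every page before the area is swapped.)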
+ */ + { + register int i; + volatile long tmp; + + for (i = 0; i < UPAGES; i++) { + tmp = *(long *)addr; *(long *)addr = tmp; + addr += NBPG; + } + addr = (vm_offset_t) p->p_addr; + } +#endif + vm_map_pageable(kernel_map, addr, addr+size, TRUE); + pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map)); +#endif + (void) splhigh(); + p->p_flag &= ~SLOAD; + if (p->p_stat == SRUN) + remrq(p); + (void) spl0(); + p->p_time = 0; +} + +/* + * The rest of these routines fake thread handling + */ + +void +assert_wait(event, ruptible) + int event; + boolean_t ruptible; +{ +#ifdef lint + ruptible++; +#endif + curproc->p_thread = event; +} + +void +thread_block() +{ + int s = splhigh(); + + if (curproc->p_thread) + sleep((caddr_t)curproc->p_thread, PVM); + splx(s); +} + +thread_sleep(event, lock, ruptible) + int event; + simple_lock_t lock; + boolean_t ruptible; +{ +#ifdef lint + ruptible++; +#endif + int s = splhigh(); + + curproc->p_thread = event; + simple_unlock(lock); + if (curproc->p_thread) + sleep((caddr_t)event, PVM); + splx(s); +} + +thread_wakeup(event) + int event; +{ + int s = splhigh(); + + wakeup((caddr_t)event); + splx(s); +} + +/* + * DEBUG stuff + */ + +#if defined(DEBUG) || (NDDB > 0) +int indent = 0; + +/*ARGSUSED2*/ +iprintf(a, b, c, d, e, f, g, h) + char *a; +{ + register int i; + + i = indent; + while (i >= 8) { + printf("\t"); + i -= 8; + } + for (; i > 0; --i) + printf(" "); + printf(a, b, c, d, e, f, g, h); +} +#endif /* defined(DEBUG) || (NDDB > 0) */ diff --git a/sys/vm/vm_inherit.h b/sys/vm/vm_inherit.h new file mode 100644 index 000000000000..e748162bb172 --- /dev/null +++ b/sys/vm/vm_inherit.h @@ -0,0 +1,93 @@ +/* + * Copyright (c) 1991 Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_inherit.h 7.2 (Berkeley) 4/21/91 + * $Id: vm_inherit.h,v 1.2 1993/10/16 16:20:27 rgrimes Exp $ + */ + +/* + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + */ + +/* + * Virtual memory map inheritance definitions. + */ + +#ifndef _VM_INHERIT_ +#define _VM_INHERIT_ + +/* + * Types defined: + * + * vm_inherit_t inheritance codes. + */ + +typedef int vm_inherit_t; /* might want to change this */ + +/* + * Enumeration of valid values for vm_inherit_t. + */ + +#define VM_INHERIT_SHARE ((vm_inherit_t) 0) /* share with child */ +#define VM_INHERIT_COPY ((vm_inherit_t) 1) /* copy into child */ +#define VM_INHERIT_NONE ((vm_inherit_t) 2) /* absent from child */ +#define VM_INHERIT_DONATE_COPY ((vm_inherit_t) 3) /* copy and delete */ + +#define VM_INHERIT_DEFAULT VM_INHERIT_COPY + +#endif _VM_INHERIT_ diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c new file mode 100644 index 000000000000..1899300bac56 --- /dev/null +++ b/sys/vm/vm_init.c @@ -0,0 +1,104 @@ +/* + * Copyright (c) 1991 Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_init.c 7.3 (Berkeley) 4/21/91 + * $Id: vm_init.c,v 1.2 1993/10/16 16:20:28 rgrimes Exp $ + */ + +/* + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + */ + +/* + * Initialize the Virtual Memory subsystem. + */ + +#include "param.h" + +#include "vm.h" +#include "vm_page.h" +#include "vm_kern.h" + +/* + * vm_init initializes the virtual memory system. + * This is done only by the first cpu up. + * + * The start and end address of physical memory is passed in. + */ + +void vm_mem_init() +{ + extern vm_offset_t avail_start, avail_end; + extern vm_offset_t virtual_avail, virtual_end; + + /* + * Initializes resident memory structures. + * From here on, all physical memory is accounted for, + * and we use only virtual addresses. + */ + + virtual_avail = vm_page_startup(avail_start, avail_end, virtual_avail); + /* + * Initialize other VM packages + */ + vm_object_init(); + vm_map_startup(); + kmem_init(virtual_avail, virtual_end); + pmap_init(avail_start, avail_end); + vm_pager_init(); +} diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c new file mode 100644 index 000000000000..e042d0ad9b1f --- /dev/null +++ b/sys/vm/vm_kern.c @@ -0,0 +1,610 @@ +/* + * Copyright (c) 1991 Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_kern.c 7.4 (Berkeley) 5/7/91 + * $Id: vm_kern.c,v 1.3 1993/10/16 16:20:30 rgrimes Exp $ + */ + +/* + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + * + */ + +/* + * Kernel memory management. + */ + +#include "param.h" +#include "syslog.h" + +#include "vm.h" +#include "vm_page.h" +#include "vm_pageout.h" +#include "vm_kern.h" + +/* + * kmem_alloc_pageable: + * + * Allocate pageable memory to the kernel's address map. + * map must be "kernel_map" below. 
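+ *
+ *	An illustrative use (hypothetical caller, not from this
+ *	file); the pages behind the range are created on first
+ *	touch and may be paged out again, so use kmem_alloc()
+ *	below when wired memory is required:
+ *
+ *		vm_offset_t va;
+ *
+ *		va = kmem_alloc_pageable(kernel_map, 4 * NBPG);
+ *		if (va == 0)
+ *			panic("out of kernel virtual space");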
+ */ + +vm_offset_t kmem_alloc_pageable(map, size) + vm_map_t map; + register vm_size_t size; +{ + vm_offset_t addr; + register int result; + +#if 0 + if (map != kernel_map) + panic("kmem_alloc_pageable: not called with kernel_map"); +#endif 0 + + size = round_page(size); + + addr = vm_map_min(map); + result = vm_map_find(map, NULL, (vm_offset_t) 0, + &addr, size, TRUE); + if (result != KERN_SUCCESS) { + return(0); + } + + return(addr); +} + +/* + * Allocate wired-down memory in the kernel's address map + * or a submap. + */ +vm_offset_t kmem_alloc(map, size) + register vm_map_t map; + register vm_size_t size; +{ + vm_offset_t addr; + register int result; + register vm_offset_t offset; + extern vm_object_t kernel_object; + vm_offset_t i; + + size = round_page(size); + + /* + * Use the kernel object for wired-down kernel pages. + * Assume that no region of the kernel object is + * referenced more than once. + */ + + addr = vm_map_min(map); + result = vm_map_find(map, NULL, (vm_offset_t) 0, + &addr, size, TRUE); + if (result != KERN_SUCCESS) { + return(0); + } + + /* + * Since we didn't know where the new region would + * start, we couldn't supply the correct offset into + * the kernel object. Re-allocate that address + * region with the correct offset. + */ + + offset = addr - VM_MIN_KERNEL_ADDRESS; + vm_object_reference(kernel_object); + + vm_map_lock(map); + vm_map_delete(map, addr, addr + size); + vm_map_insert(map, kernel_object, offset, addr, addr + size); + vm_map_unlock(map); + + /* + * Guarantee that there are pages already in this object + * before calling vm_map_pageable. This is to prevent the + * following scenario: + * + * 1) Threads have swapped out, so that there is a + * pager for the kernel_object. + * 2) The kmsg zone is empty, and so we are kmem_allocing + * a new page for it. + * 3) vm_map_pageable calls vm_fault; there is no page, + * but there is a pager, so we call + * pager_data_request. But the kmsg zone is empty, + * so we must kmem_alloc. + * 4) goto 1 + * 5) Even if the kmsg zone is not empty: when we get + * the data back from the pager, it will be (very + * stale) non-zero data. kmem_alloc is defined to + * return zero-filled memory. + * + * We're intentionally not activating the pages we allocate + * to prevent a race with page-out. vm_map_pageable will wire + * the pages. + */ + + vm_object_lock(kernel_object); + for (i = 0 ; i < size; i+= PAGE_SIZE) { + vm_page_t mem; + + while ((mem = vm_page_alloc(kernel_object, offset+i)) == NULL) { + vm_object_unlock(kernel_object); + VM_WAIT; + vm_object_lock(kernel_object); + } + vm_page_zero_fill(mem); + mem->busy = FALSE; + } + vm_object_unlock(kernel_object); + + /* + * And finally, mark the data as non-pageable. + */ + + (void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE); + + /* + * Try to coalesce the map + */ + + vm_map_simplify(map, addr); + + return(addr); +} + +/* + * kmem_free: + * + * Release a region of kernel virtual memory allocated + * with kmem_alloc, and return the physical pages + * associated with that region. + */ +void kmem_free(map, addr, size) + vm_map_t map; + register vm_offset_t addr; + vm_size_t size; +{ + (void) vm_map_remove(map, trunc_page(addr), round_page(addr + size)); + vm_map_simplify(map, addr); +} + +/* + * kmem_suballoc: + * + * Allocates a map to manage a subrange + * of the kernel virtual address space. 
+ * + * Arguments are as follows: + * + * parent Map to take range from + * size Size of range to find + * min, max Returned endpoints of map + * pageable Can the region be paged + */ +vm_map_t kmem_suballoc(parent, min, max, size, pageable) + register vm_map_t parent; + vm_offset_t *min, *max; + register vm_size_t size; + boolean_t pageable; +{ + register int ret; + vm_map_t result; + + size = round_page(size); + + *min = (vm_offset_t) vm_map_min(parent); + ret = vm_map_find(parent, NULL, (vm_offset_t) 0, + min, size, TRUE); + if (ret != KERN_SUCCESS) { + printf("kmem_suballoc: bad status return of %d.\n", ret); + panic("kmem_suballoc"); + } + *max = *min + size; + pmap_reference(vm_map_pmap(parent)); + result = vm_map_create(vm_map_pmap(parent), *min, *max, pageable); + if (result == NULL) + panic("kmem_suballoc: cannot create submap"); + if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS) + panic("kmem_suballoc: unable to change range to submap"); + return(result); +} + +/* + * vm_move: + * + * Move memory from source to destination map, possibly deallocating + * the source map reference to the memory. + * + * Parameters are as follows: + * + * src_map Source address map + * src_addr Address within source map + * dst_map Destination address map + * num_bytes Amount of data (in bytes) to copy/move + * src_dealloc Should source be removed after copy? + * + * Assumes the src and dst maps are not already locked. + * + * Returns new destination address or 0 (if a failure occurs). + */ +vm_offset_t vm_move(src_map,src_addr,dst_map,num_bytes,src_dealloc) + vm_map_t src_map; + register vm_offset_t src_addr; + register vm_map_t dst_map; + vm_offset_t num_bytes; + boolean_t src_dealloc; +{ + register vm_offset_t src_start; /* Beginning of region */ + register vm_size_t src_size; /* Size of rounded region */ + vm_offset_t dst_start; /* destination address */ + register int result; + + /* + * Page-align the source region + */ + + src_start = trunc_page(src_addr); + src_size = round_page(src_addr + num_bytes) - src_start; + + /* + * If there's no destination, we can be at most deallocating + * the source range. + */ + if (dst_map == NULL) { + if (src_dealloc) + if (vm_deallocate(src_map, src_start, src_size) + != KERN_SUCCESS) { + printf("vm_move: deallocate of source"); + printf(" failed, dealloc_only clause\n"); + } + return(0); + } + + /* + * Allocate a place to put the copy + */ + + dst_start = (vm_offset_t) 0; + if ((result = vm_allocate(dst_map, &dst_start, src_size, TRUE)) + == KERN_SUCCESS) { + /* + * Perform the copy, asking for deallocation if desired + */ + result = vm_map_copy(dst_map, src_map, dst_start, src_size, + src_start, FALSE, src_dealloc); + } + + /* + * Return the destination address corresponding to + * the source address given (rather than the front + * of the newly-allocated page). + */ + + if (result == KERN_SUCCESS) + return(dst_start + (src_addr - src_start)); + return(0); +} + +/* + * Allocate wired-down memory in the kernel's address map for the higher + * level kernel memory allocator (kern/kern_malloc.c). We cannot use + * kmem_alloc() because we may need to allocate memory at interrupt + * level where we cannot block (canwait == FALSE). + * + * This routine has its own private kernel submap (kmem_map) and object + * (kmem_object). This, combined with the fact that only malloc uses + * this routine, ensures that we will never block in map or object waits. 
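+ *
+ *	An illustrative call of the kind kern_malloc.c makes (a
+ *	sketch, not copied from that file; "va" and "size" are the
+ *	caller's own variables):
+ *
+ *		vm_offset_t va;
+ *
+ *		va = kmem_malloc(kmem_map, (vm_size_t)round_page(size),
+ *				canwait);
+ *		if (va == 0)
+ *			... out of kernel virtual space ...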
+ * + * Note that this still only works in a uni-processor environment and + * when called at splhigh(). + * + * We don't worry about expanding the map (adding entries) since entries + * for wired maps are statically allocated. + */ +vm_offset_t +kmem_malloc(map, size, canwait) + register vm_map_t map; + register vm_size_t size; + boolean_t canwait; +{ + register vm_offset_t offset, i; + vm_map_entry_t entry; + vm_offset_t addr; + vm_page_t m; + extern vm_object_t kmem_object; + + if (map != kmem_map && map != mb_map) + panic("kern_malloc_alloc: map != {kmem,mb}_map"); + + size = round_page(size); + addr = vm_map_min(map); + + if (vm_map_find(map, NULL, (vm_offset_t)0, + &addr, size, TRUE) != KERN_SUCCESS) { + if (!canwait) { + if (map == kmem_map) + panic("kmem_malloc: kmem_map too small"); + else if (map == mb_map) + log(LOG_WARNING, + "kmem_malloc: mb_map too small (can't wait)\n"); + } + return 0; + } + + /* + * Since we didn't know where the new region would start, + * we couldn't supply the correct offset into the kmem object. + * Re-allocate that address region with the correct offset. + */ + offset = addr - vm_map_min(kmem_map); + vm_object_reference(kmem_object); + + vm_map_lock(map); + vm_map_delete(map, addr, addr + size); + vm_map_insert(map, kmem_object, offset, addr, addr + size); + + /* + * If we can wait, just mark the range as wired + * (will fault pages as necessary). + */ + if (canwait) { + vm_map_unlock(map); + (void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, + FALSE); + vm_map_simplify(map, addr); + return(addr); + } + + /* + * If we cannot wait then we must allocate all memory up front, + * pulling it off the active queue to prevent pageout. + */ + vm_object_lock(kmem_object); + for (i = 0; i < size; i += PAGE_SIZE) { + m = vm_page_alloc(kmem_object, offset + i); + + /* + * Ran out of space, free everything up and return. + * Don't need to lock page queues here as we know + * that the pages we got aren't on any queues. + */ + if (m == NULL) { + while (i != 0) { + i -= PAGE_SIZE; + m = vm_page_lookup(kmem_object, offset + i); + vm_page_free(m); + } + vm_object_unlock(kmem_object); + vm_map_delete(map, addr, addr + size); + vm_map_unlock(map); + return(0); + } +#if 0 + vm_page_zero_fill(m); +#endif + m->busy = FALSE; + } + vm_object_unlock(kmem_object); + + /* + * Mark map entry as non-pageable. + * Assert: vm_map_insert() will never be able to extend the previous + * entry so there will be a new entry exactly corresponding to this + * address range and it will have wired_count == 0. + */ + if (!vm_map_lookup_entry(map, addr, &entry) || + entry->start != addr || entry->end != addr + size || + entry->wired_count) + panic("kmem_malloc: entry not found or misaligned"); + entry->wired_count++; + + /* + * Loop thru pages, entering them in the pmap. + * (We cannot add them to the wired count without + * wrapping the vm_page_queue_lock in splimp...) + */ + for (i = 0; i < size; i += PAGE_SIZE) { + vm_object_lock(kmem_object); + m = vm_page_lookup(kmem_object, offset + i); + vm_object_unlock(kmem_object); + pmap_enter(map->pmap, addr + i, VM_PAGE_TO_PHYS(m), + VM_PROT_DEFAULT, TRUE); + } + vm_map_unlock(map); + + vm_map_simplify(map, addr); + return(addr); +} + +/* + * kmem_alloc_wait + * + * Allocates pageable memory from a sub-map of the kernel. If the submap + * has no room, the caller sleeps waiting for more memory in the submap. 
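+ *
+ *	Sketch of a typical submap client (hypothetical; "kva" and
+ *	"bytes" are the caller's):
+ *
+ *		kva = kmem_alloc_wait(pager_map, round_page(bytes));
+ *		... use the space ...
+ *		kmem_free_wakeup(pager_map, kva, round_page(bytes));
+ *
+ *	kmem_free_wakeup() below unblocks any thread sleeping here.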
+ * + */ +vm_offset_t kmem_alloc_wait(map, size) + vm_map_t map; + vm_size_t size; +{ + vm_offset_t addr; + int result; + + size = round_page(size); + + do { + /* + * To make this work for more than one map, + * use the map's lock to lock out sleepers/wakers. + * Unfortunately, vm_map_find also grabs the map lock. + */ + vm_map_lock(map); + lock_set_recursive(&map->lock); + + addr = vm_map_min(map); + result = vm_map_find(map, NULL, (vm_offset_t) 0, + &addr, size, TRUE); + + lock_clear_recursive(&map->lock); + if (result != KERN_SUCCESS) { + + if ( (vm_map_max(map) - vm_map_min(map)) < size ) { + vm_map_unlock(map); + return(0); + } + + assert_wait((int)map, TRUE); + vm_map_unlock(map); +thread_wakeup(&vm_pages_needed); /* XXX */ + thread_block(); + } + else { + vm_map_unlock(map); + } + + } while (result != KERN_SUCCESS); + + return(addr); +} + +/* + * kmem_alloc_wired_wait + * + * Allocates nonpageable memory from a sub-map of the kernel. If the submap + * has no room, the caller sleeps waiting for more memory in the submap. + * + */ +vm_offset_t kmem_alloc_wired_wait(map, size) + vm_map_t map; + vm_size_t size; +{ + vm_offset_t addr; + int result; + + size = round_page(size); + + do { + /* + * To make this work for more than one map, + * use the map's lock to lock out sleepers/wakers. + * Unfortunately, vm_map_find also grabs the map lock. + */ + vm_map_lock(map); + lock_set_recursive(&map->lock); + + addr = vm_map_min(map); + result = vm_map_find(map, NULL, (vm_offset_t) 0, + &addr, size, FALSE); + + lock_clear_recursive(&map->lock); + if (result != KERN_SUCCESS) { + + if ( (vm_map_max(map) - vm_map_min(map)) < size ) { + vm_map_unlock(map); + return(0); + } + + assert_wait((int)map, TRUE); + vm_map_unlock(map); +thread_wakeup(&vm_pages_needed); /* XXX */ + thread_block(); + } + else { + vm_map_unlock(map); + } + + } while (result != KERN_SUCCESS); + + return(addr); +} + +/* + * kmem_free_wakeup + * + * Returns memory to a submap of the kernel, and wakes up any threads + * waiting for memory in that map. + */ +void kmem_free_wakeup(map, addr, size) + vm_map_t map; + vm_offset_t addr; + vm_size_t size; +{ + vm_map_lock(map); + (void) vm_map_delete(map, trunc_page(addr), round_page(addr + size)); + thread_wakeup((int)map); + vm_map_unlock(map); + vm_map_simplify(map, addr); +} + +/* + * kmem_init: + * + * Initialize the kernel's virtual memory map, taking + * into account all memory allocated up to this time. + */ +void kmem_init(start, end) + vm_offset_t start; + vm_offset_t end; +{ + vm_offset_t addr; + extern vm_map_t kernel_map; + + addr = VM_MIN_KERNEL_ADDRESS; + kernel_map = vm_map_create(pmap_kernel(), addr, end, FALSE); + (void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0, + &addr, (start - VM_MIN_KERNEL_ADDRESS), + FALSE); +} diff --git a/sys/vm/vm_kern.h b/sys/vm/vm_kern.h new file mode 100644 index 000000000000..ed0abc0f97ac --- /dev/null +++ b/sys/vm/vm_kern.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 1991 Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_kern.h 7.2 (Berkeley) 4/21/91 + * $Id: vm_kern.h,v 1.2 1993/10/16 16:20:31 rgrimes Exp $ + */ + +/* + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + */ + +/* + * Kernel memory management definitions. + */ + +void kmem_init(); +vm_offset_t kmem_alloc(); +vm_offset_t kmem_alloc_pageable(); +void kmem_free(); +vm_map_t kmem_suballoc(); + +vm_offset_t vm_move(); + +vm_offset_t kmem_alloc_wait(); +void kmem_free_wakeup(); + +vm_map_t kernel_map; +vm_map_t mb_map; +vm_map_t kmem_map; +vm_map_t phys_map; +vm_map_t buffer_map; diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c new file mode 100644 index 000000000000..3422d4073dbc --- /dev/null +++ b/sys/vm/vm_map.c @@ -0,0 +1,2480 @@ +/* + * Copyright (c) 1991 Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_map.c 7.3 (Berkeley) 4/21/91 + * $Id: vm_map.c,v 1.7 1993/10/16 16:20:33 rgrimes Exp $ + */ + +/* + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + */ + +/* + * Virtual memory mapping module. + */ + +#include "ddb.h" +#include "param.h" +#include "systm.h" +#include "malloc.h" +#include "vm.h" +#include "vm_page.h" +#include "vm_object.h" + +/* + * Virtual memory maps provide for the mapping, protection, + * and sharing of virtual memory objects. In addition, + * this module provides for an efficient virtual copy of + * memory from one map to another. + * + * Synchronization is required prior to most operations. + * + * Maps consist of an ordered doubly-linked list of simple + * entries; a single hint is used to speed up lookups. + * + * In order to properly represent the sharing of virtual + * memory regions among maps, the map structure is bi-level. + * Top-level ("address") maps refer to regions of sharable + * virtual memory. 
These regions are implemented as
+ *	("sharing") maps, which then refer to the actual virtual
+ *	memory objects.  When two address maps "share" memory,
+ *	their top-level maps both have references to the same
+ *	sharing map.  When memory is virtual-copied from one
+ *	address map to another, the references in the sharing
+ *	maps are actually copied -- no copying occurs at the
+ *	virtual memory object level.
+ *
+ *	Since portions of maps are specified by start/end addresses,
+ *	which may not align with existing map entries, all
+ *	routines merely "clip" entries to these start/end values.
+ *	[That is, an entry is split into two, bordering at a
+ *	start or end value.]  Note that these clippings may not
+ *	always be necessary (as the two resulting entries are then
+ *	not changed); however, the clipping is done for convenience.
+ *	No attempt is currently made to "glue back together" two
+ *	abutting entries.
+ *
+ *	As mentioned above, virtual copy operations are performed
+ *	by copying VM object references from one sharing map to
+ *	another, and then marking both regions as copy-on-write.
+ *	It is important to note that only one writeable reference
+ *	to a VM object region exists in any map -- this means that
+ *	shadow object creation can be delayed until a write operation
+ *	occurs.
+ */
+
+/*
+ *	vm_map_startup:
+ *
+ *	Initialize the vm_map module.  Must be called before
+ *	any other vm_map routines.
+ *
+ *	Map and entry structures are allocated from the general
+ *	purpose memory pool with some exceptions:
+ *
+ *	- The kernel map and kmem submap are allocated statically.
+ *	- Kernel map entries are allocated out of a static pool.
+ *
+ *	These restrictions are necessary since malloc() uses the
+ *	maps and requires map entries.
+ */
+
+vm_offset_t	kentry_data;
+vm_size_t	kentry_data_size;
+vm_map_entry_t	kentry_free;
+vm_map_t	kmap_free;
+
+void vm_map_startup()
+{
+	register int i;
+	register vm_map_entry_t mep;
+	vm_map_t mp;
+
+	/*
+	 * Static map structures for allocation before initialization of
+	 * kernel map or kmem map.  vm_map_create knows how to deal with them.
+	 */
+	kmap_free = mp = (vm_map_t) kentry_data;
+	i = MAX_KMAP;
+	while (--i > 0) {
+		mp->header.next = (vm_map_entry_t) (mp + 1);
+		mp++;
+	}
+	mp++->header.next = NULL;
+
+	/*
+	 * Form a free list of statically allocated kernel map entries
+	 * with the rest.
+	 */
+	kentry_free = mep = (vm_map_entry_t) mp;
+	i = (kentry_data_size - MAX_KMAP * sizeof *mp) / sizeof *mep;
+	while (--i > 0) {
+		mep->next = mep + 1;
+		mep++;
+	}
+	mep->next = NULL;
+}
+
+/*
+ * Allocate a vmspace structure, including a vm_map and pmap,
+ * and initialize those structures.  The refcnt is set to 1.
+ * The remaining fields must be initialized by the caller.
+ */
+struct vmspace *
+vmspace_alloc(min, max, pageable)
+	vm_offset_t min, max;
+	int pageable;
+{
+	register struct vmspace *vm;
+
+	MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
+	bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
+	vm_map_init(&vm->vm_map, min, max, pageable);
+	pmap_pinit(&vm->vm_pmap);
+	vm->vm_map.pmap = &vm->vm_pmap;		/* XXX */
+	vm->vm_refcnt = 1;
+	return (vm);
+}
+
+void
+vmspace_free(vm)
+	register struct vmspace *vm;
+{
+
+	if (--vm->vm_refcnt == 0) {
+		/*
+		 * Lock the map, to wait out all other references to it.
+		 * Delete all of the mappings and pages they hold,
+		 * then call the pmap module to reclaim anything left.
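+		 *
+		 * Illustrative lifetime (hypothetical caller; "min" and
+		 * "max" are address-space bounds, not defined here):
+		 *
+		 *	vm = vmspace_alloc(min, max, 1);  /* refcnt == 1 */
+		 *	vm->vm_refcnt++;		  /* second holder */
+		 *	...
+		 *	vmspace_free(vm);    /* 2 -> 1: nothing happens */
+		 *	vmspace_free(vm);    /* 1 -> 0: the code below runs */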
+ */ + vm_map_lock(&vm->vm_map); + (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset, + vm->vm_map.max_offset); + pmap_release(&vm->vm_pmap); + FREE(vm, M_VMMAP); + } +} + +/* + * vm_map_create: + * + * Creates and returns a new empty VM map with + * the given physical map structure, and having + * the given lower and upper address bounds. + */ +vm_map_t vm_map_create(pmap, min, max, pageable) + pmap_t pmap; + vm_offset_t min, max; + boolean_t pageable; +{ + register vm_map_t result; + extern vm_map_t kernel_map, kmem_map; + + if (kmem_map == NULL) { + result = kmap_free; + kmap_free = (vm_map_t) result->header.next; + if (result == NULL) + panic("vm_map_create: out of maps"); + } else + MALLOC(result, vm_map_t, sizeof(struct vm_map), + M_VMMAP, M_WAITOK); + + vm_map_init(result, min, max, pageable); + result->pmap = pmap; + return(result); +} + +/* + * Initialize an existing vm_map structure + * such as that in the vmspace structure. + * The pmap is set elsewhere. + */ +void +vm_map_init(map, min, max, pageable) + register struct vm_map *map; + vm_offset_t min, max; + boolean_t pageable; +{ + map->header.next = map->header.prev = &map->header; + map->nentries = 0; + map->size = 0; + map->ref_count = 1; + map->is_main_map = TRUE; + map->min_offset = min; + map->max_offset = max; + map->entries_pageable = pageable; + map->first_free = &map->header; + map->hint = &map->header; + map->timestamp = 0; + lock_init(&map->lock, TRUE); + simple_lock_init(&map->ref_lock); + simple_lock_init(&map->hint_lock); +} + +/* + * vm_map_entry_create: [ internal use only ] + * + * Allocates a VM map entry for insertion. + * No entry fields are filled in. This routine is + */ +vm_map_entry_t vm_map_entry_create(map) + vm_map_t map; +{ + vm_map_entry_t entry; + extern vm_map_t kernel_map, kmem_map, mb_map, buffer_map, pager_map; + + if (map == kernel_map || map == kmem_map || map == mb_map + || map == buffer_map || map == pager_map) { + if (entry = kentry_free) + kentry_free = kentry_free->next; + } else + MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry), + M_VMMAPENT, M_WAITOK); + if (entry == NULL) + panic("vm_map_entry_create: out of map entries"); + + return(entry); +} + +/* + * vm_map_entry_dispose: [ internal use only ] + * + * Inverse of vm_map_entry_create. + */ +void vm_map_entry_dispose(map, entry) + vm_map_t map; + vm_map_entry_t entry; +{ + extern vm_map_t kernel_map, kmem_map, mb_map, buffer_map, pager_map; + + if (map == kernel_map || map == kmem_map || map == mb_map + || map == buffer_map || map == pager_map) { + entry->next = kentry_free; + kentry_free = entry; + } else + FREE(entry, M_VMMAPENT); +} + +/* + * vm_map_entry_{un,}link: + * + * Insert/remove entries from maps. + */ +#define vm_map_entry_link(map, after_where, entry) \ + { \ + (map)->nentries++; \ + (entry)->prev = (after_where); \ + (entry)->next = (after_where)->next; \ + (entry)->prev->next = (entry); \ + (entry)->next->prev = (entry); \ + } +#define vm_map_entry_unlink(map, entry) \ + { \ + (map)->nentries--; \ + (entry)->next->prev = (entry)->prev; \ + (entry)->prev->next = (entry)->next; \ + } + +/* + * vm_map_reference: + * + * Creates another valid reference to the given map. + * + */ +void vm_map_reference(map) + register vm_map_t map; +{ + if (map == NULL) + return; + + simple_lock(&map->ref_lock); + map->ref_count++; + simple_unlock(&map->ref_lock); +} + +/* + * vm_map_deallocate: + * + * Removes a reference from the specified map, + * destroying it if no references remain. + * The map should not be locked. 
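+ *
+ *	Pairs with vm_map_reference() above; a sketch:
+ *
+ *		vm_map_reference(map);		/* ref_count++ */
+ *		... pass "map" to another holder ...
+ *		vm_map_deallocate(map);		/* ref_count--; destroys
+ *						   the map when it hits 0 */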
+ */ +void vm_map_deallocate(map) + register vm_map_t map; +{ + register int c; + + if (map == NULL) + return; + + simple_lock(&map->ref_lock); + c = --map->ref_count; + simple_unlock(&map->ref_lock); + + if (c > 0) { + return; + } + + /* + * Lock the map, to wait out all other references + * to it. + */ + + vm_map_lock(map); + + (void) vm_map_delete(map, map->min_offset, map->max_offset); + + pmap_destroy(map->pmap); + + FREE(map, M_VMMAP); +} + +/* + * vm_map_insert: [ internal use only ] + * + * Inserts the given whole VM object into the target + * map at the specified address range. The object's + * size should match that of the address range. + * + * Requires that the map be locked, and leaves it so. + */ +vm_map_insert(map, object, offset, start, end) + vm_map_t map; + vm_object_t object; + vm_offset_t offset; + vm_offset_t start; + vm_offset_t end; +{ + register vm_map_entry_t new_entry; + register vm_map_entry_t prev_entry; + vm_map_entry_t temp_entry; + + /* + * Check that the start and end points are not bogus. + */ + + if ((start < map->min_offset) || (end > map->max_offset) || + (start >= end)) + return(KERN_INVALID_ADDRESS); + + /* + * Find the entry prior to the proposed + * starting address; if it's part of an + * existing entry, this range is bogus. + */ + + if (vm_map_lookup_entry(map, start, &temp_entry)) + return(KERN_NO_SPACE); + + prev_entry = temp_entry; + + /* + * Assert that the next entry doesn't overlap the + * end point. + */ + + if ((prev_entry->next != &map->header) && + (prev_entry->next->start < end)) + return(KERN_NO_SPACE); + + /* + * See if we can avoid creating a new entry by + * extending one of our neighbors. + */ + + if (object == NULL) { + if ((prev_entry != &map->header) && + (prev_entry->end == start) && + (map->is_main_map) && + (prev_entry->is_a_map == FALSE) && + (prev_entry->is_sub_map == FALSE) && + (prev_entry->inheritance == VM_INHERIT_DEFAULT) && + (prev_entry->protection == VM_PROT_DEFAULT) && + (prev_entry->max_protection == VM_PROT_DEFAULT) && + (prev_entry->wired_count == 0)) { + + if (vm_object_coalesce(prev_entry->object.vm_object, + NULL, + prev_entry->offset, + (vm_offset_t) 0, + (vm_size_t)(prev_entry->end + - prev_entry->start), + (vm_size_t)(end - prev_entry->end))) { + /* + * Coalesced the two objects - can extend + * the previous map entry to include the + * new range. + */ + map->size += (end - prev_entry->end); + prev_entry->end = end; + return(KERN_SUCCESS); + } + } + } + + /* + * Create a new entry + */ + + new_entry = vm_map_entry_create(map); + new_entry->start = start; + new_entry->end = end; + + new_entry->is_a_map = FALSE; + new_entry->is_sub_map = FALSE; + new_entry->object.vm_object = object; + new_entry->offset = offset; + + new_entry->copy_on_write = FALSE; + new_entry->needs_copy = FALSE; + + if (map->is_main_map) { + new_entry->inheritance = VM_INHERIT_DEFAULT; + new_entry->protection = VM_PROT_DEFAULT; + new_entry->max_protection = VM_PROT_DEFAULT; + new_entry->wired_count = 0; + } + + /* + * Insert the new entry into the list + */ + + vm_map_entry_link(map, prev_entry, new_entry); + map->size += new_entry->end - new_entry->start; + + /* + * Update the free space hint + */ + + if ((map->first_free == prev_entry) && (prev_entry->end >= new_entry->start)) + map->first_free = new_entry; + + return(KERN_SUCCESS); +} + +/* + * SAVE_HINT: + * + * Saves the specified entry as the hint for + * future lookups. Performs necessary interlocks. 
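+ *
+ *	The effect, sketched: with two back-to-back lookups
+ *
+ *		(void) vm_map_lookup_entry(map, va, &entry);
+ *		(void) vm_map_lookup_entry(map, va + PAGE_SIZE, &entry);
+ *
+ *	the second call starts its linear scan at the entry saved
+ *	by the first instead of at the head of the entry list.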
+ */ +#define SAVE_HINT(map,value) \ + simple_lock(&(map)->hint_lock); \ + (map)->hint = (value); \ + simple_unlock(&(map)->hint_lock); + +/* + * vm_map_lookup_entry: [ internal use only ] + * + * Finds the map entry containing (or + * immediately preceding) the specified address + * in the given map; the entry is returned + * in the "entry" parameter. The boolean + * result indicates whether the address is + * actually contained in the map. + */ +boolean_t vm_map_lookup_entry(map, address, entry) + register vm_map_t map; + register vm_offset_t address; + vm_map_entry_t *entry; /* OUT */ +{ + register vm_map_entry_t cur; + register vm_map_entry_t last; + + /* + * Start looking either from the head of the + * list, or from the hint. + */ + + simple_lock(&map->hint_lock); + cur = map->hint; + simple_unlock(&map->hint_lock); + + if (cur == &map->header) + cur = cur->next; + + if (address >= cur->start) { + /* + * Go from hint to end of list. + * + * But first, make a quick check to see if + * we are already looking at the entry we + * want (which is usually the case). + * Note also that we don't need to save the hint + * here... it is the same hint (unless we are + * at the header, in which case the hint didn't + * buy us anything anyway). + */ + last = &map->header; + if ((cur != last) && (cur->end > address)) { + *entry = cur; + return(TRUE); + } + } + else { + /* + * Go from start to hint, *inclusively* + */ + last = cur->next; + cur = map->header.next; + } + + /* + * Search linearly + */ + + while (cur != last) { + if (cur->end > address) { + if (address >= cur->start) { + /* + * Save this lookup for future + * hints, and return + */ + + *entry = cur; + SAVE_HINT(map, cur); + return(TRUE); + } + break; + } + cur = cur->next; + } + *entry = cur->prev; + SAVE_HINT(map, *entry); + return(FALSE); +} + +/* + * vm_map_find finds an unallocated region in the target address + * map with the given length. The search is defined to be + * first-fit from the specified address; the region found is + * returned in the same parameter. + * + */ +vm_map_find(map, object, offset, addr, length, find_space) + vm_map_t map; + vm_object_t object; + vm_offset_t offset; + vm_offset_t *addr; /* IN/OUT */ + vm_size_t length; + boolean_t find_space; +{ + register vm_map_entry_t entry; + register vm_offset_t start; + register vm_offset_t end; + int result; + + start = *addr; + + vm_map_lock(map); + + if (find_space) { + /* + * Calculate the first possible address. + */ + + if (start < map->min_offset) + start = map->min_offset; + if (start > map->max_offset) { + vm_map_unlock(map); + return (KERN_NO_SPACE); + } + + /* + * Look for the first possible address; + * if there's already something at this + * address, we have to start after it. + */ + + if (start == map->min_offset) { + if ((entry = map->first_free) != &map->header) + start = entry->end; + } else { + vm_map_entry_t tmp_entry; + if (vm_map_lookup_entry(map, start, &tmp_entry)) + start = tmp_entry->end; + entry = tmp_entry; + } + + /* + * In any case, the "entry" always precedes + * the proposed new region throughout the + * loop: + */ + + while (TRUE) { + register vm_map_entry_t next; + + /* + * Find the end of the proposed new region. + * Be sure we didn't go beyond the end, or + * wrap around the address. + */ + + end = start + length; + + if ((end > map->max_offset) || (end < start)) { + vm_map_unlock(map); + return (KERN_NO_SPACE); + } + + /* + * If there are no more entries, we must win. 
+ */ + + next = entry->next; + if (next == &map->header) + break; + + /* + * If there is another entry, it must be + * after the end of the potential new region. + */ + + if (next->start >= end) + break; + + /* + * Didn't fit -- move to the next entry. + */ + + entry = next; + start = entry->end; + } + *addr = start; + + SAVE_HINT(map, entry); + } + + result = vm_map_insert(map, object, offset, start, start + length); + + vm_map_unlock(map); + return(result); +} + +/* + * vm_map_simplify_entry: [ internal use only ] + * + * Simplify the given map entry by: + * removing extra sharing maps + * [XXX maybe later] merging with a neighbor + */ +void vm_map_simplify_entry(map, entry) + vm_map_t map; + vm_map_entry_t entry; +{ +#ifdef lint + map++; +#endif lint + + /* + * If this entry corresponds to a sharing map, then + * see if we can remove the level of indirection. + * If it's not a sharing map, then it points to + * a VM object, so see if we can merge with either + * of our neighbors. + */ + + if (entry->is_sub_map) + return; + if (entry->is_a_map) { +#if 0 + vm_map_t my_share_map; + int count; + + my_share_map = entry->object.share_map; + simple_lock(&my_share_map->ref_lock); + count = my_share_map->ref_count; + simple_unlock(&my_share_map->ref_lock); + + if (count == 1) { + /* Can move the region from + * entry->start to entry->end (+ entry->offset) + * in my_share_map into place of entry. + * Later. + */ + } +#endif 0 + } + else { + /* + * Try to merge with our neighbors. + * + * Conditions for merge are: + * + * 1. entries are adjacent. + * 2. both entries point to objects + * with null pagers. + * + * If a merge is possible, we replace the two + * entries with a single entry, then merge + * the two objects into a single object. + * + * Now, all that is left to do is write the + * code! + */ + } +} + +/* + * vm_map_clip_start: [ internal use only ] + * + * Asserts that the given entry begins at or after + * the specified address; if necessary, + * it splits the entry into two. + */ +#define vm_map_clip_start(map, entry, startaddr) \ +{ \ + if (startaddr > entry->start) \ + _vm_map_clip_start(map, entry, startaddr); \ +} + +/* + * This routine is called only when it is known that + * the entry must be split. + */ +void _vm_map_clip_start(map, entry, start) + register vm_map_t map; + register vm_map_entry_t entry; + register vm_offset_t start; +{ + register vm_map_entry_t new_entry; + + /* + * See if we can simplify this entry first + */ + + vm_map_simplify_entry(map, entry); + + /* + * Split off the front portion -- + * note that we must insert the new + * entry BEFORE this one, so that + * this entry has the specified starting + * address. + */ + + new_entry = vm_map_entry_create(map); + *new_entry = *entry; + + new_entry->end = start; + entry->offset += (start - entry->start); + entry->start = start; + + vm_map_entry_link(map, entry->prev, new_entry); + + if (entry->is_a_map || entry->is_sub_map) + vm_map_reference(new_entry->object.share_map); + else + vm_object_reference(new_entry->object.vm_object); +} + +/* + * vm_map_clip_end: [ internal use only ] + * + * Asserts that the given entry ends at or before + * the specified address; if necessary, + * it splits the entry into two. + */ + +void _vm_map_clip_end(); +#define vm_map_clip_end(map, entry, endaddr) \ +{ \ + if (endaddr < entry->end) \ + _vm_map_clip_end(map, entry, endaddr); \ +} + +/* + * This routine is called only when it is known that + * the entry must be split. 
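+ *
+ *	For example, splitting an entry covering [A, C) at address B
+ *	(A < B < C) shrinks the entry to [A, B) and links a new
+ *	entry [B, C) in after it, with the new entry's object offset
+ *	advanced by (B - A).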
+ */
+void _vm_map_clip_end(map, entry, end)
+	register vm_map_t	map;
+	register vm_map_entry_t	entry;
+	register vm_offset_t	end;
+{
+	register vm_map_entry_t	new_entry;
+
+	/*
+	 *	Create a new entry and insert it
+	 *	AFTER the specified entry
+	 */
+
+	new_entry = vm_map_entry_create(map);
+	*new_entry = *entry;
+
+	new_entry->start = entry->end = end;
+	new_entry->offset += (end - entry->start);
+
+	vm_map_entry_link(map, entry, new_entry);
+
+	if (entry->is_a_map || entry->is_sub_map)
+		vm_map_reference(new_entry->object.share_map);
+	else
+		vm_object_reference(new_entry->object.vm_object);
+}
+
+/*
+ *	VM_MAP_RANGE_CHECK:	[ internal use only ]
+ *
+ *	Asserts that the starting and ending region
+ *	addresses fall within the valid range of the map.
+ */
+#define	VM_MAP_RANGE_CHECK(map, start, end)	\
+	{					\
+	if (start < vm_map_min(map))		\
+		start = vm_map_min(map);	\
+	if (end > vm_map_max(map))		\
+		end = vm_map_max(map);		\
+	if (start > end)			\
+		start = end;			\
+	}
+
+/*
+ *	vm_map_submap:		[ kernel use only ]
+ *
+ *	Mark the given range as handled by a subordinate map.
+ *
+ *	This range must have been created with vm_map_find,
+ *	and no other operations may have been performed on this
+ *	range prior to calling vm_map_submap.
+ *
+ *	Only a limited number of operations can be performed
+ *	within this range after calling vm_map_submap:
+ *		vm_fault
+ *	[Don't try vm_map_copy!]
+ *
+ *	To remove a submapping, one must first remove the
+ *	range from the superior map, and then destroy the
+ *	submap (if desired).  [Better yet, don't try it.]
+ */
+vm_map_submap(map, start, end, submap)
+	register vm_map_t	map;
+	register vm_offset_t	start;
+	register vm_offset_t	end;
+	vm_map_t		submap;
+{
+	vm_map_entry_t		entry;
+	register int		result = KERN_INVALID_ARGUMENT;
+
+	vm_map_lock(map);
+
+	VM_MAP_RANGE_CHECK(map, start, end);
+
+	if (vm_map_lookup_entry(map, start, &entry)) {
+		vm_map_clip_start(map, entry, start);
+	}
+	else
+		entry = entry->next;
+
+	vm_map_clip_end(map, entry, end);
+
+	if ((entry->start == start) && (entry->end == end) &&
+	    (!entry->is_a_map) &&
+	    (entry->object.vm_object == NULL) &&
+	    (!entry->copy_on_write)) {
+		entry->is_a_map = FALSE;
+		entry->is_sub_map = TRUE;
+		vm_map_reference(entry->object.sub_map = submap);
+		result = KERN_SUCCESS;
+	}
+	vm_map_unlock(map);
+
+	return(result);
+}
+
+/*
+ *	vm_map_protect:
+ *
+ *	Sets the protection of the specified address
+ *	region in the target map.  If "set_max" is
+ *	specified, the maximum protection is to be set;
+ *	otherwise, only the current protection is affected.
+ */
+vm_map_protect(map, start, end, new_prot, set_max)
+	register vm_map_t	map;
+	register vm_offset_t	start;
+	register vm_offset_t	end;
+	register vm_prot_t	new_prot;
+	register boolean_t	set_max;
+{
+	register vm_map_entry_t	current;
+	vm_map_entry_t		entry;
+
+	vm_map_lock(map);
+
+	VM_MAP_RANGE_CHECK(map, start, end);
+
+	if (vm_map_lookup_entry(map, start, &entry)) {
+		vm_map_clip_start(map, entry, start);
+	}
+	else
+		entry = entry->next;
+
+	/*
+	 *	Make a first pass to check for protection
+	 *	violations.
+	 */
+
+	current = entry;
+	while ((current != &map->header) && (current->start < end)) {
+		if (current->is_sub_map)
+			return(KERN_INVALID_ARGUMENT);
+		if ((new_prot & current->max_protection) != new_prot) {
+			vm_map_unlock(map);
+			return(KERN_PROTECTION_FAILURE);
+		}
+
+		current = current->next;
+	}
+
+	/*
+	 *	Go back and fix up protections.
+	 *	[Note that clipping is not necessary the second time.]
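+	 *	(The start was already clipped before the first pass,
+	 *	and the map has stayed locked, so the entry list cannot
+	 *	have changed in between; only the end clips below remain.)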
+	 */
+
+	current = entry;
+
+	while ((current != &map->header) && (current->start < end)) {
+		vm_prot_t	old_prot;
+
+		vm_map_clip_end(map, current, end);
+
+		old_prot = current->protection;
+		if (set_max)
+			current->protection =
+				(current->max_protection = new_prot) &
+					old_prot;
+		else
+			current->protection = new_prot;
+
+		/*
+		 *	Update physical map if necessary.
+		 *	Worry about copy-on-write here -- CHECK THIS XXX
+		 */
+
+		if (current->protection != old_prot) {
+
+#define MASK(entry)	((entry)->copy_on_write ? ~VM_PROT_WRITE : \
+							VM_PROT_ALL)
+#define	max(a,b)	((a) > (b) ? (a) : (b))
+
+			if (current->is_a_map) {
+				vm_map_entry_t	share_entry;
+				vm_offset_t	share_end;
+
+				vm_map_lock(current->object.share_map);
+				(void) vm_map_lookup_entry(
+						current->object.share_map,
+						current->offset,
+						&share_entry);
+				share_end = current->offset +
+					(current->end - current->start);
+				while ((share_entry !=
+					&current->object.share_map->header) &&
+					(share_entry->start < share_end)) {
+
+					pmap_protect(map->pmap,
+						(max(share_entry->start,
+							current->offset) -
+							current->offset +
+							current->start),
+						min(share_entry->end,
+							share_end) -
+							current->offset +
+							current->start,
+						current->protection &
+							MASK(share_entry));
+
+					share_entry = share_entry->next;
+				}
+				vm_map_unlock(current->object.share_map);
+			}
+			else
+				pmap_protect(map->pmap, current->start,
+					current->end,
+					current->protection & MASK(entry));
+#undef	max
+#undef	MASK
+		}
+		current = current->next;
+	}
+
+	vm_map_unlock(map);
+	return(KERN_SUCCESS);
+}
+
+/*
+ *	vm_map_inherit:
+ *
+ *	Sets the inheritance of the specified address
+ *	range in the target map.  Inheritance
+ *	affects how the map will be shared with
+ *	child maps at the time of vm_map_fork.
+ */
+vm_map_inherit(map, start, end, new_inheritance)
+	register vm_map_t	map;
+	register vm_offset_t	start;
+	register vm_offset_t	end;
+	register vm_inherit_t	new_inheritance;
+{
+	register vm_map_entry_t	entry;
+	vm_map_entry_t		temp_entry;
+
+	switch (new_inheritance) {
+	case VM_INHERIT_NONE:
+	case VM_INHERIT_COPY:
+	case VM_INHERIT_SHARE:
+		break;
+	default:
+		return(KERN_INVALID_ARGUMENT);
+	}
+
+	vm_map_lock(map);
+
+	VM_MAP_RANGE_CHECK(map, start, end);
+
+	if (vm_map_lookup_entry(map, start, &temp_entry)) {
+		entry = temp_entry;
+		vm_map_clip_start(map, entry, start);
+	}
+	else
+		entry = temp_entry->next;
+
+	while ((entry != &map->header) && (entry->start < end)) {
+		vm_map_clip_end(map, entry, end);
+
+		entry->inheritance = new_inheritance;
+
+		entry = entry->next;
+	}
+
+	vm_map_unlock(map);
+	return(KERN_SUCCESS);
+}
+
+/*
+ *	vm_map_pageable:
+ *
+ *	Sets the pageability of the specified address
+ *	range in the target map.  Regions specified
+ *	as not pageable require locked-down physical
+ *	memory and physical page maps.
+ *
+ *	The map must not be locked, but a reference
+ *	must remain to the map throughout the call.
+ */
+vm_map_pageable(map, start, end, new_pageable)
+	register vm_map_t	map;
+	register vm_offset_t	start;
+	register vm_offset_t	end;
+	register boolean_t	new_pageable;
+{
+	register vm_map_entry_t	entry;
+	vm_map_entry_t		temp_entry;
+
+	vm_map_lock(map);
+
+	VM_MAP_RANGE_CHECK(map, start, end);
+
+	/*
+	 *	Only one pageability change may take place at one
+	 *	time, since vm_fault assumes it will be called
+	 *	only once for each wiring/unwiring.  Therefore, we
+	 *	have to make sure we're actually changing the pageability
+	 *	for the entire region.  We do so before making any changes.
+ */ + + if (vm_map_lookup_entry(map, start, &temp_entry)) { + entry = temp_entry; + vm_map_clip_start(map, entry, start); + } + else + entry = temp_entry->next; + temp_entry = entry; + + /* + * Actions are rather different for wiring and unwiring, + * so we have two separate cases. + */ + + if (new_pageable) { + + /* + * Unwiring. First ensure that the range to be + * unwired is really wired down. + */ + while ((entry != &map->header) && (entry->start < end)) { + + if (entry->wired_count == 0) { + vm_map_unlock(map); + return(KERN_INVALID_ARGUMENT); + } + entry = entry->next; + } + + /* + * Now decrement the wiring count for each region. + * If a region becomes completely unwired, + * unwire its physical pages and mappings. + */ + lock_set_recursive(&map->lock); + + entry = temp_entry; + while ((entry != &map->header) && (entry->start < end)) { + vm_map_clip_end(map, entry, end); + + entry->wired_count--; + if (entry->wired_count == 0) + vm_fault_unwire(map, entry->start, entry->end); + + entry = entry->next; + } + lock_clear_recursive(&map->lock); + } + + else { + /* + * Wiring. We must do this in two passes: + * + * 1. Holding the write lock, we increment the + * wiring count. For any area that is not already + * wired, we create any shadow objects that need + * to be created. + * + * 2. We downgrade to a read lock, and call + * vm_fault_wire to fault in the pages for any + * newly wired area (wired_count is 1). + * + * Downgrading to a read lock for vm_fault_wire avoids + * a possible deadlock with another thread that may have + * faulted on one of the pages to be wired (it would mark + * the page busy, blocking us, then in turn block on the + * map lock that we hold). Because of problems in the + * recursive lock package, we cannot upgrade to a write + * lock in vm_map_lookup. Thus, any actions that require + * the write lock must be done beforehand. Because we + * keep the read lock on the map, the copy-on-write status + * of the entries we modify here cannot change. + */ + + /* + * Pass 1. + */ + entry = temp_entry; + while ((entry != &map->header) && (entry->start < end)) { + vm_map_clip_end(map, entry, end); + + entry->wired_count++; + if (entry->wired_count == 1) { + + /* + * Perform actions of vm_map_lookup that need + * the write lock on the map: create a shadow + * object for a copy-on-write region, or an + * object for a zero-fill region. + * + * We don't have to do this for entries that + * point to sharing maps, because we won't hold + * the lock on the sharing map. + */ + if (!entry->is_a_map) { + if (entry->needs_copy && + ((entry->protection & VM_PROT_WRITE) != 0)) { + + vm_object_shadow(&entry->object.vm_object, + &entry->offset, + (vm_size_t)(entry->end + - entry->start)); + entry->needs_copy = FALSE; + } + else if (entry->object.vm_object == NULL) { + entry->object.vm_object = + vm_object_allocate((vm_size_t)(entry->end + - entry->start)); + entry->offset = (vm_offset_t)0; + } + } + } + + entry = entry->next; + } + + /* + * Pass 2. + */ + + /* + * HACK HACK HACK HACK + * + * If we are wiring in the kernel map or a submap of it, + * unlock the map to avoid deadlocks. We trust that the + * kernel threads are well-behaved, and therefore will + * not do anything destructive to this region of the map + * while we have it unlocked. We cannot trust user threads + * to do the same. + * + * HACK HACK HACK HACK + */ + if (vm_map_pmap(map) == kernel_pmap) { + vm_map_unlock(map); /* trust me ... 
*/ + } + else { + lock_set_recursive(&map->lock); + lock_write_to_read(&map->lock); + } + + entry = temp_entry; + while (entry != &map->header && entry->start < end) { + if (entry->wired_count == 1) { + vm_fault_wire(map, entry->start, entry->end); + } + entry = entry->next; + } + + if (vm_map_pmap(map) == kernel_pmap) { + vm_map_lock(map); + } + else { + lock_clear_recursive(&map->lock); + } + } + + vm_map_unlock(map); + + return(KERN_SUCCESS); +} + +/* + * vm_map_entry_unwire: [ internal use only ] + * + * Make the region specified by this entry pageable. + * + * The map in question should be locked. + * [This is the reason for this routine's existence.] + */ +void vm_map_entry_unwire(map, entry) + vm_map_t map; + register vm_map_entry_t entry; +{ + vm_fault_unwire(map, entry->start, entry->end); + entry->wired_count = 0; +} + +/* + * vm_map_entry_delete: [ internal use only ] + * + * Deallocate the given entry from the target map. + */ +void vm_map_entry_delete(map, entry) + register vm_map_t map; + register vm_map_entry_t entry; +{ + if (entry->wired_count != 0) + vm_map_entry_unwire(map, entry); + + vm_map_entry_unlink(map, entry); + map->size -= entry->end - entry->start; + + if (entry->is_a_map || entry->is_sub_map) + vm_map_deallocate(entry->object.share_map); + else + vm_object_deallocate(entry->object.vm_object); + + vm_map_entry_dispose(map, entry); +} + +/* + * vm_map_delete: [ internal use only ] + * + * Deallocates the given address range from the target + * map. + * + * When called with a sharing map, removes pages from + * that region from all physical maps. + */ +vm_map_delete(map, start, end) + register vm_map_t map; + vm_offset_t start; + register vm_offset_t end; +{ + register vm_map_entry_t entry; + vm_map_entry_t first_entry; + + /* + * Find the start of the region, and clip it + */ + + if (!vm_map_lookup_entry(map, start, &first_entry)) + entry = first_entry->next; + else { + entry = first_entry; + vm_map_clip_start(map, entry, start); + + /* + * Fix the lookup hint now, rather than each + * time though the loop. + */ + + SAVE_HINT(map, entry->prev); + } + + /* + * Save the free space hint + */ + + if (map->first_free->start >= start) + map->first_free = entry->prev; + + /* + * Step through all entries in this region + */ + + while ((entry != &map->header) && (entry->start < end)) { + vm_map_entry_t next; + register vm_offset_t s, e; + register vm_object_t object; + + vm_map_clip_end(map, entry, end); + + next = entry->next; + s = entry->start; + e = entry->end; + + /* + * Unwire before removing addresses from the pmap; + * otherwise, unwiring will put the entries back in + * the pmap. + */ + + object = entry->object.vm_object; + if (entry->wired_count != 0) + vm_map_entry_unwire(map, entry); + + /* + * If this is a sharing map, we must remove + * *all* references to this data, since we can't + * find all of the physical maps which are sharing + * it. + */ + + if (object == kernel_object || object == kmem_object) + vm_object_page_remove(object, entry->offset, + entry->offset + (e - s)); + else if (!map->is_main_map) + vm_object_pmap_remove(object, + entry->offset, + entry->offset + (e - s)); + else + pmap_remove(map->pmap, s, e); + + /* + * Delete the entry (which may delete the object) + * only after removing all pmap entries pointing + * to its pages. (Otherwise, its page frames may + * be reallocated, and any modify bits will be + * set in the wrong object!) 
+ */ + + vm_map_entry_delete(map, entry); + entry = next; + } + return(KERN_SUCCESS); +} + +/* + * vm_map_remove: + * + * Remove the given address range from the target map. + * This is the exported form of vm_map_delete. + */ +vm_map_remove(map, start, end) + register vm_map_t map; + register vm_offset_t start; + register vm_offset_t end; +{ + register int result; + + vm_map_lock(map); + VM_MAP_RANGE_CHECK(map, start, end); + result = vm_map_delete(map, start, end); + vm_map_unlock(map); + + return(result); +} + +/* + * vm_map_check_protection: + * + * Assert that the target map allows the specified + * privilege on the entire address region given. + * The entire region must be allocated. + */ +boolean_t vm_map_check_protection(map, start, end, protection) + register vm_map_t map; + register vm_offset_t start; + register vm_offset_t end; + register vm_prot_t protection; +{ + register vm_map_entry_t entry; + vm_map_entry_t tmp_entry; + + if (!vm_map_lookup_entry(map, start, &tmp_entry)) { + return(FALSE); + } + + entry = tmp_entry; + + while (start < end) { + if (entry == &map->header) { + return(FALSE); + } + + /* + * No holes allowed! + */ + + if (start < entry->start) { + return(FALSE); + } + + /* + * Check protection associated with entry. + */ + + if ((entry->protection & protection) != protection) { + return(FALSE); + } + + /* go to next entry */ + + start = entry->end; + entry = entry->next; + } + return(TRUE); +} + +/* + * vm_map_copy_entry: + * + * Copies the contents of the source entry to the destination + * entry. The entries *must* be aligned properly. + */ +void vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry) + vm_map_t src_map, dst_map; + register vm_map_entry_t src_entry, dst_entry; +{ + vm_object_t temp_object; + + if (src_entry->is_sub_map || dst_entry->is_sub_map) + return; + + if (dst_entry->object.vm_object != NULL && + !dst_entry->object.vm_object->internal) + printf("vm_map_copy_entry: copying over permanent data!\n"); + + /* + * If our destination map was wired down, + * unwire it now. + */ + + if (dst_entry->wired_count != 0) + vm_map_entry_unwire(dst_map, dst_entry); + + /* + * If we're dealing with a sharing map, we + * must remove the destination pages from + * all maps (since we cannot know which maps + * this sharing map belongs in). + */ + + if (dst_map->is_main_map) + pmap_remove(dst_map->pmap, dst_entry->start, dst_entry->end); + else + vm_object_pmap_remove(dst_entry->object.vm_object, + dst_entry->offset, + dst_entry->offset + + (dst_entry->end - dst_entry->start)); + + if (src_entry->wired_count == 0) { + + boolean_t src_needs_copy; + + /* + * If the source entry is marked needs_copy, + * it is already write-protected. + */ + if (!src_entry->needs_copy) { + + boolean_t su; + + /* + * If the source entry has only one mapping, + * we can just protect the virtual address + * range. + */ + if (!(su = src_map->is_main_map)) { + simple_lock(&src_map->ref_lock); + su = (src_map->ref_count == 1); + simple_unlock(&src_map->ref_lock); + } + + if (su) { + pmap_protect(src_map->pmap, + src_entry->start, + src_entry->end, + src_entry->protection & ~VM_PROT_WRITE); + } + else { + vm_object_pmap_copy(src_entry->object.vm_object, + src_entry->offset, + src_entry->offset + (src_entry->end + -src_entry->start)); + } + } + + /* + * Make a copy of the object. 
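+ *
+ * Note that vm_object_copy may decline to copy anything yet
+ * and instead hand back the source object with
+ * src_needs_copy set; the needs_copy and copy_on_write
+ * flags set below then arrange for shadow objects to be
+ * built lazily, at the first write fault.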
+ */ + temp_object = dst_entry->object.vm_object; + vm_object_copy(src_entry->object.vm_object, + src_entry->offset, + (vm_size_t)(src_entry->end - + src_entry->start), + &dst_entry->object.vm_object, + &dst_entry->offset, + &src_needs_copy); + /* + * If we didn't get a copy-object now, mark the + * source map entry so that a shadow will be created + * to hold its changed pages. + */ + if (src_needs_copy) + src_entry->needs_copy = TRUE; + + /* + * The destination always needs to have a shadow + * created. + */ + dst_entry->needs_copy = TRUE; + + /* + * Mark the entries copy-on-write, so that write-enabling + * the entry won't make copy-on-write pages writable. + */ + src_entry->copy_on_write = TRUE; + dst_entry->copy_on_write = TRUE; + /* + * Get rid of the old object. + */ + vm_object_deallocate(temp_object); + + pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start, + dst_entry->end - dst_entry->start, src_entry->start); + } + else { + /* + * Of course, wired down pages can't be set copy-on-write. + * Cause wired pages to be copied into the new + * map by simulating faults (the new pages are + * pageable) + */ + vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry); + } +} + +/* + * vm_map_copy: + * + * Perform a virtual memory copy from the source + * address map/range to the destination map/range. + * + * If src_destroy or dst_alloc is requested, + * the source and destination regions should be + * disjoint, not only in the top-level map, but + * in the sharing maps as well. [The best way + * to guarantee this is to use a new intermediate + * map to make copies. This also reduces map + * fragmentation.] + */ +vm_map_copy(dst_map, src_map, + dst_addr, len, src_addr, + dst_alloc, src_destroy) + vm_map_t dst_map; + vm_map_t src_map; + vm_offset_t dst_addr; + vm_size_t len; + vm_offset_t src_addr; + boolean_t dst_alloc; + boolean_t src_destroy; +{ + register + vm_map_entry_t src_entry; + register + vm_map_entry_t dst_entry; + vm_map_entry_t tmp_entry; + vm_offset_t src_start; + vm_offset_t src_end; + vm_offset_t dst_start; + vm_offset_t dst_end; + vm_offset_t src_clip; + vm_offset_t dst_clip; + int result; + boolean_t old_src_destroy; + + /* + * XXX While we figure out why src_destroy screws up, + * we'll do it by explicitly vm_map_delete'ing at the end. + */ + + old_src_destroy = src_destroy; + src_destroy = FALSE; + + /* + * Compute start and end of region in both maps + */ + + src_start = src_addr; + src_end = src_start + len; + dst_start = dst_addr; + dst_end = dst_start + len; + + /* + * Check that the region can exist in both source + * and destination. + */ + + if ((dst_end < dst_start) || (src_end < src_start)) + return(KERN_NO_SPACE); + + /* + * Lock the maps in question -- we avoid deadlock + * by ordering lock acquisition by map value + */ + + if (src_map == dst_map) { + vm_map_lock(src_map); + } + else if ((int) src_map < (int) dst_map) { + vm_map_lock(src_map); + vm_map_lock(dst_map); + } else { + vm_map_lock(dst_map); + vm_map_lock(src_map); + } + + result = KERN_SUCCESS; + + /* + * Check protections... source must be completely readable and + * destination must be completely writable. [Note that if we're + * allocating the destination region, we don't have to worry + * about protection, but instead about whether the region + * exists.] 
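+ *
+ * (With dst_alloc set we simply insert a fresh null-object
+ * region over [dst_start, dst_end) below, so there is no
+ * existing protection to check against.)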
+ */ + + if (src_map->is_main_map && dst_map->is_main_map) { + if (!vm_map_check_protection(src_map, src_start, src_end, + VM_PROT_READ)) { + result = KERN_PROTECTION_FAILURE; + goto Return; + } + + if (dst_alloc) { + /* XXX Consider making this a vm_map_find instead */ + if ((result = vm_map_insert(dst_map, NULL, + (vm_offset_t) 0, dst_start, dst_end)) != KERN_SUCCESS) + goto Return; + } + else if (!vm_map_check_protection(dst_map, dst_start, dst_end, + VM_PROT_WRITE)) { + result = KERN_PROTECTION_FAILURE; + goto Return; + } + } + + /* + * Find the start entries and clip. + * + * Note that checking protection asserts that the + * lookup cannot fail. + * + * Also note that we wait to do the second lookup + * until we have done the first clip, as the clip + * may affect which entry we get! + */ + + (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry); + src_entry = tmp_entry; + vm_map_clip_start(src_map, src_entry, src_start); + + (void) vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry); + dst_entry = tmp_entry; + vm_map_clip_start(dst_map, dst_entry, dst_start); + + /* + * If both source and destination entries are the same, + * retry the first lookup, as it may have changed. + */ + + if (src_entry == dst_entry) { + (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry); + src_entry = tmp_entry; + } + + /* + * If source and destination entries are still the same, + * a null copy is being performed. + */ + + if (src_entry == dst_entry) + goto Return; + + /* + * Go through entries until we get to the end of the + * region. + */ + + while (src_start < src_end) { + /* + * Clip the entries to the endpoint of the entire region. + */ + + vm_map_clip_end(src_map, src_entry, src_end); + vm_map_clip_end(dst_map, dst_entry, dst_end); + + /* + * Clip each entry to the endpoint of the other entry. + */ + + src_clip = src_entry->start + (dst_entry->end - dst_entry->start); + vm_map_clip_end(src_map, src_entry, src_clip); + + dst_clip = dst_entry->start + (src_entry->end - src_entry->start); + vm_map_clip_end(dst_map, dst_entry, dst_clip); + + /* + * Both entries now match in size and relative endpoints. + * + * If both entries refer to a VM object, we can + * deal with them now. + */ + + if (!src_entry->is_a_map && !dst_entry->is_a_map) { + vm_map_copy_entry(src_map, dst_map, src_entry, + dst_entry); + } + else { + register vm_map_t new_dst_map; + vm_offset_t new_dst_start; + vm_size_t new_size; + vm_map_t new_src_map; + vm_offset_t new_src_start; + + /* + * We have to follow at least one sharing map. + */ + + new_size = (dst_entry->end - dst_entry->start); + + if (src_entry->is_a_map) { + new_src_map = src_entry->object.share_map; + new_src_start = src_entry->offset; + } + else { + new_src_map = src_map; + new_src_start = src_entry->start; + lock_set_recursive(&src_map->lock); + } + + if (dst_entry->is_a_map) { + vm_offset_t new_dst_end; + + new_dst_map = dst_entry->object.share_map; + new_dst_start = dst_entry->offset; + + /* + * Since the destination sharing entries + * will be merely deallocated, we can + * do that now, and replace the region + * with a null object. [This prevents + * splitting the source map to match + * the form of the destination map.] + * Note that we can only do so if the + * source and destination do not overlap. 
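+ * ("Overlap" is detected as both entries resolving to the
+ * same sharing map; deleting the destination range would
+ * then destroy source data too, so the delete and
+ * null-object insert below are skipped in that case.)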
+ */ + + new_dst_end = new_dst_start + new_size; + + if (new_dst_map != new_src_map) { + vm_map_lock(new_dst_map); + (void) vm_map_delete(new_dst_map, + new_dst_start, + new_dst_end); + (void) vm_map_insert(new_dst_map, + NULL, + (vm_offset_t) 0, + new_dst_start, + new_dst_end); + vm_map_unlock(new_dst_map); + } + } + else { + new_dst_map = dst_map; + new_dst_start = dst_entry->start; + lock_set_recursive(&dst_map->lock); + } + + /* + * Recursively copy the sharing map. + */ + + (void) vm_map_copy(new_dst_map, new_src_map, + new_dst_start, new_size, new_src_start, + FALSE, FALSE); + + if (dst_map == new_dst_map) + lock_clear_recursive(&dst_map->lock); + if (src_map == new_src_map) + lock_clear_recursive(&src_map->lock); + } + + /* + * Update variables for next pass through the loop. + */ + + src_start = src_entry->end; + src_entry = src_entry->next; + dst_start = dst_entry->end; + dst_entry = dst_entry->next; + + /* + * If the source is to be destroyed, here is the + * place to do it. + */ + + if (src_destroy && src_map->is_main_map && + dst_map->is_main_map) + vm_map_entry_delete(src_map, src_entry->prev); + } + + /* + * Update the physical maps as appropriate + */ + + if (src_map->is_main_map && dst_map->is_main_map) { + if (src_destroy) + pmap_remove(src_map->pmap, src_addr, src_addr + len); + } + + /* + * Unlock the maps + */ + + Return: ; + + if (old_src_destroy) + vm_map_delete(src_map, src_addr, src_addr + len); + + vm_map_unlock(src_map); + if (src_map != dst_map) + vm_map_unlock(dst_map); + + return(result); +} + +/* + * vmspace_fork: + * Create a new process vmspace structure and vm_map + * based on those of an existing process. The new map + * is based on the old map, according to the inheritance + * values on the regions in that map. + * + * The source map must not be locked. + */ +struct vmspace * +vmspace_fork(vm1) + register struct vmspace *vm1; +{ + register struct vmspace *vm2; + vm_map_t old_map = &vm1->vm_map; + vm_map_t new_map; + vm_map_entry_t old_entry; + vm_map_entry_t new_entry; + pmap_t new_pmap; + + vm_map_lock(old_map); + + vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset, + old_map->entries_pageable); + bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy, + (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy); + new_pmap = &vm2->vm_pmap; /* XXX */ + new_map = &vm2->vm_map; /* XXX */ + + old_entry = old_map->header.next; + + while (old_entry != &old_map->header) { + if (old_entry->is_sub_map) + panic("vm_map_fork: encountered a submap"); + + switch (old_entry->inheritance) { + case VM_INHERIT_NONE: + break; + + case VM_INHERIT_SHARE: + /* + * If we don't already have a sharing map: + */ + + if (!old_entry->is_a_map) { + vm_map_t new_share_map; + vm_map_entry_t new_share_entry; + + /* + * Create a new sharing map + */ + + new_share_map = vm_map_create(NULL, + old_entry->start, + old_entry->end, + TRUE); + new_share_map->is_main_map = FALSE; + + /* + * Create the only sharing entry from the + * old task map entry. + */ + + new_share_entry = + vm_map_entry_create(new_share_map); + *new_share_entry = *old_entry; + + /* + * Insert the entry into the new sharing + * map + */ + + vm_map_entry_link(new_share_map, + new_share_map->header.prev, + new_share_entry); + + /* + * Fix up the task map entry to refer + * to the sharing map now. + */ + + old_entry->is_a_map = TRUE; + old_entry->object.share_map = new_share_map; + old_entry->offset = old_entry->start; + } + + /* + * Clone the entry, referencing the sharing map. 
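+ *
+ * Parent and child will then name the same sharing map, so
+ * a store by either is visible to both -- which is what
+ * VM_INHERIT_SHARE means.  The reference taken below keeps
+ * the sharing map alive until both entries are gone.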
+ */ + + new_entry = vm_map_entry_create(new_map); + *new_entry = *old_entry; + vm_map_reference(new_entry->object.share_map); + + /* + * Insert the entry into the new map -- we + * know we're inserting at the end of the new + * map. + */ + + vm_map_entry_link(new_map, new_map->header.prev, + new_entry); + + /* + * Update the physical map + */ + + pmap_copy(new_map->pmap, old_map->pmap, + new_entry->start, + (old_entry->end - old_entry->start), + old_entry->start); + break; + + case VM_INHERIT_COPY: + /* + * Clone the entry and link into the map. + */ + + new_entry = vm_map_entry_create(new_map); + *new_entry = *old_entry; + new_entry->wired_count = 0; + new_entry->object.vm_object = NULL; + new_entry->is_a_map = FALSE; + vm_map_entry_link(new_map, new_map->header.prev, + new_entry); + if (old_entry->is_a_map) { + int check; + + check = vm_map_copy(new_map, + old_entry->object.share_map, + new_entry->start, + (vm_size_t)(new_entry->end - + new_entry->start), + old_entry->offset, + FALSE, FALSE); + if (check != KERN_SUCCESS) + printf("vm_map_fork: copy in share_map region failed\n"); + } + else { + vm_map_copy_entry(old_map, new_map, old_entry, + new_entry); + } + break; + } + old_entry = old_entry->next; + } + + new_map->size = old_map->size; + vm_map_unlock(old_map); + + return(vm2); +} + +/* + * vm_map_lookup: + * + * Finds the VM object, offset, and + * protection for a given virtual address in the + * specified map, assuming a page fault of the + * type specified. + * + * Leaves the map in question locked for read; return + * values are guaranteed until a vm_map_lookup_done + * call is performed. Note that the map argument + * is in/out; the returned map must be used in + * the call to vm_map_lookup_done. + * + * A handle (out_entry) is returned for use in + * vm_map_lookup_done, to make that fast. + * + * If a lookup is requested with "write protection" + * specified, the map may be changed to perform virtual + * copying operations, although the data referenced will + * remain the same. + */ +vm_map_lookup(var_map, vaddr, fault_type, out_entry, + object, offset, out_prot, wired, single_use) + vm_map_t *var_map; /* IN/OUT */ + register vm_offset_t vaddr; + register vm_prot_t fault_type; + + vm_map_entry_t *out_entry; /* OUT */ + vm_object_t *object; /* OUT */ + vm_offset_t *offset; /* OUT */ + vm_prot_t *out_prot; /* OUT */ + boolean_t *wired; /* OUT */ + boolean_t *single_use; /* OUT */ +{ + vm_map_t share_map; + vm_offset_t share_offset; + register vm_map_entry_t entry; + register vm_map_t map = *var_map; + register vm_prot_t prot; + register boolean_t su; + + RetryLookup: ; + + /* + * Lookup the faulting address. + */ + + vm_map_lock_read(map); + +#define RETURN(why) \ + { \ + vm_map_unlock_read(map); \ + return(why); \ + } + + /* + * If the map has an interesting hint, try it before calling + * full blown lookup routine. + */ + + simple_lock(&map->hint_lock); + entry = map->hint; + simple_unlock(&map->hint_lock); + + *out_entry = entry; + + if ((entry == &map->header) || + (vaddr < entry->start) || (vaddr >= entry->end)) { + vm_map_entry_t tmp_entry; + + /* + * Entry was either not a valid hint, or the vaddr + * was not contained in the entry, so do a full lookup. + */ + if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) + RETURN(KERN_INVALID_ADDRESS); + + entry = tmp_entry; + *out_entry = entry; + } + + /* + * Handle submaps. 
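+ *
+ * A submap entry holds no data of its own; it redirects
+ * this address range into another map (the kernel carves
+ * up kernel_map this way), so restart the lookup in the
+ * map the entry points to.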
+ */ + + if (entry->is_sub_map) { + vm_map_t old_map = map; + + *var_map = map = entry->object.sub_map; + vm_map_unlock_read(old_map); + goto RetryLookup; + } + + /* + * Check whether this task is allowed to have + * this page. + */ + + prot = entry->protection; + if ((fault_type & (prot)) != fault_type) + RETURN(KERN_PROTECTION_FAILURE); + + /* + * If this page is not pageable, we have to get + * it for all possible accesses. + */ + + if (*wired = (entry->wired_count != 0)) + prot = fault_type = entry->protection; + + /* + * If we don't already have a VM object, track + * it down. + */ + + if (su = !entry->is_a_map) { + share_map = map; + share_offset = vaddr; + } + else { + vm_map_entry_t share_entry; + + /* + * Compute the sharing map, and offset into it. + */ + + share_map = entry->object.share_map; + share_offset = (vaddr - entry->start) + entry->offset; + + /* + * Look for the backing store object and offset + */ + + vm_map_lock_read(share_map); + + if (!vm_map_lookup_entry(share_map, share_offset, + &share_entry)) { + vm_map_unlock_read(share_map); + RETURN(KERN_INVALID_ADDRESS); + } + entry = share_entry; + } + + /* + * If the entry was copy-on-write, we either ... + */ + + if (entry->needs_copy) { + /* + * If we want to write the page, we may as well + * handle that now since we've got the sharing + * map locked. + * + * If we don't need to write the page, we just + * demote the permissions allowed. + */ + + if (fault_type & VM_PROT_WRITE) { + /* + * Make a new object, and place it in the + * object chain. Note that no new references + * have appeared -- one just moved from the + * share map to the new object. + */ + + if (lock_read_to_write(&share_map->lock)) { + if (share_map != map) + vm_map_unlock_read(map); + goto RetryLookup; + } + + vm_object_shadow( + &entry->object.vm_object, + &entry->offset, + (vm_size_t) (entry->end - entry->start)); + + entry->needs_copy = FALSE; + + lock_write_to_read(&share_map->lock); + } + else { + /* + * We're attempting to read a copy-on-write + * page -- don't allow writes. + */ + + prot &= (~VM_PROT_WRITE); + } + } + + /* + * Create an object if necessary. + */ + if (entry->object.vm_object == NULL) { + + if (lock_read_to_write(&share_map->lock)) { + if (share_map != map) + vm_map_unlock_read(map); + goto RetryLookup; + } + + entry->object.vm_object = vm_object_allocate( + (vm_size_t)(entry->end - entry->start)); + entry->offset = 0; + lock_write_to_read(&share_map->lock); + } + + /* + * Return the object/offset from this entry. If the entry + * was copy-on-write or empty, it has been fixed up. + */ + + *offset = (share_offset - entry->start) + entry->offset; + *object = entry->object.vm_object; + + /* + * Return whether this is the only map sharing this data. + */ + + if (!su) { + simple_lock(&share_map->ref_lock); + su = (share_map->ref_count == 1); + simple_unlock(&share_map->ref_lock); + } + + *out_prot = prot; + *single_use = su; + + return(KERN_SUCCESS); + +#undef RETURN +} + +/* + * vm_map_lookup_done: + * + * Releases locks acquired by a vm_map_lookup + * (according to the handle returned by that lookup). + */ + +void vm_map_lookup_done(map, entry) + register vm_map_t map; + vm_map_entry_t entry; +{ + /* + * If this entry references a map, unlock it first. 
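+ * vm_map_lookup left the sharing map read-locked so that
+ * the object/offset it returned would stay valid; locks
+ * are released here in the reverse order of acquisition.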
+ */ + + if (entry->is_a_map) + vm_map_unlock_read(entry->object.share_map); + + /* + * Unlock the main-level map + */ + + vm_map_unlock_read(map); +} + +/* + * Routine: vm_map_simplify + * Purpose: + * Attempt to simplify the map representation in + * the vicinity of the given starting address. + * Note: + * This routine is intended primarily to keep the + * kernel maps more compact -- they generally don't + * benefit from the "expand a map entry" technology + * at allocation time because the adjacent entry + * is often wired down. + */ +void vm_map_simplify(map, start) + vm_map_t map; + vm_offset_t start; +{ + vm_map_entry_t this_entry; + vm_map_entry_t prev_entry; + + vm_map_lock(map); + if ( + (vm_map_lookup_entry(map, start, &this_entry)) && + ((prev_entry = this_entry->prev) != &map->header) && + + (prev_entry->end == start) && + (map->is_main_map) && + + (prev_entry->is_a_map == FALSE) && + (prev_entry->is_sub_map == FALSE) && + + (this_entry->is_a_map == FALSE) && + (this_entry->is_sub_map == FALSE) && + + (prev_entry->inheritance == this_entry->inheritance) && + (prev_entry->protection == this_entry->protection) && + (prev_entry->max_protection == this_entry->max_protection) && + (prev_entry->wired_count == this_entry->wired_count) && + + (prev_entry->copy_on_write == this_entry->copy_on_write) && + (prev_entry->needs_copy == this_entry->needs_copy) && + + (prev_entry->object.vm_object == this_entry->object.vm_object) && + ((prev_entry->offset + (prev_entry->end - prev_entry->start)) + == this_entry->offset) + ) { + if (map->first_free == this_entry) + map->first_free = prev_entry; + + SAVE_HINT(map, prev_entry); + vm_map_entry_unlink(map, this_entry); + prev_entry->end = this_entry->end; + vm_object_deallocate(this_entry->object.vm_object); + vm_map_entry_dispose(map, this_entry); + } + vm_map_unlock(map); +} + +#if defined(DEBUG) || (NDDB > 0) +/* + * vm_map_print: [ debug ] + */ +void vm_map_print(map, full) + register vm_map_t map; + boolean_t full; +{ + register vm_map_entry_t entry; + extern int indent; + + iprintf("%s map 0x%x: pmap=0x%x,ref=%d,nentries=%d,version=%d\n", + (map->is_main_map ? "Task" : "Share"), + (int) map, (int) (map->pmap), map->ref_count, map->nentries, + map->timestamp); + + if (!full && indent) + return; + + indent += 2; + for (entry = map->header.next; entry != &map->header; + entry = entry->next) { + iprintf("map entry 0x%x: start=0x%x, end=0x%x, ", + (int) entry, (int) entry->start, (int) entry->end); + if (map->is_main_map) { + static char *inheritance_name[4] = + { "share", "copy", "none", "donate_copy"}; + printf("prot=%x/%x/%s, ", + entry->protection, + entry->max_protection, + inheritance_name[entry->inheritance]); + if (entry->wired_count != 0) + printf("wired, "); + } + + if (entry->is_a_map || entry->is_sub_map) { + printf("share=0x%x, offset=0x%x\n", + (int) entry->object.share_map, + (int) entry->offset); + if ((entry->prev == &map->header) || + (!entry->prev->is_a_map) || + (entry->prev->object.share_map != + entry->object.share_map)) { + indent += 2; + vm_map_print(entry->object.share_map, full); + indent -= 2; + } + + } + else { + printf("object=0x%x, offset=0x%x", + (int) entry->object.vm_object, + (int) entry->offset); + if (entry->copy_on_write) + printf(", copy (%s)", + entry->needs_copy ? 
"needed" : "done"); + printf("\n"); + + if ((entry->prev == &map->header) || + (entry->prev->is_a_map) || + (entry->prev->object.vm_object != + entry->object.vm_object)) { + indent += 2; + vm_object_print(entry->object.vm_object, full); + indent -= 2; + } + } + } + indent -= 2; +} +#endif /* defined(DEBUG) || (NDDB > 0) */ diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h new file mode 100644 index 000000000000..7066bd054765 --- /dev/null +++ b/sys/vm/vm_map.h @@ -0,0 +1,212 @@ +/* + * Copyright (c) 1991 Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_map.h 7.3 (Berkeley) 4/21/91 + * $Id: vm_map.h,v 1.2 1993/10/16 16:20:36 rgrimes Exp $ + */ + +/* + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + * + */ + +/* + * Virtual memory map module definitions. + */ + +#ifndef _VM_MAP_ +#define _VM_MAP_ + +/* + * Types defined: + * + * vm_map_t the high-level address map data structure. + * vm_map_entry_t an entry in an address map. + * vm_map_version_t a timestamp of a map, for use with vm_map_lookup + */ + +/* + * Objects which live in maps may be either VM objects, or + * another map (called a "sharing map") which denotes read-write + * sharing with other maps. + */ + +union vm_map_object { + struct vm_object *vm_object; /* object object */ + struct vm_map *share_map; /* share map */ + struct vm_map *sub_map; /* belongs to another map */ +}; + +typedef union vm_map_object vm_map_object_t; + +/* + * Address map entries consist of start and end addresses, + * a VM object (or sharing map) and offset into that object, + * and user-exported inheritance and protection information. + * Also included is control information for virtual copy operations. + */ +struct vm_map_entry { + struct vm_map_entry *prev; /* previous entry */ + struct vm_map_entry *next; /* next entry */ + vm_offset_t start; /* start address */ + vm_offset_t end; /* end address */ + union vm_map_object object; /* object I point to */ + vm_offset_t offset; /* offset into object */ + boolean_t is_a_map; /* Is "object" a map? */ + boolean_t is_sub_map; /* Is "object" a submap? */ + /* Only in sharing maps: */ + boolean_t copy_on_write; /* is data copy-on-write */ + boolean_t needs_copy; /* does object need to be copied */ + /* Only in task maps: */ + vm_prot_t protection; /* protection code */ + vm_prot_t max_protection; /* maximum protection */ + vm_inherit_t inheritance; /* inheritance */ + int wired_count; /* can be paged if = 0 */ +}; + +typedef struct vm_map_entry *vm_map_entry_t; + +/* + * Maps are doubly-linked lists of map entries, kept sorted + * by address. A single hint is provided to start + * searches again from the last successful search, + * insertion, or removal. + */ +struct vm_map { + struct pmap * pmap; /* Physical map */ + lock_data_t lock; /* Lock for map data */ + struct vm_map_entry header; /* List of entries */ + int nentries; /* Number of entries */ + vm_size_t size; /* virtual size */ + boolean_t is_main_map; /* Am I a main map? */ + int ref_count; /* Reference count */ + simple_lock_data_t ref_lock; /* Lock for ref_count field */ + vm_map_entry_t hint; /* hint for quick lookups */ + simple_lock_data_t hint_lock; /* lock for hint storage */ + vm_map_entry_t first_free; /* First free space hint */ + boolean_t entries_pageable; /* map entries pageable?? */ + unsigned int timestamp; /* Version number */ +#define min_offset header.start +#define max_offset header.end +}; + +typedef struct vm_map *vm_map_t; + +/* + * Map versions are used to validate a previous lookup attempt. + * + * Since lookup operations may involve both a main map and + * a sharing map, it is necessary to have a timestamp from each. + * [If the main map timestamp has changed, the share_map and + * associated timestamp are no longer valid; the map version + * does not include a reference for the imbedded share_map.] 
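+ *
+ * A lookup records (main_timestamp, share_map,
+ * share_timestamp); vm_map_verify can later compare the
+ * saved timestamps with the maps' current ones to decide
+ * whether a cached lookup result may still be trusted.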
+ */ +typedef struct { + int main_timestamp; + vm_map_t share_map; + int share_timestamp; +} vm_map_version_t; + +/* + * Macros: vm_map_lock, etc. + * Function: + * Perform locking on the data portion of a map. + */ + +#define vm_map_lock(map) { lock_write(&(map)->lock); (map)->timestamp++; } +#define vm_map_unlock(map) lock_write_done(&(map)->lock) +#define vm_map_lock_read(map) lock_read(&(map)->lock) +#define vm_map_unlock_read(map) lock_read_done(&(map)->lock) + +/* + * Exported procedures that operate on vm_map_t. + */ + +void vm_map_init(); +vm_map_t vm_map_create(); +void vm_map_deallocate(); +void vm_map_reference(); +int vm_map_find(); +int vm_map_remove(); +int vm_map_lookup(); +void vm_map_lookup_done(); +int vm_map_protect(); +int vm_map_inherit(); +int vm_map_copy(); +void vm_map_print(); +void vm_map_copy_entry(); +boolean_t vm_map_verify(); +void vm_map_verify_done(); + +/* + * Functions implemented as macros + */ +#define vm_map_min(map) ((map)->min_offset) +#define vm_map_max(map) ((map)->max_offset) +#define vm_map_pmap(map) ((map)->pmap) + +/* XXX: number of kernel maps and entries to statically allocate */ +#define MAX_KMAP 10 + +#ifdef OMIT +#define MAX_KMAPENT 500 +#else /* !OMIT*/ +#define MAX_KMAPENT 1000 /* 15 Aug 92*/ +#endif /* !OMIT*/ + +#endif _VM_MAP_ diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c new file mode 100644 index 000000000000..219fd50a1e72 --- /dev/null +++ b/sys/vm/vm_meter.c @@ -0,0 +1,144 @@ +/* + * Copyright (c) 1982, 1986, 1989 Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * from: @(#)vm_meter.c 7.11 (Berkeley) 4/20/91 + * $Id: vm_meter.c,v 1.2 1993/10/16 16:20:37 rgrimes Exp $ + */ + +#include "param.h" +#include "proc.h" +#include "systm.h" +#include "kernel.h" + +#include "vm_param.h" +#include "vmmeter.h" + +fixpt_t averunnable[3]; /* load average, of runnable procs */ + +int maxslp = MAXSLP; +int saferss = SAFERSS; + + +vmmeter() +{ + register unsigned *cp, *rp, *sp; + + if (time.tv_sec % 5 == 0) + vmtotal(); + if (proc0.p_slptime > maxslp/2) + wakeup((caddr_t)&proc0); +} + +vmtotal() +{ + register struct proc *p; + int nrun = 0; + + total.t_vm = 0; + total.t_avm = 0; + total.t_rm = 0; + total.t_arm = 0; + total.t_rq = 0; + total.t_dw = 0; + total.t_pw = 0; + total.t_sl = 0; + total.t_sw = 0; + for (p = allproc; p != NULL; p = p->p_nxt) { + if (p->p_flag & SSYS) + continue; + if (p->p_stat) { + switch (p->p_stat) { + + case SSLEEP: + if (p->p_pri <= PZERO && p->p_slptime == 0) + nrun++; + /* fall through */ + case SSTOP: +#ifdef notdef + if (p->p_flag & SPAGE) + total.t_pw++; + else +#endif + if (p->p_flag & SLOAD) { + if (p->p_pri <= PZERO) + total.t_dw++; + else if (p->p_slptime < maxslp) + total.t_sl++; + } else if (p->p_slptime < maxslp) + total.t_sw++; + if (p->p_slptime < maxslp) + goto active; + break; + + case SRUN: + case SIDL: + nrun++; + if (p->p_flag & SLOAD) + total.t_rq++; + else + total.t_sw++; +active: + break; + } + } + } + loadav(averunnable, nrun); +} + +/* + * Constants for averages over 1, 5, and 15 minutes + * when sampling at 5 second intervals. + */ +fixpt_t cexp[3] = { + 0.9200444146293232 * FSCALE, /* exp(-1/12) */ + 0.9834714538216174 * FSCALE, /* exp(-1/60) */ + 0.9944598480048967 * FSCALE, /* exp(-1/180) */ +}; + +/* + * Compute a tenex style load average of a quantity on + * 1, 5 and 15 minute intervals. + */ +loadav(avg, n) + register fixpt_t *avg; + int n; +{ + register int i; + + for (i = 0; i < 3; i++) + avg[i] = (cexp[i] * avg[i] + n * FSCALE * (FSCALE - cexp[i])) + >> FSHIFT; +#if defined(COMPAT_43) && (defined(vax) || defined(tahoe)) + for (i = 0; i < 3; i++) + avenrun[i] = (double) averunnable[i] / FSCALE; +#endif /* COMPAT_43 */ +} diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c new file mode 100644 index 000000000000..f963e25c8ffd --- /dev/null +++ b/sys/vm/vm_mmap.c @@ -0,0 +1,883 @@ +/* + * Copyright (c) 1988 University of Utah. + * Copyright (c) 1991 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Systems Programming Group of the University of Utah Computer + * Science Department. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: Utah $Hdr: vm_mmap.c 1.3 90/01/21$ + * from: @(#)vm_mmap.c 7.5 (Berkeley) 6/28/91 + * $Id: vm_mmap.c,v 1.8 1993/10/16 16:20:39 rgrimes Exp $ + */ + +/* + * Mapped file (mmap) interface to VM + */ + +#include "param.h" +#include "systm.h" +#include "filedesc.h" +#include "proc.h" +#include "vnode.h" +#include "specdev.h" +#include "file.h" +#include "mman.h" +#include "conf.h" + +#include "vm.h" +#include "vm_pager.h" +#include "vm_prot.h" +#include "vm_statistics.h" + +#ifdef DEBUG +int mmapdebug = 0; +#define MDB_FOLLOW 0x01 +#define MDB_SYNC 0x02 +#define MDB_MAPIT 0x04 +#endif + +/* ARGSUSED */ +getpagesize(p, uap, retval) + struct proc *p; + void *uap; + int *retval; +{ + + *retval = NBPG * CLSIZE; + return (0); +} + +struct sbrk_args { + int incr; +}; + +/* ARGSUSED */ +sbrk(p, uap, retval) + struct proc *p; + struct sbrk_args *uap; + int *retval; +{ + + /* Not yet implemented */ + return (EOPNOTSUPP); +} + +struct sstk_args { + int incr; +}; + +/* ARGSUSED */ +sstk(p, uap, retval) + struct proc *p; + struct sstk_args *uap; + int *retval; +{ + + /* Not yet implemented */ + return (EOPNOTSUPP); +} + +struct smmap_args { + caddr_t addr; + int len; + int prot; + int flags; + int fd; + off_t pos; +}; + +smmap(p, uap, retval) + struct proc *p; + register struct smmap_args *uap; + int *retval; +{ + register struct filedesc *fdp = p->p_fd; + register struct file *fp; + struct vnode *vp; + vm_offset_t addr; + vm_size_t size; + vm_prot_t maxprot; + vm_prot_t prot; + caddr_t handle; + int mtype, error; + int flags = uap->flags; + +#ifdef DEBUG + if (mmapdebug & MDB_FOLLOW) + printf("mmap(%d): addr %x len %x pro %x flg %x fd %d pos %x\n", + p->p_pid, uap->addr, uap->len, uap->prot, + uap->flags, uap->fd, uap->pos); +#endif + /* + * Make sure one of the sharing types is specified + */ + mtype = flags & MAP_TYPE; + switch (mtype) { + case MAP_FILE: + case MAP_ANON: + break; + default: + return(EINVAL); + } + /* + * Address (if FIXED) must be page aligned. + * Size is implicitly rounded to a page boundary. + */ + addr = (vm_offset_t) uap->addr; + if ((flags & MAP_FIXED) && (addr & page_mask) || uap->len < 0) + return(EINVAL); + size = (vm_size_t) round_page(uap->len); + if ((uap->flags & MAP_FIXED) && (addr + size > VM_MAXUSER_ADDRESS)) + return(EINVAL); + /* + * XXX if no hint provided for a non-fixed mapping place it after + * the end of the largest possible heap. + * + * There should really be a pmap call to determine a reasonable + * location. 
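+ *
+ * (The default below, round_page(vm_daddr + MAXDSIZ), just
+ * parks the mapping above the largest possible data
+ * segment, where it can never collide with brk(); it makes
+ * no attempt to be a good choice for any particular pmap.)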
+ */ + if (addr == 0 && (flags & MAP_FIXED) == 0) + addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ); + /* + * Mapping file or named anonymous, get fp for validation + */ + if (mtype == MAP_FILE || uap->fd != -1) { + if (((unsigned)uap->fd) >= fdp->fd_nfiles || + (fp = fdp->fd_ofiles[uap->fd]) == NULL) + return(EBADF); + } + /* + * If we are mapping a file we need to check various + * file/vnode related things. + */ + if (mtype == MAP_FILE) { + /* + * Obtain vnode and make sure it is of appropriate type + */ + if (fp->f_type != DTYPE_VNODE) + return(EINVAL); + vp = (struct vnode *)fp->f_data; + if (vp->v_type != VREG && vp->v_type != VCHR) + return(EINVAL); + /* + * Ensure that file protection and desired protection + * are compatible. Note that we only worry about writability + * if mapping is shared. + */ + if ((uap->prot & PROT_READ) && (fp->f_flag & FREAD) == 0 || + ((flags & MAP_SHARED) && + (uap->prot & PROT_WRITE) && (fp->f_flag & FWRITE) == 0)) + return(EACCES); + handle = (caddr_t)vp; + /* + * PATCH GVR 25-03-93 + * Map protections to MACH style + */ + if(uap->flags & MAP_SHARED) { + maxprot = VM_PROT_EXECUTE; + if (fp->f_flag & FREAD) + maxprot |= VM_PROT_READ; + if (fp->f_flag & FWRITE) + maxprot |= VM_PROT_WRITE; + } else + maxprot = VM_PROT_ALL; + } else if (uap->fd != -1) { + maxprot = VM_PROT_ALL; + handle = (caddr_t)fp; + } else { + maxprot = VM_PROT_ALL; + handle = NULL; + } + /* + * Map protections to MACH style + */ + prot = VM_PROT_NONE; + if (uap->prot & PROT_READ) + prot |= VM_PROT_READ; + if (uap->prot & PROT_WRITE) + prot |= VM_PROT_WRITE; + if (uap->prot & PROT_EXEC) + prot |= VM_PROT_EXECUTE; + + error = vm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot, + flags, handle, (vm_offset_t)uap->pos); + if (error == 0) + *retval = (int) addr; + return(error); +} + +struct msync_args { + caddr_t addr; + int len; +}; + +msync(p, uap, retval) + struct proc *p; + struct msync_args *uap; + int *retval; +{ + vm_offset_t addr, objoff, oaddr; + vm_size_t size, osize; + vm_prot_t prot, mprot; + vm_inherit_t inherit; + vm_object_t object; + boolean_t shared; + int rv; + +#ifdef DEBUG + if (mmapdebug & (MDB_FOLLOW|MDB_SYNC)) + printf("msync(%d): addr %x len %x\n", + p->p_pid, uap->addr, uap->len); +#endif + if (((int)uap->addr & page_mask) || uap->len < 0) + return(EINVAL); + addr = oaddr = (vm_offset_t)uap->addr; + osize = (vm_size_t)uap->len; + /* + * Region must be entirely contained in a single entry + */ + if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+osize, + TRUE)) + return(EINVAL); + /* + * Determine the object associated with that entry + * (object is returned locked on KERN_SUCCESS) + */ + rv = vm_region(&p->p_vmspace->vm_map, &addr, &size, &prot, &mprot, + &inherit, &shared, &object, &objoff); + if (rv != KERN_SUCCESS) + return(EINVAL); +#ifdef DEBUG + if (mmapdebug & MDB_SYNC) + printf("msync: region: object %x addr %x size %d objoff %d\n", + object, addr, size, objoff); +#endif + /* + * Do not msync non-vnoded backed objects. 
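+ * Anonymous memory and device mappings have no file behind
+ * them to synchronize with; only objects paged by the
+ * vnode pager (PG_VNODE) qualify.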
+ */ + if (object->internal || object->pager == NULL || + object->pager->pg_type != PG_VNODE) { + vm_object_unlock(object); + return(EINVAL); + } + objoff += oaddr - addr; + if (osize == 0) + osize = size; +#ifdef DEBUG + if (mmapdebug & MDB_SYNC) + printf("msync: cleaning/flushing object range [%x-%x)\n", + objoff, objoff+osize); +#endif + if (prot & VM_PROT_WRITE) + vm_object_page_clean(object, objoff, objoff+osize); + /* + * (XXX) + * Bummer, gotta flush all cached pages to ensure + * consistency with the file system cache. + */ + vm_object_page_remove(object, objoff, objoff+osize); + vm_object_unlock(object); + return(0); +} + +struct munmap_args { + caddr_t addr; + int len; +}; + +munmap(p, uap, retval) + register struct proc *p; + register struct munmap_args *uap; + int *retval; +{ + vm_offset_t addr; + vm_size_t size; + +#ifdef DEBUG + if (mmapdebug & MDB_FOLLOW) + printf("munmap(%d): addr %x len %x\n", + p->p_pid, uap->addr, uap->len); +#endif + + addr = (vm_offset_t) uap->addr; + if ((addr & page_mask) || uap->len < 0) + return(EINVAL); + size = (vm_size_t) round_page(uap->len); + if (size == 0) + return(0); + if (addr + size >= VM_MAXUSER_ADDRESS) + return(EINVAL); + if (!vm_map_is_allocated(&p->p_vmspace->vm_map, addr, addr+size, + FALSE)) + return(EINVAL); + /* returns nothing but KERN_SUCCESS anyway */ + (void) vm_map_remove(&p->p_vmspace->vm_map, addr, addr+size); + return(0); +} + +munmapfd(p, fd) + register struct proc *p; +{ +#ifdef DEBUG + if (mmapdebug & MDB_FOLLOW) + printf("munmapfd(%d): fd %d\n", p->p_pid, fd); +#endif + + /* + * XXX -- should vm_deallocate any regions mapped to this file + */ + p->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED; +} + +struct mprotect_args { + caddr_t addr; + int len; + int prot; +}; + +mprotect(p, uap, retval) + struct proc *p; + struct mprotect_args *uap; + int *retval; +{ + vm_offset_t addr; + vm_size_t size; + register vm_prot_t prot; + +#ifdef DEBUG + if (mmapdebug & MDB_FOLLOW) + printf("mprotect(%d): addr %x len %x prot %d\n", + p->p_pid, uap->addr, uap->len, uap->prot); +#endif + + addr = (vm_offset_t) uap->addr; + if ((addr & page_mask) || uap->len < 0) + return(EINVAL); + size = (vm_size_t) uap->len; + /* + * Map protections + */ + prot = VM_PROT_NONE; + if (uap->prot & PROT_READ) + prot |= VM_PROT_READ; + if (uap->prot & PROT_WRITE) + prot |= VM_PROT_WRITE; + if (uap->prot & PROT_EXEC) + prot |= VM_PROT_EXECUTE; + + switch (vm_map_protect(&p->p_vmspace->vm_map, addr, addr+size, prot, + FALSE)) { + case KERN_SUCCESS: + return (0); + case KERN_PROTECTION_FAILURE: + return (EACCES); + } + return (EINVAL); +} + +struct madvise_args { + caddr_t addr; + int len; + int behav; +}; + +/* ARGSUSED */ +madvise(p, uap, retval) + struct proc *p; + struct madvise_args *uap; + int *retval; +{ + + /* Not yet implemented */ + return (EOPNOTSUPP); +} + +struct mincore_args { + caddr_t addr; + int len; + char *vec; +}; + +/* ARGSUSED */ +mincore(p, uap, retval) + struct proc *p; + struct mincore_args *uap; + int *retval; +{ + + /* Not yet implemented */ + return (EOPNOTSUPP); +} + +/* + * Internal version of mmap. + * Currently used by mmap, exec, and sys5 shared memory. 
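+ * (XXX sysV shm presumably takes the named-anonymous case:
+ * MAP_ANON with a file pointer for a handle.)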
+ * Handle is: + * MAP_FILE: a vnode pointer + * MAP_ANON: NULL or a file pointer + */ +vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff) + register vm_map_t map; + register vm_offset_t *addr; + register vm_size_t size; + vm_prot_t prot; + vm_prot_t maxprot; + register int flags; + caddr_t handle; /* XXX should be vp */ + vm_offset_t foff; +{ + register vm_pager_t pager; + boolean_t fitit; + vm_object_t object; + struct vnode *vp; + int type; + int rv = KERN_SUCCESS; + + if (size == 0) + return (0); + + if ((flags & MAP_FIXED) == 0) { + fitit = TRUE; + *addr = round_page(*addr); + } else { + fitit = FALSE; + (void) vm_deallocate(map, *addr, size); + } + + /* + * Lookup/allocate pager. All except an unnamed anonymous lookup + * gain a reference to ensure continued existance of the object. + * (XXX the exception is to appease the pageout daemon) + */ + if ((flags & MAP_TYPE) == MAP_ANON) + type = PG_DFLT; + else { + vp = (struct vnode *)handle; + if (vp->v_type == VCHR) { + type = PG_DEVICE; + handle = (caddr_t)vp; + } else + type = PG_VNODE; + } + pager = vm_pager_allocate(type, handle, size, prot); + if (pager == NULL) + return (type == PG_DEVICE ? EINVAL : ENOMEM); + /* + * Find object and release extra reference gained by lookup + */ + object = vm_object_lookup(pager); + vm_object_deallocate(object); + + /* + * Anonymous memory. + */ + if ((flags & MAP_TYPE) == MAP_ANON) { + rv = vm_allocate_with_pager(map, addr, size, fitit, + pager, (vm_offset_t)foff, TRUE); + if (rv != KERN_SUCCESS) { + if (handle == NULL) + vm_pager_deallocate(pager); + else + vm_object_deallocate(object); + goto out; + } + /* + * The object of unnamed anonymous regions was just created + * find it for pager_cache. + */ + if (handle == NULL) + object = vm_object_lookup(pager); + + /* + * Don't cache anonymous objects. + * Loses the reference gained by vm_pager_allocate. + */ + (void) pager_cache(object, FALSE); +#ifdef DEBUG + if (mmapdebug & MDB_MAPIT) + printf("vm_mmap(%d): ANON *addr %x size %x pager %x\n", + curproc->p_pid, *addr, size, pager); +#endif + } + /* + * Must be type MAP_FILE. + * Distinguish between character special and regular files. + */ + else if (vp->v_type == VCHR) { + rv = vm_allocate_with_pager(map, addr, size, fitit, + pager, (vm_offset_t)foff, FALSE); + /* + * Uncache the object and lose the reference gained + * by vm_pager_allocate(). If the call to + * vm_allocate_with_pager() was sucessful, then we + * gained an additional reference ensuring the object + * will continue to exist. If the call failed then + * the deallocate call below will terminate the + * object which is fine. + */ + (void) pager_cache(object, FALSE); + if (rv != KERN_SUCCESS) + goto out; + } + /* + * A regular file + */ + else { +#ifdef DEBUG + if (object == NULL) + printf("vm_mmap: no object: vp %x, pager %x\n", + vp, pager); +#endif + /* + * Map it directly. + * Allows modifications to go out to the vnode. + */ + if (flags & MAP_SHARED) { + rv = vm_allocate_with_pager(map, addr, size, + fitit, pager, + (vm_offset_t)foff, FALSE); + if (rv != KERN_SUCCESS) { + vm_object_deallocate(object); + goto out; + } + /* + * Don't cache the object. This is the easiest way + * of ensuring that data gets back to the filesystem + * because vnode_pager_deallocate() will fsync the + * vnode. pager_cache() will lose the extra ref. + */ + if (prot & VM_PROT_WRITE) + pager_cache(object, FALSE); + else + vm_object_deallocate(object); + } + /* + * Copy-on-write of file. Two flavors. 
+ * MAP_COPY is true COW, you essentially get a snapshot of + * the region at the time of mapping. MAP_PRIVATE means only + * that your changes are not reflected back to the object. + * Changes made by others will be seen. + */ + else { + vm_map_t tmap; + vm_offset_t off; + + /* locate and allocate the target address space */ + rv = vm_map_find(map, NULL, (vm_offset_t)0, + addr, size, fitit); + if (rv != KERN_SUCCESS) { + vm_object_deallocate(object); + goto out; + } + tmap = vm_map_create(pmap_create(size), VM_MIN_ADDRESS, + VM_MIN_ADDRESS+size, TRUE); + off = VM_MIN_ADDRESS; + rv = vm_allocate_with_pager(tmap, &off, size, + TRUE, pager, + (vm_offset_t)foff, FALSE); + if (rv != KERN_SUCCESS) { + vm_object_deallocate(object); + vm_map_deallocate(tmap); + goto out; + } + /* + * (XXX) + * MAP_PRIVATE implies that we see changes made by + * others. To ensure that we need to guarentee that + * no copy object is created (otherwise original + * pages would be pushed to the copy object and we + * would never see changes made by others). We + * totally sleeze it right now by marking the object + * internal temporarily. + */ + if ((flags & MAP_COPY) == 0) + object->internal = TRUE; + rv = vm_map_copy(map, tmap, *addr, size, off, + FALSE, FALSE); + object->internal = FALSE; + /* + * (XXX) + * My oh my, this only gets worse... + * Force creation of a shadow object so that + * vm_map_fork will do the right thing. + */ + if ((flags & MAP_COPY) == 0) { + vm_map_t tmap; + vm_map_entry_t tentry; + vm_object_t tobject; + vm_offset_t toffset; + vm_prot_t tprot; + boolean_t twired, tsu; + + tmap = map; + vm_map_lookup(&tmap, *addr, VM_PROT_WRITE, + &tentry, &tobject, &toffset, + &tprot, &twired, &tsu); + vm_map_lookup_done(tmap, tentry); + } + /* + * (XXX) + * Map copy code cannot detect sharing unless a + * sharing map is involved. So we cheat and write + * protect everything ourselves. + */ + vm_object_pmap_copy(object, (vm_offset_t)foff, + (vm_offset_t)foff+size); + vm_object_deallocate(object); + vm_map_deallocate(tmap); + if (rv != KERN_SUCCESS) + goto out; + } +#ifdef DEBUG + if (mmapdebug & MDB_MAPIT) + printf("vm_mmap(%d): FILE *addr %x size %x pager %x\n", + curproc->p_pid, *addr, size, pager); +#endif + } + /* + * Correct protection (default is VM_PROT_ALL). + * Note that we set the maximum protection. This may not be + * entirely correct. Maybe the maximum protection should be based + * on the object permissions where it makes sense (e.g. a vnode). + * + * XXX Changed my mind: leave max prot at VM_PROT_ALL. + * PATCH GVR 25-03-93: + * Changed again: indeed set maximum protection based on + * object permissions. + */ + rv = vm_map_protect(map, *addr, *addr+size, prot, FALSE); + if (rv != KERN_SUCCESS) { + (void) vm_deallocate(map, *addr, size); + goto out; + } + /* + * We only need to set max_protection in case it's + * unequal to its default, which is VM_PROT_DEFAULT. + */ + if(maxprot != VM_PROT_DEFAULT) { + rv = vm_map_protect(map, *addr, *addr+size, maxprot, TRUE); + if (rv != KERN_SUCCESS) { + (void) vm_deallocate(map, *addr, size); + goto out; + } + } + /* + * Shared memory is also shared with children. 
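+ *
+ * Marking the range VM_INHERIT_SHARE makes a later fork
+ * hand the child the same sharing map rather than a
+ * copy-on-write snapshot; see the VM_INHERIT_SHARE case
+ * of vmspace_fork in vm_map.c.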
+ */ + if (flags & MAP_SHARED) { + rv = vm_inherit(map, *addr, size, VM_INHERIT_SHARE); + if (rv != KERN_SUCCESS) { + (void) vm_deallocate(map, *addr, size); + goto out; + } + } +out: +#ifdef DEBUG + if (mmapdebug & MDB_MAPIT) + printf("vm_mmap: rv %d\n", rv); +#endif + switch (rv) { + case KERN_SUCCESS: + return (0); + case KERN_INVALID_ADDRESS: + case KERN_NO_SPACE: + return (ENOMEM); + case KERN_PROTECTION_FAILURE: + return (EACCES); + default: + return (EINVAL); + } +} + +/* + * Internal bastardized version of MACHs vm_region system call. + * Given address and size it returns map attributes as well + * as the (locked) object mapped at that location. + */ +vm_region(map, addr, size, prot, max_prot, inheritance, shared, object, objoff) + vm_map_t map; + vm_offset_t *addr; /* IN/OUT */ + vm_size_t *size; /* OUT */ + vm_prot_t *prot; /* OUT */ + vm_prot_t *max_prot; /* OUT */ + vm_inherit_t *inheritance; /* OUT */ + boolean_t *shared; /* OUT */ + vm_object_t *object; /* OUT */ + vm_offset_t *objoff; /* OUT */ +{ + vm_map_entry_t tmp_entry; + register + vm_map_entry_t entry; + register + vm_offset_t tmp_offset; + vm_offset_t start; + + if (map == NULL) + return(KERN_INVALID_ARGUMENT); + + start = *addr; + + vm_map_lock_read(map); + if (!vm_map_lookup_entry(map, start, &tmp_entry)) { + if ((entry = tmp_entry->next) == &map->header) { + vm_map_unlock_read(map); + return(KERN_NO_SPACE); + } + start = entry->start; + *addr = start; + } else + entry = tmp_entry; + + *prot = entry->protection; + *max_prot = entry->max_protection; + *inheritance = entry->inheritance; + + tmp_offset = entry->offset + (start - entry->start); + *size = (entry->end - start); + + if (entry->is_a_map) { + register vm_map_t share_map; + vm_size_t share_size; + + share_map = entry->object.share_map; + + vm_map_lock_read(share_map); + (void) vm_map_lookup_entry(share_map, tmp_offset, &tmp_entry); + + if ((share_size = (tmp_entry->end - tmp_offset)) < *size) + *size = share_size; + + vm_object_lock(tmp_entry->object); + *object = tmp_entry->object.vm_object; + *objoff = tmp_entry->offset + (tmp_offset - tmp_entry->start); + + *shared = (share_map->ref_count != 1); + vm_map_unlock_read(share_map); + } else { + vm_object_lock(entry->object); + *object = entry->object.vm_object; + *objoff = tmp_offset; + + *shared = FALSE; + } + + vm_map_unlock_read(map); + + return(KERN_SUCCESS); +} + +/* + * Yet another bastard routine. + */ +vm_allocate_with_pager(map, addr, size, fitit, pager, poffset, internal) + register vm_map_t map; + register vm_offset_t *addr; + register vm_size_t size; + boolean_t fitit; + vm_pager_t pager; + vm_offset_t poffset; + boolean_t internal; +{ + register vm_object_t object; + register int result; + + if (map == NULL) + return(KERN_INVALID_ARGUMENT); + + *addr = trunc_page(*addr); + size = round_page(size); + + /* + * Lookup the pager/paging-space in the object cache. + * If it's not there, then create a new object and cache + * it. + */ + object = vm_object_lookup(pager); + vm_stat.lookups++; + if (object == NULL) { + object = vm_object_allocate(size); + vm_object_enter(object, pager); + } else + vm_stat.hits++; + object->internal = internal; + + result = vm_map_find(map, object, poffset, addr, size, fitit); + if (result != KERN_SUCCESS) + vm_object_deallocate(object); + else if (pager != NULL) + vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE); + return(result); +} + +/* + * XXX: this routine belongs in vm_map.c. 
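+ * It lives here for now only because its current callers,
+ * msync() and munmap() above, are in this file.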
+ * + * Returns TRUE if the range [start - end) is allocated in either + * a single entry (single_entry == TRUE) or multiple contiguous + * entries (single_entry == FALSE). + * + * start and end should be page aligned. + */ +boolean_t +vm_map_is_allocated(map, start, end, single_entry) + vm_map_t map; + vm_offset_t start, end; + boolean_t single_entry; +{ + vm_map_entry_t mapent; + register vm_offset_t nend; + + vm_map_lock_read(map); + + /* + * Start address not in any entry + */ + if (!vm_map_lookup_entry(map, start, &mapent)) { + vm_map_unlock_read(map); + return (FALSE); + } + /* + * Find the maximum stretch of contiguously allocated space + */ + nend = mapent->end; + if (!single_entry) { + mapent = mapent->next; + while (mapent != &map->header && mapent->start == nend) { + nend = mapent->end; + mapent = mapent->next; + } + } + + vm_map_unlock_read(map); + return (end <= nend); +} diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c new file mode 100644 index 000000000000..56c9bd954d9e --- /dev/null +++ b/sys/vm/vm_object.c @@ -0,0 +1,1541 @@ +/* + * Copyright (c) 1991 Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_object.c 7.4 (Berkeley) 5/7/91 + * $Id: vm_object.c,v 1.6.2.1 1993/11/14 21:20:24 rgrimes Exp $ + */ + +/* + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. 
+ * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + */ + +/* + * Virtual memory object module. + */ + +#include "ddb.h" + +#include "param.h" +#include "malloc.h" + +#include "vm.h" +#include "vm_page.h" + +/* + * Virtual memory objects maintain the actual data + * associated with allocated virtual memory. A given + * page of memory exists within exactly one object. + * + * An object is only deallocated when all "references" + * are given up. Only one "reference" to a given + * region of an object should be writeable. + * + * Associated with each object is a list of all resident + * memory pages belonging to that object; this list is + * maintained by the "vm_page" module, and locked by the object's + * lock. + * + * Each object also records a "pager" routine which is + * used to retrieve (and store) pages to the proper backing + * storage. In addition, objects may be backed by other + * objects from which they were virtual-copied. + * + * The only items within the object structure which are + * modified after time of creation are: + * reference count locked by object's lock + * pager routine locked by object's lock + * + */ + +struct vm_object kernel_object_store; +struct vm_object kmem_object_store; + +#define VM_OBJECT_HASH_COUNT 157 + +int vm_cache_max = 100; /* can patch if necessary */ +queue_head_t vm_object_hashtable[VM_OBJECT_HASH_COUNT]; + +long object_collapses = 0; +long object_bypasses = 0; + +/* + * vm_object_init: + * + * Initialize the VM objects module. + */ +void vm_object_init() +{ + register int i; + + queue_init(&vm_object_cached_list); + queue_init(&vm_object_list); + vm_object_count = 0; + simple_lock_init(&vm_cache_lock); + simple_lock_init(&vm_object_list_lock); + + for (i = 0; i < VM_OBJECT_HASH_COUNT; i++) + queue_init(&vm_object_hashtable[i]); + + kernel_object = &kernel_object_store; + _vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS, + kernel_object); + + kmem_object = &kmem_object_store; + _vm_object_allocate(VM_KMEM_SIZE + VM_MBUF_SIZE, kmem_object); +} + +/* + * vm_object_allocate: + * + * Returns a new object with the given size. 
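+ *
+ *	Editor's sketch (not part of the original source): a minimal
+ *	caller, where "len" is a hypothetical byte count.  The returned
+ *	object carries one reference, released with vm_object_deallocate():
+ *
+ *		vm_object_t object;
+ *
+ *		object = vm_object_allocate(round_page(len));
+ *		vm_object_deallocate(object);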
+ */ + +vm_object_t vm_object_allocate(size) + vm_size_t size; +{ + register vm_object_t result; + + result = (vm_object_t) + malloc((u_long)sizeof *result, M_VMOBJ, M_WAITOK); + + _vm_object_allocate(size, result); + + return(result); +} + +_vm_object_allocate(size, object) + vm_size_t size; + register vm_object_t object; +{ + queue_init(&object->memq); + vm_object_lock_init(object); + object->ref_count = 1; + object->resident_page_count = 0; + object->size = size; + object->can_persist = FALSE; + object->paging_in_progress = 0; + object->copy = NULL; + + /* + * Object starts out read-write, with no pager. + */ + + object->pager = NULL; + object->pager_ready = FALSE; + object->internal = TRUE; /* vm_allocate_with_pager will reset */ + object->paging_offset = 0; + object->shadow = NULL; + object->shadow_offset = (vm_offset_t) 0; + + simple_lock(&vm_object_list_lock); + queue_enter(&vm_object_list, object, vm_object_t, object_list); + vm_object_count++; + simple_unlock(&vm_object_list_lock); +} + +/* + * vm_object_reference: + * + * Gets another reference to the given object. + */ +void vm_object_reference(object) + register vm_object_t object; +{ + if (object == NULL) + return; + + vm_object_lock(object); + object->ref_count++; + vm_object_unlock(object); +} + +/* + * vm_object_deallocate: + * + * Release a reference to the specified object, + * gained either through a vm_object_allocate + * or a vm_object_reference call. When all references + * are gone, storage associated with this object + * may be relinquished. + * + * No object may be locked. + */ +void vm_object_deallocate(object) + register vm_object_t object; +{ + vm_object_t temp; + + while (object != NULL) { + + /* + * The cache holds a reference (uncounted) to + * the object; we must lock it before removing + * the object. + */ + + vm_object_cache_lock(); + + /* + * Lose the reference + */ + vm_object_lock(object); + if (--(object->ref_count) != 0) { + + /* + * If there are still references, then + * we are done. + */ + vm_object_unlock(object); + vm_object_cache_unlock(); + return; + } + + /* + * See if this object can persist. If so, enter + * it in the cache, then deactivate all of its + * pages. + */ + + if (object->can_persist) { +#ifdef DIAGNOSTIC + register vm_page_t p; + + /* + * Check for dirty pages in object + * Print warning as this may signify kernel bugs + * pk@cs.few.eur.nl - 4/15/93 + */ + p = (vm_page_t) queue_first(&object->memq); + while (!queue_end(&object->memq, (queue_entry_t) p)) { + VM_PAGE_CHECK(p); + + if (pmap_is_modified(VM_PAGE_TO_PHYS(p)) || + !p->clean) { + + printf("vm_object_dealloc: persistent object %x isn't clean\n", object); + goto cant_persist; + } + + p = (vm_page_t) queue_next(&p->listq); + } +#endif /* DIAGNOSTIC */ + + queue_enter(&vm_object_cached_list, object, + vm_object_t, cached_list); + vm_object_cached++; + vm_object_cache_unlock(); + + vm_object_deactivate_pages(object); + vm_object_unlock(object); + + vm_object_cache_trim(); + return; + } + cant_persist:; + + /* + * Make sure no one can look us up now. + */ + vm_object_remove(object->pager); + vm_object_cache_unlock(); + + temp = object->shadow; + vm_object_terminate(object); + /* unlocks and deallocates object */ + object = temp; + } +} + + +/* + * vm_object_terminate actually destroys the specified object, freeing + * up all previously used resources. + * + * The object must be locked. 
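+ *
+ *	Editor's note (not in the original source): most code does not
+ *	call this directly; vm_object_deallocate() reaches it once the
+ *	last reference is dropped and the object cannot persist, roughly:
+ *
+ *		vm_object_lock(object);
+ *		if (--object->ref_count == 0 && !object->can_persist) {
+ *			temp = object->shadow;
+ *			vm_object_terminate(object);
+ *			object = temp;
+ *		}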
+ */ +void vm_object_terminate(object) + register vm_object_t object; +{ + register vm_page_t p; + vm_object_t shadow_object; + + /* + * Detach the object from its shadow if we are the shadow's + * copy. + */ + if ((shadow_object = object->shadow) != NULL) { + vm_object_lock(shadow_object); + if (shadow_object->copy == object) + shadow_object->copy = NULL; +#if 0 + else if (shadow_object->copy != NULL) + panic("vm_object_terminate: copy/shadow inconsistency"); +#endif + vm_object_unlock(shadow_object); + } + + /* + * Wait until the pageout daemon is through + * with the object. + */ + + while (object->paging_in_progress != 0) { + vm_object_sleep(object, object, FALSE); + vm_object_lock(object); + } + + + /* + * While the paging system is locked, + * pull the object's pages off the active + * and inactive queues. This keeps the + * pageout daemon from playing with them + * during vm_pager_deallocate. + * + * We can't free the pages yet, because the + * object's pager may have to write them out + * before deallocating the paging space. + */ + + p = (vm_page_t) queue_first(&object->memq); + while (!queue_end(&object->memq, (queue_entry_t) p)) { + VM_PAGE_CHECK(p); + + vm_page_lock_queues(); + if (p->active) { + queue_remove(&vm_page_queue_active, p, vm_page_t, + pageq); + p->active = FALSE; + vm_page_active_count--; + } + + if (p->inactive) { + queue_remove(&vm_page_queue_inactive, p, vm_page_t, + pageq); + p->inactive = FALSE; + vm_page_inactive_count--; + } + vm_page_unlock_queues(); + p = (vm_page_t) queue_next(&p->listq); + } + + vm_object_unlock(object); + + if (object->paging_in_progress != 0) + panic("vm_object_deallocate: pageout in progress"); + + /* + * Clean and free the pages, as appropriate. + * All references to the object are gone, + * so we don't need to lock it. + */ + + if (!object->internal) { + vm_object_lock(object); + vm_object_page_clean(object, 0, 0); + vm_object_unlock(object); + } + while (!queue_empty(&object->memq)) { + p = (vm_page_t) queue_first(&object->memq); + + VM_PAGE_CHECK(p); + + vm_page_lock_queues(); + vm_page_free(p); + vm_page_unlock_queues(); + } + + /* + * Let the pager know object is dead. + */ + + if (object->pager != NULL) + vm_pager_deallocate(object->pager); + + + simple_lock(&vm_object_list_lock); + queue_remove(&vm_object_list, object, vm_object_t, object_list); + vm_object_count--; + simple_unlock(&vm_object_list_lock); + + /* + * Free the space for the object. + */ + + free((caddr_t)object, M_VMOBJ); +} + +/* + * vm_object_page_clean + * + * Clean all dirty pages in the specified range of object. + * Leaves page on whatever queue it is currently on. + * + * Odd semantics: if start == end, we clean everything. + * + * The object must be locked. 
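+ *
+ *	Editor's sketch (not part of the original source): to flush every
+ *	dirty page of an object, pass start == end, as
+ *	vm_object_terminate() does for external objects:
+ *
+ *		vm_object_lock(object);
+ *		vm_object_page_clean(object, 0, 0);
+ *		vm_object_unlock(object);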
+ */ +vm_object_page_clean(object, start, end) + register vm_object_t object; + register vm_offset_t start; + register vm_offset_t end; +{ + register vm_page_t p; + + if (object->pager == NULL) + return; + +again: + p = (vm_page_t) queue_first(&object->memq); + while (!queue_end(&object->memq, (queue_entry_t) p)) { + if (start == end || + p->offset >= start && p->offset < end) { + if (p->clean && pmap_is_modified(VM_PAGE_TO_PHYS(p))) + p->clean = FALSE; + pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE); + if (!p->clean) { + p->busy = TRUE; + object->paging_in_progress++; + vm_object_unlock(object); + (void) vm_pager_put(object->pager, p, TRUE); + vm_object_lock(object); + object->paging_in_progress--; + p->busy = FALSE; + PAGE_WAKEUP(p); + goto again; + } + } + p = (vm_page_t) queue_next(&p->listq); + } +} + +/* + * vm_object_deactivate_pages + * + * Deactivate all pages in the specified object. (Keep its pages + * in memory even though it is no longer referenced.) + * + * The object must be locked. + */ +vm_object_deactivate_pages(object) + register vm_object_t object; +{ + register vm_page_t p, next; + + p = (vm_page_t) queue_first(&object->memq); + while (!queue_end(&object->memq, (queue_entry_t) p)) { + next = (vm_page_t) queue_next(&p->listq); + vm_page_lock_queues(); + if (!p->busy) + vm_page_deactivate(p); /* optimisation from mach 3.0 - + * andrew@werple.apana.org.au, + * Feb '93 + */ + vm_page_unlock_queues(); + p = next; + } +} + +/* + * Trim the object cache to size. + */ +vm_object_cache_trim() +{ + register vm_object_t object; + + vm_object_cache_lock(); + while (vm_object_cached > vm_cache_max) { + object = (vm_object_t) queue_first(&vm_object_cached_list); + vm_object_cache_unlock(); + + if (object != vm_object_lookup(object->pager)) + panic("vm_object_deactivate: I'm sooo confused."); + + pager_cache(object, FALSE); + + vm_object_cache_lock(); + } + vm_object_cache_unlock(); +} + + +/* + * vm_object_shutdown() + * + * Shut down the object system. Unfortunately, while we + * may be trying to do this, init is happily waiting for + * processes to exit, and therefore will be causing some objects + * to be deallocated. To handle this, we gain a fake reference + * to all objects we release paging areas for. This will prevent + * a duplicate deallocation. This routine is probably full of + * race conditions! + */ + +void vm_object_shutdown() +{ + register vm_object_t object; + + /* + * Clean up the object cache *before* we screw up the reference + * counts on all of the objects. + */ + + vm_object_cache_clear(); + + printf("free paging spaces: "); + + /* + * First we gain a reference to each object so that + * no one else will deallocate them. + */ + + simple_lock(&vm_object_list_lock); + object = (vm_object_t) queue_first(&vm_object_list); + while (!queue_end(&vm_object_list, (queue_entry_t) object)) { + vm_object_reference(object); + object = (vm_object_t) queue_next(&object->object_list); + } + simple_unlock(&vm_object_list_lock); + + /* + * Now we deallocate all the paging areas. We don't need + * to lock anything because we've reduced to a single + * processor while shutting down. This also assumes that + * no new objects are being created. 
+ */ + + object = (vm_object_t) queue_first(&vm_object_list); + while (!queue_end(&vm_object_list, (queue_entry_t) object)) { + if (object->pager != NULL) + vm_pager_deallocate(object->pager); + object = (vm_object_t) queue_next(&object->object_list); + printf("."); + } + printf("done.\n"); +} + +/* + * vm_object_pmap_copy: + * + * Makes all physical pages in the specified + * object range copy-on-write. No writeable + * references to these pages should remain. + * + * The object must *not* be locked. + */ +void vm_object_pmap_copy(object, start, end) + register vm_object_t object; + register vm_offset_t start; + register vm_offset_t end; +{ + register vm_page_t p; + + if (object == NULL) + return; + + vm_object_lock(object); + p = (vm_page_t) queue_first(&object->memq); + while (!queue_end(&object->memq, (queue_entry_t) p)) { + if ((start <= p->offset) && (p->offset < end)) { + pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ); + p->copy_on_write = TRUE; + } + p = (vm_page_t) queue_next(&p->listq); + } + vm_object_unlock(object); +} + +/* + * vm_object_pmap_remove: + * + * Removes all physical pages in the specified + * object range from all physical maps. + * + * The object must *not* be locked. + */ +void vm_object_pmap_remove(object, start, end) + register vm_object_t object; + register vm_offset_t start; + register vm_offset_t end; +{ + register vm_page_t p; + + if (object == NULL) + return; + + vm_object_lock(object); + p = (vm_page_t) queue_first(&object->memq); + while (!queue_end(&object->memq, (queue_entry_t) p)) { + if ((start <= p->offset) && (p->offset < end)) + pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE); + p = (vm_page_t) queue_next(&p->listq); + } + vm_object_unlock(object); +} + +/* + * vm_object_copy: + * + * Create a new object which is a copy of an existing + * object, and mark all of the pages in the existing + * object 'copy-on-write'. The new object has one reference. + * Returns the new object. + * + * May defer the copy until later if the object is not backed + * up by a non-default pager. + */ +void vm_object_copy(src_object, src_offset, size, + dst_object, dst_offset, src_needs_copy) + register vm_object_t src_object; + vm_offset_t src_offset; + vm_size_t size; + vm_object_t *dst_object; /* OUT */ + vm_offset_t *dst_offset; /* OUT */ + boolean_t *src_needs_copy; /* OUT */ +{ + register vm_object_t new_copy; + register vm_object_t old_copy; + vm_offset_t new_start, new_end; + + register vm_page_t p; + + if (src_object == NULL) { + /* + * Nothing to copy + */ + *dst_object = NULL; + *dst_offset = 0; + *src_needs_copy = FALSE; + return; + } + + /* + * If the object's pager is null_pager or the + * default pager, we don't have to make a copy + * of it. Instead, we set the needs copy flag and + * make a shadow later. + */ + + vm_object_lock(src_object); + if (src_object->pager == NULL || + src_object->internal) { + + /* + * Make another reference to the object + */ + src_object->ref_count++; + + /* + * Mark all of the pages copy-on-write. + */ + for (p = (vm_page_t) queue_first(&src_object->memq); + !queue_end(&src_object->memq, (queue_entry_t)p); + p = (vm_page_t) queue_next(&p->listq)) { + if (src_offset <= p->offset && + p->offset < src_offset + size) + p->copy_on_write = TRUE; + } + vm_object_unlock(src_object); + + *dst_object = src_object; + *dst_offset = src_offset; + + /* + * Must make a shadow when write is desired + */ + *src_needs_copy = TRUE; + return; + } + + /* + * Try to collapse the object before copying it. 
+ */ + vm_object_collapse(src_object); + + /* + * If the object has a pager, the pager wants to + * see all of the changes. We need a copy-object + * for the changed pages. + * + * If there is a copy-object, and it is empty, + * no changes have been made to the object since the + * copy-object was made. We can use the same copy- + * object. + */ + + Retry1: + old_copy = src_object->copy; + if (old_copy != NULL) { + /* + * Try to get the locks (out of order) + */ + if (!vm_object_lock_try(old_copy)) { + vm_object_unlock(src_object); + + /* should spin a bit here... */ + vm_object_lock(src_object); + goto Retry1; + } + + if (old_copy->resident_page_count == 0 && + old_copy->pager == NULL) { + /* + * Return another reference to + * the existing copy-object. + */ + old_copy->ref_count++; + vm_object_unlock(old_copy); + vm_object_unlock(src_object); + *dst_object = old_copy; + *dst_offset = src_offset; + *src_needs_copy = FALSE; + return; + } + vm_object_unlock(old_copy); + } + vm_object_unlock(src_object); + + /* + * If the object has a pager, the pager wants + * to see all of the changes. We must make + * a copy-object and put the changed pages there. + * + * The copy-object is always made large enough to + * completely shadow the original object, since + * it may have several users who want to shadow + * the original object at different points. + */ + + new_copy = vm_object_allocate(src_object->size); + + Retry2: + vm_object_lock(src_object); + /* + * Copy object may have changed while we were unlocked + */ + old_copy = src_object->copy; + if (old_copy != NULL) { + /* + * Try to get the locks (out of order) + */ + if (!vm_object_lock_try(old_copy)) { + vm_object_unlock(src_object); + goto Retry2; + } + + /* + * Consistency check + */ + if (old_copy->shadow != src_object || + old_copy->shadow_offset != (vm_offset_t) 0) + panic("vm_object_copy: copy/shadow inconsistency"); + + /* + * Make the old copy-object shadow the new one. + * It will receive no more pages from the original + * object. + */ + + src_object->ref_count--; /* remove ref. from old_copy */ + old_copy->shadow = new_copy; + new_copy->ref_count++; /* locking not needed - we + have the only pointer */ + vm_object_unlock(old_copy); /* done with old_copy */ + } + + new_start = (vm_offset_t) 0; /* always shadow original at 0 */ + new_end = (vm_offset_t) new_copy->size; /* for the whole object */ + + /* + * Point the new copy at the existing object. + */ + + new_copy->shadow = src_object; + new_copy->shadow_offset = new_start; + src_object->ref_count++; + src_object->copy = new_copy; + + /* + * Mark all the affected pages of the existing object + * copy-on-write. + */ + p = (vm_page_t) queue_first(&src_object->memq); + while (!queue_end(&src_object->memq, (queue_entry_t) p)) { + if ((new_start <= p->offset) && (p->offset < new_end)) + p->copy_on_write = TRUE; + p = (vm_page_t) queue_next(&p->listq); + } + + vm_object_unlock(src_object); + + *dst_object = new_copy; + *dst_offset = src_offset - new_start; + *src_needs_copy = FALSE; +} + +/* + * vm_object_shadow: + * + * Create a new object which is backed by the + * specified existing object range. The source + * object reference is deallocated. + * + * The new object and offset into that object + * are returned in the source parameters. 
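+ *
+ *	Editor's sketch (not part of the original source): a copy-on-write
+ *	caller passes its object/offset variables in place, and both are
+ *	updated to refer to the new shadow ("entry" here is hypothetical):
+ *
+ *		vm_object_shadow(&entry->object.vm_object, &entry->offset,
+ *				 entry->end - entry->start);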
+ */ + +void vm_object_shadow(object, offset, length) + vm_object_t *object; /* IN/OUT */ + vm_offset_t *offset; /* IN/OUT */ + vm_size_t length; +{ + register vm_object_t source; + register vm_object_t result; + + source = *object; + + /* + * Allocate a new object with the given length + */ + + if ((result = vm_object_allocate(length)) == NULL) + panic("vm_object_shadow: no object for shadowing"); + + /* + * The new object shadows the source object, adding + * a reference to it. Our caller changes his reference + * to point to the new object, removing a reference to + * the source object. Net result: no change of reference + * count. + */ + result->shadow = source; + + /* + * Store the offset into the source object, + * and fix up the offset into the new object. + */ + + result->shadow_offset = *offset; + + /* + * Return the new things + */ + + *offset = 0; + *object = result; +} + +/* + * Set the specified object's pager to the specified pager. + */ + +void vm_object_setpager(object, pager, paging_offset, + read_only) + vm_object_t object; + vm_pager_t pager; + vm_offset_t paging_offset; + boolean_t read_only; +{ +#ifdef lint + read_only++; /* No longer used */ +#endif lint + + vm_object_lock(object); /* XXX ? */ + object->pager = pager; + object->paging_offset = paging_offset; + vm_object_unlock(object); /* XXX ? */ +} + +/* + * vm_object_hash hashes the pager/id pair. + */ + +#define vm_object_hash(pager) \ + (((unsigned)pager)%VM_OBJECT_HASH_COUNT) + +/* + * vm_object_lookup looks in the object cache for an object with the + * specified pager and paging id. + */ + +vm_object_t vm_object_lookup(pager) + vm_pager_t pager; +{ + register queue_t bucket; + register vm_object_hash_entry_t entry; + vm_object_t object; + + bucket = &vm_object_hashtable[vm_object_hash(pager)]; + + vm_object_cache_lock(); + + entry = (vm_object_hash_entry_t) queue_first(bucket); + while (!queue_end(bucket, (queue_entry_t) entry)) { + object = entry->object; + if (object->pager == pager) { + vm_object_lock(object); + if (object->ref_count == 0) { + queue_remove(&vm_object_cached_list, object, + vm_object_t, cached_list); + vm_object_cached--; + } + object->ref_count++; + vm_object_unlock(object); + vm_object_cache_unlock(); + return(object); + } + entry = (vm_object_hash_entry_t) queue_next(&entry->hash_links); + } + + vm_object_cache_unlock(); + return(NULL); +} + +/* + * vm_object_enter enters the specified object/pager/id into + * the hash table. + */ + +void vm_object_enter(object, pager) + vm_object_t object; + vm_pager_t pager; +{ + register queue_t bucket; + register vm_object_hash_entry_t entry; + + /* + * We don't cache null objects, and we can't cache + * objects with the null pager. + */ + + if (object == NULL) + return; + if (pager == NULL) + return; + + bucket = &vm_object_hashtable[vm_object_hash(pager)]; + entry = (vm_object_hash_entry_t) + malloc((u_long)sizeof *entry, M_VMOBJHASH, M_WAITOK); + entry->object = object; + object->can_persist = TRUE; + + vm_object_cache_lock(); + queue_enter(bucket, entry, vm_object_hash_entry_t, hash_links); + vm_object_cache_unlock(); +} + +/* + * vm_object_remove: + * + * Remove the pager from the hash table. + * Note: This assumes that the object cache + * is locked. XXX this should be fixed + * by reorganizing vm_object_deallocate. 
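+ *
+ *	Editor's note (not in the original source): vm_object_enter(),
+ *	vm_object_lookup() and vm_object_remove() together implement the
+ *	pager-to-object cache; vm_allocate_with_pager() shows the usual
+ *	pairing:
+ *
+ *		object = vm_object_lookup(pager);
+ *		if (object == NULL) {
+ *			object = vm_object_allocate(size);
+ *			vm_object_enter(object, pager);
+ *		}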
+ */ +vm_object_remove(pager) + register vm_pager_t pager; +{ + register queue_t bucket; + register vm_object_hash_entry_t entry; + register vm_object_t object; + + bucket = &vm_object_hashtable[vm_object_hash(pager)]; + + entry = (vm_object_hash_entry_t) queue_first(bucket); + while (!queue_end(bucket, (queue_entry_t) entry)) { + object = entry->object; + if (object->pager == pager) { + queue_remove(bucket, entry, vm_object_hash_entry_t, + hash_links); + free((caddr_t)entry, M_VMOBJHASH); + break; + } + entry = (vm_object_hash_entry_t) queue_next(&entry->hash_links); + } +} + +/* + * vm_object_cache_clear removes all objects from the cache. + * + */ + +void vm_object_cache_clear() +{ + register vm_object_t object; + + /* + * Remove each object in the cache by scanning down the + * list of cached objects. + */ + vm_object_cache_lock(); + while (!queue_empty(&vm_object_cached_list)) { + object = (vm_object_t) queue_first(&vm_object_cached_list); + vm_object_cache_unlock(); + + /* + * Note: it is important that we use vm_object_lookup + * to gain a reference, and not vm_object_reference, because + * the logic for removing an object from the cache lies in + * lookup. + */ + if (object != vm_object_lookup(object->pager)) + panic("vm_object_cache_clear: I'm sooo confused."); + pager_cache(object, FALSE); + + vm_object_cache_lock(); + } + vm_object_cache_unlock(); +} + +boolean_t vm_object_collapse_allowed = TRUE; +/* + * vm_object_collapse: + * + * Collapse an object with the object backing it. + * Pages in the backing object are moved into the + * parent, and the backing object is deallocated. + * + * Requires that the object be locked and the page + * queues be unlocked. + * + */ +void vm_object_collapse(object) + register vm_object_t object; + +{ + register vm_object_t backing_object; + register vm_offset_t backing_offset; + register vm_size_t size; + register vm_offset_t new_offset; + register vm_page_t p, pp; + + if (!vm_object_collapse_allowed) + return; + + while (TRUE) { + /* + * Verify that the conditions are right for collapse: + * + * The object exists and no pages in it are currently + * being paged out (or have ever been paged out). + */ + if (object == NULL || + object->paging_in_progress != 0 || + object->pager != NULL) + return; + + /* + * There is a backing object, and + */ + + if ((backing_object = object->shadow) == NULL) + return; + + vm_object_lock(backing_object); + /* + * ... + * The backing object is not read_only, + * and no pages in the backing object are + * currently being paged out. + * The backing object is internal. + */ + + if (!backing_object->internal || + backing_object->paging_in_progress != 0) { + vm_object_unlock(backing_object); + return; + } + + /* + * The backing object can't be a copy-object: + * the shadow_offset for the copy-object must stay + * as 0. Furthermore (for the 'we have all the + * pages' case), if we bypass backing_object and + * just shadow the next object in the chain, old + * pages from that object would then have to be copied + * BOTH into the (former) backing_object and into the + * parent object. + */ + if (backing_object->shadow != NULL && + backing_object->shadow->copy != NULL) { + vm_object_unlock(backing_object); + return; + } + + /* + * We know that we can either collapse the backing + * object (if the parent is the only reference to + * it) or (perhaps) remove the parent's reference + * to it. 
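+ *
+ *	Editor's note (not in the original source): both cases below map a
+ *	backing-object page at p->offset to the parent-relative offset
+ *
+ *		new_offset = p->offset - backing_offset;
+ *
+ *	A page with p->offset < backing_offset or new_offset >= size lies
+ *	outside the parent: in case 1 such pages are simply freed, while
+ *	in case 2 any backing page that does fall inside the parent but is
+ *	not resident there keeps the backing object in the chain.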
+ */ + + backing_offset = object->shadow_offset; + size = object->size; + + /* + * If there is exactly one reference to the backing + * object, we can collapse it into the parent. + */ + + if (backing_object->ref_count == 1) { + + /* + * We can collapse the backing object. + * + * Move all in-memory pages from backing_object + * to the parent. Pages that have been paged out + * will be overwritten by any of the parent's + * pages that shadow them. + */ + + while (!queue_empty(&backing_object->memq)) { + + p = (vm_page_t) + queue_first(&backing_object->memq); + + new_offset = (p->offset - backing_offset); + + /* + * If the parent has a page here, or if + * this page falls outside the parent, + * dispose of it. + * + * Otherwise, move it as planned. + */ + + if (p->offset < backing_offset || + new_offset >= size) { + vm_page_lock_queues(); + vm_page_free(p); + vm_page_unlock_queues(); + } else { + pp = vm_page_lookup(object, new_offset); + if (pp != NULL && !pp->fake) { + vm_page_lock_queues(); + vm_page_free(p); + vm_page_unlock_queues(); + } + else { + if (pp) { +#if 1 + /* + * This should never happen -- the + * parent cannot have ever had an + * external memory object, and thus + * cannot have absent pages. + */ + panic("vm_object_collapse: bad case"); + /* andrew@werple.apana.org.au - from + mach 3.0 VM */ +#else + /* may be someone waiting for it */ + PAGE_WAKEUP(pp); + vm_page_lock_queues(); + vm_page_free(pp); + vm_page_unlock_queues(); +#endif + } + /* + * Parent now has no page. + * Move the backing object's page + * up. + */ + vm_page_rename(p, object, new_offset); + } + } + } + + /* + * Move the pager from backing_object to object. + * + * XXX We're only using part of the paging space + * for keeps now... we ought to discard the + * unused portion. + */ + + /* + * Remove backing_object from the object hashtable now. + * This is necessary since its pager is going away + * and therefore it is not going to be removed from + * hashtable in vm_object_deallocate(). + * + * NOTE - backing_object can only get at this stage if + * it has an internal pager. It is not normally on the + * hashtable unless it was put there by eg. vm_mmap() + * + * XXX - Need I worry here about *named* ANON pagers ? + */ + + if (backing_object->pager) { + vm_object_remove(backing_object->pager); + } + object->pager = backing_object->pager; +#if 1 + /* Mach 3.0 code */ + /* andrew@werple.apana.org.au, 12 Feb 1993 */ + + /* + * If there is no pager, leave paging-offset alone. + */ + if (object->pager) + object->paging_offset = + backing_object->paging_offset + + backing_offset; +#else + /* old VM 2.5 version */ + object->paging_offset += backing_offset; +#endif + + backing_object->pager = NULL; + + /* + * Object now shadows whatever backing_object did. + * Note that the reference to backing_object->shadow + * moves from within backing_object to within object. + */ + + object->shadow = backing_object->shadow; + object->shadow_offset += backing_object->shadow_offset; + if (object->shadow != NULL && + object->shadow->copy != NULL) { + panic("vm_object_collapse: we collapsed a copy-object!"); + } + /* + * Discard backing_object. + * + * Since the backing object has no pages, no + * pager left, and no object references within it, + * all that is necessary is to dispose of it. 
+ */ + + vm_object_unlock(backing_object); + + simple_lock(&vm_object_list_lock); + queue_remove(&vm_object_list, backing_object, + vm_object_t, object_list); + vm_object_count--; + simple_unlock(&vm_object_list_lock); + + free((caddr_t)backing_object, M_VMOBJ); + + object_collapses++; + } + else { + /* + * If all of the pages in the backing object are + * shadowed by the parent object, the parent + * object no longer has to shadow the backing + * object; it can shadow the next one in the + * chain. + * + * The backing object must not be paged out - we'd + * have to check all of the paged-out pages, as + * well. + */ + + if (backing_object->pager != NULL) { + vm_object_unlock(backing_object); + return; + } + + /* + * Should have a check for a 'small' number + * of pages here. + */ + + p = (vm_page_t) queue_first(&backing_object->memq); + while (!queue_end(&backing_object->memq, + (queue_entry_t) p)) { + + new_offset = (p->offset - backing_offset); + + /* + * If the parent has a page here, or if + * this page falls outside the parent, + * keep going. + * + * Otherwise, the backing_object must be + * left in the chain. + */ + + if (p->offset >= backing_offset && + new_offset <= size && + ((pp = vm_page_lookup(object, new_offset)) + == NULL || + pp->fake)) { + /* + * Page still needed. + * Can't go any further. + */ + vm_object_unlock(backing_object); + return; + } + p = (vm_page_t) queue_next(&p->listq); + } + + /* + * Make the parent shadow the next object + * in the chain. Deallocating backing_object + * will not remove it, since its reference + * count is at least 2. + */ + + vm_object_reference(object->shadow = backing_object->shadow); + object->shadow_offset += backing_object->shadow_offset; + +#if 1 + /* Mach 3.0 code */ + /* andrew@werple.apana.org.au, 12 Feb 1993 */ + + /* + * Backing object might have had a copy pointer + * to us. If it did, clear it. + */ + if (backing_object->copy == object) + backing_object->copy = NULL; +#endif + + /* Drop the reference count on backing_object. + * Since its ref_count was at least 2, it + * will not vanish; so we don't need to call + * vm_object_deallocate. + */ + backing_object->ref_count--; + vm_object_unlock(backing_object); + + object_bypasses ++; + + } + + /* + * Try again with this object's new backing object. + */ + } +} + +/* + * vm_object_page_remove: [internal] + * + * Removes all physical pages in the specified + * object range from the object's list of pages. + * + * The object must be locked. + */ +void vm_object_page_remove(object, start, end) + register vm_object_t object; + register vm_offset_t start; + register vm_offset_t end; +{ + register vm_page_t p, next; + + if (object == NULL) + return; + + p = (vm_page_t) queue_first(&object->memq); + while (!queue_end(&object->memq, (queue_entry_t) p)) { + next = (vm_page_t) queue_next(&p->listq); + if ((start <= p->offset) && (p->offset < end)) { + pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE); + vm_page_lock_queues(); + vm_page_free(p); + vm_page_unlock_queues(); + } + p = next; + } +} + +/* + * Routine: vm_object_coalesce + * Function: Coalesces two objects backing up adjoining + * regions of memory into a single object. + * + * returns TRUE if objects were combined. + * + * NOTE: Only works at the moment if the second object is NULL - + * if it's not, which object do we lock first? 
+ * + * Parameters: + * prev_object First object to coalesce + * prev_offset Offset into prev_object + * next_object Second object into coalesce + * next_offset Offset into next_object + * + * prev_size Size of reference to prev_object + * next_size Size of reference to next_object + * + * Conditions: + * The object must *not* be locked. + */ +boolean_t vm_object_coalesce(prev_object, next_object, + prev_offset, next_offset, + prev_size, next_size) + + register vm_object_t prev_object; + vm_object_t next_object; + vm_offset_t prev_offset, next_offset; + vm_size_t prev_size, next_size; +{ + vm_size_t newsize; + +#ifdef lint + next_offset++; +#endif lint + + if (next_object != NULL) { + return(FALSE); + } + + if (prev_object == NULL) { + return(TRUE); + } + + vm_object_lock(prev_object); + + /* + * Try to collapse the object first + */ + vm_object_collapse(prev_object); + + /* + * Can't coalesce if: + * . more than one reference + * . paged out + * . shadows another object + * . has a copy elsewhere + * (any of which mean that the pages not mapped to + * prev_entry may be in use anyway) + */ + + if (prev_object->ref_count > 1 || + prev_object->pager != NULL || + prev_object->shadow != NULL || + prev_object->copy != NULL) { + vm_object_unlock(prev_object); + return(FALSE); + } + + /* + * Remove any pages that may still be in the object from + * a previous deallocation. + */ + + vm_object_page_remove(prev_object, + prev_offset + prev_size, + prev_offset + prev_size + next_size); + + /* + * Extend the object if necessary. + */ + newsize = prev_offset + prev_size + next_size; + if (newsize > prev_object->size) + prev_object->size = newsize; + + vm_object_unlock(prev_object); + return(TRUE); +} + +#if defined(DEBUG) || (NDDB > 0) +/* + * vm_object_print: [ debug ] + */ +void vm_object_print(object, full) + vm_object_t object; + boolean_t full; +{ + register vm_page_t p; + extern indent; + + register int count; + + if (object == NULL) + return; + + iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ", + (int) object, (int) object->size, + object->resident_page_count, object->ref_count); + printf("pager=0x%x+0x%x, shadow=(0x%x)+0x%x\n", + (int) object->pager, (int) object->paging_offset, + (int) object->shadow, (int) object->shadow_offset); + printf("cache: next=0x%x, prev=0x%x\n", + object->cached_list.next, object->cached_list.prev); + + if (!full) + return; + + indent += 2; + count = 0; + p = (vm_page_t) queue_first(&object->memq); + while (!queue_end(&object->memq, (queue_entry_t) p)) { + if (count == 0) + iprintf("memory:="); + else if (count == 6) { + printf("\n"); + iprintf(" ..."); + count = 0; + } else + printf(","); + count++; + + printf("(off=0x%x,page=0x%x)", p->offset, VM_PAGE_TO_PHYS(p)); + p = (vm_page_t) queue_next(&p->listq); + } + if (count != 0) + printf("\n"); + indent -= 2; +} +#endif /* defined(DEBUG) || (NDDB > 0) */ diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h new file mode 100644 index 000000000000..df243bf985b3 --- /dev/null +++ b/sys/vm/vm_object.h @@ -0,0 +1,173 @@ +/* + * Copyright (c) 1991 Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_object.h 7.3 (Berkeley) 4/21/91 + * $Id: vm_object.h,v 1.2 1993/10/16 16:20:43 rgrimes Exp $ + */ + +/* + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + */ + +/* + * Virtual memory object module definitions. + */ + +#ifndef _VM_OBJECT_ +#define _VM_OBJECT_ + +#include <vm/vm_pager.h> + +/* + * Types defined: + * + * vm_object_t Virtual memory object. + */ + +struct vm_object { + queue_chain_t memq; /* Resident memory */ + queue_chain_t object_list; /* list of all objects */ + simple_lock_data_t Lock; /* Synchronization */ + int LockHolder; + int ref_count; /* How many refs?? */ + vm_size_t size; /* Object size */ + int resident_page_count; + /* number of resident pages */ + struct vm_object *copy; /* Object that holds copies of + my changed pages */ + vm_pager_t pager; /* Where to get data */ + boolean_t pager_ready; /* Have pager fields been filled? 
*/ + vm_offset_t paging_offset; /* Offset into paging space */ + struct vm_object *shadow; /* My shadow */ + vm_offset_t shadow_offset; /* Offset in shadow */ + unsigned int + paging_in_progress:16, + /* Paging (in or out) - don't + collapse or destroy */ + /* boolean_t */ can_persist:1, /* allow to persist */ + /* boolean_t */ internal:1; /* internally created object */ + queue_chain_t cached_list; /* for persistence */ +}; + +typedef struct vm_object *vm_object_t; + +struct vm_object_hash_entry { + queue_chain_t hash_links; /* hash chain links */ + vm_object_t object; /* object we represent */ +}; + +typedef struct vm_object_hash_entry *vm_object_hash_entry_t; + +#ifdef KERNEL +queue_head_t vm_object_cached_list; /* list of objects persisting */ +int vm_object_cached; /* size of cached list */ +simple_lock_data_t vm_cache_lock; /* lock for object cache */ + +queue_head_t vm_object_list; /* list of allocated objects */ +long vm_object_count; /* count of all objects */ +simple_lock_data_t vm_object_list_lock; + /* lock for object list and count */ + +vm_object_t kernel_object; /* the single kernel object */ +vm_object_t kmem_object; + +#define vm_object_cache_lock() simple_lock(&vm_cache_lock) +#define vm_object_cache_unlock() simple_unlock(&vm_cache_lock) +#endif KERNEL + +/* + * Declare procedures that operate on VM objects. + */ + +void vm_object_init (); +void vm_object_terminate(); +vm_object_t vm_object_allocate(); +void vm_object_reference(); +void vm_object_deallocate(); +void vm_object_pmap_copy(); +void vm_object_pmap_remove(); +void vm_object_page_remove(); +void vm_object_shadow(); +void vm_object_copy(); +void vm_object_collapse(); +vm_object_t vm_object_lookup(); +void vm_object_enter(); +void vm_object_setpager(); +#define vm_object_cache(pager) pager_cache(vm_object_lookup(pager),TRUE) +#define vm_object_uncache(pager) pager_cache(vm_object_lookup(pager),FALSE) + +void vm_object_cache_clear(); +void vm_object_print(); + +#if VM_OBJECT_DEBUG +#define vm_object_lock_init(object) { simple_lock_init(&(object)->Lock); (object)->LockHolder = 0; } +#define vm_object_lock(object) { simple_lock(&(object)->Lock); (object)->LockHolder = (int) current_thread(); } +#define vm_object_unlock(object) { (object)->LockHolder = 0; simple_unlock(&(object)->Lock); } +#define vm_object_lock_try(object) (simple_lock_try(&(object)->Lock) ? ( ((object)->LockHolder = (int) current_thread()) , TRUE) : FALSE) +#define vm_object_sleep(event, object, interruptible) \ + { (object)->LockHolder = 0; thread_sleep((event), &(object)->Lock, (interruptible)); } +#else VM_OBJECT_DEBUG +#define vm_object_lock_init(object) simple_lock_init(&(object)->Lock) +#define vm_object_lock(object) simple_lock(&(object)->Lock) +#define vm_object_unlock(object) simple_unlock(&(object)->Lock) +#define vm_object_lock_try(object) simple_lock_try(&(object)->Lock) +#define vm_object_sleep(event, object, interruptible) \ + thread_sleep((event), &(object)->Lock, (interruptible)) +#endif VM_OBJECT_DEBUG + +#endif _VM_OBJECT_ diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c new file mode 100644 index 000000000000..2935a67bbe9a --- /dev/null +++ b/sys/vm/vm_page.c @@ -0,0 +1,783 @@ +/* + * Copyright (c) 1991 Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91 + * $Id: vm_page.c,v 1.2 1993/10/16 16:20:44 rgrimes Exp $ + */ + +/* + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + */ + +/* + * Resident memory management module. + */ + +#include "param.h" + +#include "vm.h" +#include "vm_map.h" +#include "vm_page.h" +#include "vm_pageout.h" + +/* + * Associated with page of user-allocatable memory is a + * page structure. + */ + +queue_head_t *vm_page_buckets; /* Array of buckets */ +int vm_page_bucket_count = 0; /* How big is array? 
*/ +int vm_page_hash_mask; /* Mask for hash function */ +simple_lock_data_t bucket_lock; /* lock for all buckets XXX */ + +vm_size_t page_size = 4096; +vm_size_t page_mask = 4095; +int page_shift = 12; + +queue_head_t vm_page_queue_free; +queue_head_t vm_page_queue_active; +queue_head_t vm_page_queue_inactive; +simple_lock_data_t vm_page_queue_lock; +simple_lock_data_t vm_page_queue_free_lock; + +vm_page_t vm_page_array; +long first_page; +long last_page; +vm_offset_t first_phys_addr; +vm_offset_t last_phys_addr; + +int vm_page_free_count; +int vm_page_active_count; +int vm_page_inactive_count; +int vm_page_wire_count; +int vm_page_laundry_count; + +int vm_page_free_target = 0; +int vm_page_free_min = 0; +int vm_page_inactive_target = 0; +int vm_page_free_reserved = 0; + +/* + * vm_set_page_size: + * + * Sets the page size, perhaps based upon the memory + * size. Must be called before any use of page-size + * dependent functions. + * + * Sets page_shift and page_mask from page_size. + */ +void vm_set_page_size() +{ + page_mask = page_size - 1; + + if ((page_mask & page_size) != 0) + panic("vm_set_page_size: page size not a power of two"); + + for (page_shift = 0; ; page_shift++) + if ((1 << page_shift) == page_size) + break; +} + + +/* + * vm_page_startup: + * + * Initializes the resident memory module. + * + * Allocates memory for the page cells, and + * for the object/offset-to-page hash table headers. + * Each page cell is initialized and placed on the free list. + */ +vm_offset_t vm_page_startup(start, end, vaddr) + register vm_offset_t start; + vm_offset_t end; + register vm_offset_t vaddr; +{ + register vm_offset_t mapped; + register vm_page_t m; + register queue_t bucket; + vm_size_t npages; + register vm_offset_t new_start; + int i; + vm_offset_t pa; + + extern vm_offset_t kentry_data; + extern vm_size_t kentry_data_size; + + + /* + * Initialize the locks + */ + + simple_lock_init(&vm_page_queue_free_lock); + simple_lock_init(&vm_page_queue_lock); + + /* + * Initialize the queue headers for the free queue, + * the active queue and the inactive queue. + */ + + queue_init(&vm_page_queue_free); + queue_init(&vm_page_queue_active); + queue_init(&vm_page_queue_inactive); + + /* + * Allocate (and initialize) the hash table buckets. + * + * The number of buckets MUST BE a power of 2, and + * the actual value is the next power of 2 greater + * than the number of physical pages in the system. + * + * Note: + * This computation can be tweaked if desired. + */ + + vm_page_buckets = (queue_t) vaddr; + bucket = vm_page_buckets; + if (vm_page_bucket_count == 0) { + vm_page_bucket_count = 1; + while (vm_page_bucket_count < atop(end - start)) + vm_page_bucket_count <<= 1; + } + + vm_page_hash_mask = vm_page_bucket_count - 1; + + /* + * Validate these addresses. + */ + + new_start = round_page(((queue_t)start) + vm_page_bucket_count); + mapped = vaddr; + vaddr = pmap_map(mapped, start, new_start, + VM_PROT_READ|VM_PROT_WRITE); + start = new_start; + bzero((caddr_t) mapped, vaddr - mapped); + mapped = vaddr; + + for (i = vm_page_bucket_count; i--;) { + queue_init(bucket); + bucket++; + } + + simple_lock_init(&bucket_lock); + + /* + * round (or truncate) the addresses to our page size. + */ + + end = trunc_page(end); + + /* + * Pre-allocate maps and map entries that cannot be dynamically + * allocated via malloc(). The maps include the kernel_map and + * kmem_map which must be initialized before malloc() will + * work (obviously). 
Also could include pager maps which would + * be allocated before kmeminit. + * + * Allow some kernel map entries... this should be plenty + * since people shouldn't be cluttering up the kernel + * map (they should use their own maps). + */ + + kentry_data_size = MAX_KMAP * sizeof(struct vm_map) + + MAX_KMAPENT * sizeof(struct vm_map_entry); + kentry_data_size = round_page(kentry_data_size); + kentry_data = (vm_offset_t) vaddr; + vaddr += kentry_data_size; + + /* + * Validate these zone addresses. + */ + + new_start = start + (vaddr - mapped); + pmap_map(mapped, start, new_start, VM_PROT_READ|VM_PROT_WRITE); + bzero((caddr_t) mapped, (vaddr - mapped)); + mapped = vaddr; + start = new_start; + + /* + * Compute the number of pages of memory that will be + * available for use (taking into account the overhead + * of a page structure per page). + */ + + vm_page_free_count = npages = + (end - start + sizeof(struct vm_page))/(PAGE_SIZE + sizeof(struct vm_page)); + + /* + * Initialize the mem entry structures now, and + * put them in the free queue. + */ + + m = vm_page_array = (vm_page_t) vaddr; + first_page = start; + first_page += npages*sizeof(struct vm_page); + first_page = atop(round_page(first_page)); + last_page = first_page + npages - 1; + + first_phys_addr = ptoa(first_page); + last_phys_addr = ptoa(last_page) + page_mask; + + /* + * Validate these addresses. + */ + + new_start = start + (round_page(m + npages) - mapped); + mapped = pmap_map(mapped, start, new_start, + VM_PROT_READ|VM_PROT_WRITE); + start = new_start; + + /* + * Clear all of the page structures + */ + bzero((caddr_t)m, npages * sizeof(*m)); + + pa = first_phys_addr; + while (npages--) { + m->copy_on_write = FALSE; + m->wanted = FALSE; + m->inactive = FALSE; + m->active = FALSE; + m->busy = FALSE; + m->object = NULL; + m->phys_addr = pa; + queue_enter(&vm_page_queue_free, m, vm_page_t, pageq); + m++; + pa += PAGE_SIZE; + } + + /* + * Initialize vm_pages_needed lock here - don't wait for pageout + * daemon XXX + */ + simple_lock_init(&vm_pages_needed_lock); + + return(mapped); +} + +/* + * vm_page_hash: + * + * Distributes the object/offset key pair among hash buckets. + * + * NOTE: This macro depends on vm_page_bucket_count being a power of 2. + */ +#define vm_page_hash(object, offset) \ + (((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask) + +/* + * vm_page_insert: [ internal use only ] + * + * Inserts the given mem entry into the object/object-page + * table and object list. + * + * The object and page must be locked. + */ + +void vm_page_insert(mem, object, offset) + register vm_page_t mem; + register vm_object_t object; + register vm_offset_t offset; +{ + register queue_t bucket; + int spl; + + VM_PAGE_CHECK(mem); + + if (mem->tabled) + panic("vm_page_insert: already inserted"); + + /* + * Record the object/offset pair in this page + */ + + mem->object = object; + mem->offset = offset; + + /* + * Insert it into the object_object/offset hash table + */ + + bucket = &vm_page_buckets[vm_page_hash(object, offset)]; + spl = splimp(); + simple_lock(&bucket_lock); + queue_enter(bucket, mem, vm_page_t, hashq); + simple_unlock(&bucket_lock); + (void) splx(spl); + + /* + * Now link into the object's list of backed pages. + */ + + queue_enter(&object->memq, mem, vm_page_t, listq); + mem->tabled = TRUE; + + /* + * And show that the object has one more resident + * page. 
+ */ + + object->resident_page_count++; +} + +/* + * vm_page_remove: [ internal use only ] + * + * Removes the given mem entry from the object/offset-page + * table and the object page list. + * + * The object and page must be locked. + */ + +void vm_page_remove(mem) + register vm_page_t mem; +{ + register queue_t bucket; + int spl; + + VM_PAGE_CHECK(mem); + + if (!mem->tabled) + return; + + /* + * Remove from the object_object/offset hash table + */ + + bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)]; + spl = splimp(); + simple_lock(&bucket_lock); + queue_remove(bucket, mem, vm_page_t, hashq); + simple_unlock(&bucket_lock); + (void) splx(spl); + + /* + * Now remove from the object's list of backed pages. + */ + + queue_remove(&mem->object->memq, mem, vm_page_t, listq); + + /* + * And show that the object has one fewer resident + * page. + */ + + mem->object->resident_page_count--; + + mem->tabled = FALSE; +} + +/* + * vm_page_lookup: + * + * Returns the page associated with the object/offset + * pair specified; if none is found, NULL is returned. + * + * The object must be locked. No side effects. + */ + +vm_page_t vm_page_lookup(object, offset) + register vm_object_t object; + register vm_offset_t offset; +{ + register vm_page_t mem; + register queue_t bucket; + int spl; + + /* + * Search the hash table for this object/offset pair + */ + + bucket = &vm_page_buckets[vm_page_hash(object, offset)]; + + spl = splimp(); + simple_lock(&bucket_lock); + mem = (vm_page_t) queue_first(bucket); + while (!queue_end(bucket, (queue_entry_t) mem)) { + VM_PAGE_CHECK(mem); + if ((mem->object == object) && (mem->offset == offset)) { + simple_unlock(&bucket_lock); + splx(spl); + return(mem); + } + mem = (vm_page_t) queue_next(&mem->hashq); + } + + simple_unlock(&bucket_lock); + splx(spl); + return(NULL); +} + +/* + * vm_page_rename: + * + * Move the given memory entry from its + * current object to the specified target object/offset. + * + * The object must be locked. 
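+ *
+ *	Editor's sketch (not part of the original source):
+ *	vm_object_collapse() uses this to migrate a backing object's pages
+ *	into the parent at their translated offsets:
+ *
+ *		vm_page_rename(p, object, p->offset - backing_offset);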
+ */ +void vm_page_rename(mem, new_object, new_offset) + register vm_page_t mem; + register vm_object_t new_object; + vm_offset_t new_offset; +{ + if (mem->object == new_object) + return; + + vm_page_lock_queues(); /* keep page from moving out from + under pageout daemon */ + vm_page_remove(mem); + vm_page_insert(mem, new_object, new_offset); + vm_page_unlock_queues(); +} + +void vm_page_init(mem, object, offset) + vm_page_t mem; + vm_object_t object; + vm_offset_t offset; +{ +#ifdef DEBUG +#define vm_page_init(mem, object, offset) {\ + (mem)->busy = TRUE; \ + (mem)->tabled = FALSE; \ + vm_page_insert((mem), (object), (offset)); \ + (mem)->absent = FALSE; \ + (mem)->fictitious = FALSE; \ + (mem)->page_lock = VM_PROT_NONE; \ + (mem)->unlock_request = VM_PROT_NONE; \ + (mem)->laundry = FALSE; \ + (mem)->active = FALSE; \ + (mem)->inactive = FALSE; \ + (mem)->wire_count = 0; \ + (mem)->clean = TRUE; \ + (mem)->copy_on_write = FALSE; \ + (mem)->fake = TRUE; \ + (mem)->pagerowned = FALSE; \ + (mem)->ptpage = FALSE; \ + } +#else +#define vm_page_init(mem, object, offset) {\ + (mem)->busy = TRUE; \ + (mem)->tabled = FALSE; \ + vm_page_insert((mem), (object), (offset)); \ + (mem)->absent = FALSE; \ + (mem)->fictitious = FALSE; \ + (mem)->page_lock = VM_PROT_NONE; \ + (mem)->unlock_request = VM_PROT_NONE; \ + (mem)->laundry = FALSE; \ + (mem)->active = FALSE; \ + (mem)->inactive = FALSE; \ + (mem)->wire_count = 0; \ + (mem)->clean = TRUE; \ + (mem)->copy_on_write = FALSE; \ + (mem)->fake = TRUE; \ + } +#endif + + vm_page_init(mem, object, offset); +} + +/* + * vm_page_alloc: + * + * Allocate and return a memory cell associated + * with this VM object/offset pair. + * + * Object must be locked. + */ +vm_page_t vm_page_alloc(object, offset) + vm_object_t object; + vm_offset_t offset; +{ + register vm_page_t mem; + int spl; + + spl = splimp(); /* XXX */ + simple_lock(&vm_page_queue_free_lock); + if ( object != kernel_object && + object != kmem_object && + vm_page_free_count <= vm_page_free_reserved) { + + simple_unlock(&vm_page_queue_free_lock); + splx(spl); + return(NULL); + } + if (queue_empty(&vm_page_queue_free)) { + simple_unlock(&vm_page_queue_free_lock); + splx(spl); + return(NULL); + } + + queue_remove_first(&vm_page_queue_free, mem, vm_page_t, pageq); + + vm_page_free_count--; + simple_unlock(&vm_page_queue_free_lock); + splx(spl); + + vm_page_init(mem, object, offset); + + /* + * Decide if we should poke the pageout daemon. + * We do this if the free count is less than the low + * water mark, or if the free count is less than the high + * water mark (but above the low water mark) and the inactive + * count is less than its target. + * + * We don't have the counts locked ... if they change a little, + * it doesn't really matter. + */ + + if ((vm_page_free_count < vm_page_free_min) || + ((vm_page_free_count < vm_page_free_target) && + (vm_page_inactive_count < vm_page_inactive_target))) + thread_wakeup(&vm_pages_needed); + return(mem); +} + +/* + * vm_page_free: + * + * Returns the given page to the free list, + * disassociating it with any VM object. + * + * Object and page must be locked prior to entry. 
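+ *
+ *	Editor's sketch (not part of the original source): callers bracket
+ *	the call with the page-queue lock, as vm_object_terminate() does:
+ *
+ *		vm_page_lock_queues();
+ *		vm_page_free(p);
+ *		vm_page_unlock_queues();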
+ */ +void vm_page_free(mem) + register vm_page_t mem; +{ + vm_page_remove(mem); + if (mem->active) { + queue_remove(&vm_page_queue_active, mem, vm_page_t, pageq); + mem->active = FALSE; + vm_page_active_count--; + } + + if (mem->inactive) { + queue_remove(&vm_page_queue_inactive, mem, vm_page_t, pageq); + mem->inactive = FALSE; + vm_page_inactive_count--; + } + + if (!mem->fictitious) { + int spl; + + spl = splimp(); + simple_lock(&vm_page_queue_free_lock); + queue_enter(&vm_page_queue_free, mem, vm_page_t, pageq); + + vm_page_free_count++; + simple_unlock(&vm_page_queue_free_lock); + splx(spl); + } +} + +/* + * vm_page_wire: + * + * Mark this page as wired down by yet + * another map, removing it from paging queues + * as necessary. + * + * The page queues must be locked. + */ +void vm_page_wire(mem) + register vm_page_t mem; +{ + VM_PAGE_CHECK(mem); + + if (mem->wire_count == 0) { + if (mem->active) { + queue_remove(&vm_page_queue_active, mem, vm_page_t, + pageq); + vm_page_active_count--; + mem->active = FALSE; + } + if (mem->inactive) { + queue_remove(&vm_page_queue_inactive, mem, vm_page_t, + pageq); + vm_page_inactive_count--; + mem->inactive = FALSE; + } + vm_page_wire_count++; + } + mem->wire_count++; +} + +/* + * vm_page_unwire: + * + * Release one wiring of this page, potentially + * enabling it to be paged again. + * + * The page queues must be locked. + */ +void vm_page_unwire(mem) + register vm_page_t mem; +{ + VM_PAGE_CHECK(mem); + + mem->wire_count--; + if (mem->wire_count == 0) { + queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq); + vm_page_active_count++; + mem->active = TRUE; + vm_page_wire_count--; + } +} + +/* + * vm_page_deactivate: + * + * Returns the given page to the inactive list, + * indicating that no physical maps have access + * to this page. [Used by the physical mapping system.] + * + * The page queues must be locked. + */ +void vm_page_deactivate(m) + register vm_page_t m; +{ + VM_PAGE_CHECK(m); + + /* + * Only move active pages -- ignore locked or already + * inactive ones. + * + * XXX: sometimes we get pages which aren't wired down + * or on any queue - we need to put them on the inactive + * queue also, otherwise we lose track of them. + * Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93. + */ + + if (!m->inactive && m->wire_count == 0) { + pmap_clear_reference(VM_PAGE_TO_PHYS(m)); + if (m->active) { + queue_remove(&vm_page_queue_active, m, vm_page_t, pageq); + m->active = FALSE; + vm_page_active_count--; + } + queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq); + m->inactive = TRUE; + vm_page_inactive_count++; + if (pmap_is_modified(VM_PAGE_TO_PHYS(m))) + m->clean = FALSE; + m->laundry = !m->clean; + } +} + +/* + * vm_page_activate: + * + * Put the specified page on the active list (if appropriate). + * + * The page queues must be locked. + */ + +void vm_page_activate(m) + register vm_page_t m; +{ + VM_PAGE_CHECK(m); + + if (m->inactive) { + queue_remove(&vm_page_queue_inactive, m, vm_page_t, + pageq); + vm_page_inactive_count--; + m->inactive = FALSE; + } + if (m->wire_count == 0) { + if (m->active) + panic("vm_page_activate: already active"); + + queue_enter(&vm_page_queue_active, m, vm_page_t, pageq); + m->active = TRUE; + vm_page_active_count++; + } +} + +/* + * vm_page_zero_fill: + * + * Zero-fill the specified page. + * Written as a standard pagein routine, to + * be used by the zero-fill object. 
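
The wiring rules above amount to a small state machine: the first wiring removes the page from the paging queues, and the last unwiring puts it back on the active queue. An illustrative userland model (invented names, no locking):

    #include <assert.h>
    #include <stdio.h>

    /* Toy model of vm_page_wire()/vm_page_unwire() above. */
    struct tpage { int wire_count; int active; };

    static int active_count, wire_total;

    static void wire(struct tpage *p)
    {
        if (p->wire_count == 0) {            /* first wiring: leave the queues */
            if (p->active) { p->active = 0; active_count--; }
            wire_total++;
        }
        p->wire_count++;
    }

    static void unwire(struct tpage *p)
    {
        assert(p->wire_count > 0);
        if (--p->wire_count == 0) {          /* last unwiring: reactivate */
            p->active = 1; active_count++;
            wire_total--;
        }
    }

    int main(void)
    {
        struct tpage p = { 0, 1 }; active_count = 1;
        wire(&p); wire(&p);                  /* wired by two maps */
        unwire(&p);                          /* still wired once */
        unwire(&p);                          /* back on the active queue */
        printf("active=%d wired=%d\n", active_count, wire_total);
        return 0;
    }
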
+ */ + +boolean_t vm_page_zero_fill(m) + vm_page_t m; +{ + VM_PAGE_CHECK(m); + + pmap_zero_page(VM_PAGE_TO_PHYS(m)); + return(TRUE); +} + +/* + * vm_page_copy: + * + * Copy one page to another + */ + +void vm_page_copy(src_m, dest_m) + vm_page_t src_m; + vm_page_t dest_m; +{ + VM_PAGE_CHECK(src_m); + VM_PAGE_CHECK(dest_m); + + pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m)); +} diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h new file mode 100644 index 000000000000..072c6a29ba70 --- /dev/null +++ b/sys/vm/vm_page.h @@ -0,0 +1,263 @@ +/* + * Copyright (c) 1991 Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_page.h 7.3 (Berkeley) 4/21/91 + * $Id: vm_page.h,v 1.2 1993/10/16 16:20:46 rgrimes Exp $ + */ + +/* + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + */ + +/* + * Resident memory system definitions. + */ + +#ifndef _VM_PAGE_ +#define _VM_PAGE_ + +/* + * Management of resident (logical) pages. + * + * A small structure is kept for each resident + * page, indexed by page number. Each structure + * is an element of several lists: + * + * A hash table bucket used to quickly + * perform object/offset lookups + * + * A list of all pages for a given object, + * so they can be quickly deactivated at + * time of deallocation. + * + * An ordered list of pages due for pageout. + * + * In addition, the structure contains the object + * and offset to which this page belongs (for pageout), + * and sundry status bits. + * + * Fields in this structure are locked either by the lock on the + * object that the page belongs to (O) or by the lock on the page + * queues (P). + */ + +struct vm_page { + queue_chain_t pageq; /* queue info for FIFO + * queue or free list (P) */ + queue_chain_t hashq; /* hash table links (O)*/ + queue_chain_t listq; /* all pages in same object (O)*/ + + vm_object_t object; /* which object am I in (O,P)*/ + vm_offset_t offset; /* offset into that object (O,P) */ + + unsigned int wire_count:16, /* how many wired down maps use me? + (P) */ + /* boolean_t */ inactive:1, /* page is in inactive list (P) */ + active:1, /* page is in active list (P) */ + laundry:1, /* page is being cleaned now (P)*/ +#ifdef DEBUG + pagerowned:1, /* async paging op in progress */ + ptpage:1, /* is a user page table page */ +#endif + :0; /* (force to 'long' boundary) */ +#ifdef ns32000 + int pad; /* extra space for ns32000 bit ops */ +#endif ns32000 + boolean_t clean; /* page has not been modified */ + unsigned int + /* boolean_t */ busy:1, /* page is in transit (O) */ + wanted:1, /* someone is waiting for page (O) */ + tabled:1, /* page is in VP table (O) */ + copy_on_write:1,/* page must be copied before being + changed (O) */ + fictitious:1, /* physical page doesn't exist (O) */ + absent:1, /* virtual page doesn't exist (O) */ + fake:1, /* page is a placeholder for page-in + (O) */ + :0; + + vm_offset_t phys_addr; /* physical address of page */ + vm_prot_t page_lock; /* Uses prohibited by data manager */ + vm_prot_t unlock_request; /* Outstanding unlock request */ +}; + +typedef struct vm_page *vm_page_t; + +#if VM_PAGE_DEBUG +#define VM_PAGE_CHECK(mem) { \ + if ( (((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \ + (((unsigned int) mem) > ((unsigned int) &vm_page_array[last_page-first_page])) || \ + (mem->active && mem->inactive) \ + ) panic("vm_page_check: not valid!"); \ + } +#else VM_PAGE_DEBUG +#define VM_PAGE_CHECK(mem) +#endif VM_PAGE_DEBUG + +#ifdef KERNEL +/* + * Each pageable resident page falls into one of three lists: + * + * free + * Available for allocation now. + * inactive + * Not referenced in any map, but still has an + * object/offset-page mapping, and may be dirty. + * This is the list of pages that should be + * paged out next. + * active + * A list of pages which have been placed in + * at least one physical map. This list is + * ordered, in LRU-like fashion. 
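
The flag packing in struct vm_page above relies on C bit-fields, with the zero-width field forcing alignment to the next word boundary. A self-contained sketch of the same construction (member set abbreviated; exact layout and sizes are compiler-dependent):

    #include <stdio.h>

    /* Condensed model of the vm_page flag words: a 16-bit wire count
     * and several boolean bits share one word, and ":0" pads out to
     * the next word so the two groups can be locked independently. */
    struct flags {
        unsigned int wire_count:16,
                     inactive:1,
                     active:1,
                     laundry:1,
                     :0;                     /* force word alignment */
        unsigned int busy:1,
                     wanted:1,
                     tabled:1,
                     :0;
    };

    int main(void)
    {
        struct flags f = { 0 };
        f.wire_count = 3;
        f.active = 1;
        printf("sizeof(struct flags) = %zu\n", sizeof f);
        printf("wire=%u active=%u\n", f.wire_count, f.active);
        return 0;
    }
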
+ */ + +extern +queue_head_t vm_page_queue_free; /* memory free queue */ +extern +queue_head_t vm_page_queue_active; /* active memory queue */ +extern +queue_head_t vm_page_queue_inactive; /* inactive memory queue */ + +extern +vm_page_t vm_page_array; /* First resident page in table */ +extern +long first_page; /* first physical page number */ + /* ... represented in vm_page_array */ +extern +long last_page; /* last physical page number */ + /* ... represented in vm_page_array */ + /* [INCLUSIVE] */ +extern +vm_offset_t first_phys_addr; /* physical address for first_page */ +extern +vm_offset_t last_phys_addr; /* physical address for last_page */ + +extern +int vm_page_free_count; /* How many pages are free? */ +extern +int vm_page_active_count; /* How many pages are active? */ +extern +int vm_page_inactive_count; /* How many pages are inactive? */ +extern +int vm_page_wire_count; /* How many pages are wired? */ +extern +int vm_page_free_target; /* How many do we want free? */ +extern +int vm_page_free_min; /* When to wakeup pageout */ +extern +int vm_page_inactive_target;/* How many do we want inactive? */ +extern +int vm_page_free_reserved; /* How many pages reserved to do pageout */ +extern +int vm_page_laundry_count; /* How many pages being laundered? */ + +#define VM_PAGE_TO_PHYS(entry) ((entry)->phys_addr) + +#define IS_VM_PHYSADDR(pa) \ + ((pa) >= first_phys_addr && (pa) <= last_phys_addr) + +#define PHYS_TO_VM_PAGE(pa) \ + (&vm_page_array[atop(pa) - first_page ]) + +extern +simple_lock_data_t vm_page_queue_lock; /* lock on active and inactive + page queues */ +extern +simple_lock_data_t vm_page_queue_free_lock; + /* lock on free page queue */ +vm_offset_t vm_page_startup(); +vm_page_t vm_page_lookup(); +vm_page_t vm_page_alloc(); +void vm_page_init(); +void vm_page_free(); +void vm_page_activate(); +void vm_page_deactivate(); +void vm_page_rename(); +void vm_page_replace(); + +boolean_t vm_page_zero_fill(); +void vm_page_copy(); + +void vm_page_wire(); +void vm_page_unwire(); + +void vm_set_page_size(); + +/* + * Functions implemented as macros + */ + +#define PAGE_ASSERT_WAIT(m, interruptible) { \ + (m)->wanted = TRUE; \ + assert_wait((int) (m), (interruptible)); \ + } + +#define PAGE_WAKEUP(m) { \ + (m)->busy = FALSE; \ + if ((m)->wanted) { \ + (m)->wanted = FALSE; \ + thread_wakeup((int) (m)); \ + } \ + } + +#define vm_page_lock_queues() simple_lock(&vm_page_queue_lock) +#define vm_page_unlock_queues() simple_unlock(&vm_page_queue_lock) + +#define vm_page_set_modified(m) { (m)->clean = FALSE; } +#endif KERNEL +#endif _VM_PAGE_ diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c new file mode 100644 index 000000000000..1800729df785 --- /dev/null +++ b/sys/vm/vm_pageout.c @@ -0,0 +1,396 @@ +/* + * Copyright (c) 1991 Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91 + * $Id: vm_pageout.c,v 1.2 1993/10/16 16:20:47 rgrimes Exp $ + */ + +/* + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + */ + +/* + * The proverbial page-out daemon. + */ + +#include "param.h" + +#include "vm.h" +#include "vm_page.h" +#include "vm_pageout.h" +#include "vmmeter.h" + +int vm_pages_needed; /* Event on which pageout daemon sleeps */ +int vm_pageout_free_min = 0; /* Stop pageout to wait for pagers at this free level */ + +int vm_page_free_min_sanity = 40; + +int vm_page_pagesfreed; /* Pages freed by page daemon */ + +/* + * vm_pageout_scan does the dirty work for the pageout daemon. + */ +vm_pageout_scan() +{ + register vm_page_t m; + register int page_shortage; + register int s; + register int pages_freed; + int free; + + /* + * Only continue when we want more pages to be "free" + */ + + s = splimp(); + simple_lock(&vm_page_queue_free_lock); + free = vm_page_free_count; + simple_unlock(&vm_page_queue_free_lock); + splx(s); + + if (free < vm_page_free_target) { +#ifdef OMIT + swapout_threads(); +#endif /* OMIT*/ + + /* + * Be sure the pmap system is updated so + * we can scan the inactive queue. + */ + + pmap_update(); + } + + /* + * Acquire the resident page system lock, + * as we may be changing what's resident quite a bit. 
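
The snapshot idiom just shown (raise interrupt priority with splimp(), take the simple lock, copy the counter, release) has a rough userland analogue with a mutex; the decision is then made on a possibly stale copy, which the code explicitly tolerates. An illustrative sketch, not kernel code:

    #include <pthread.h>
    #include <stdio.h>

    /* Analogue of sampling vm_page_free_count under its simple lock. */
    static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;
    static int free_count = 37;

    static int sample_free(void)
    {
        int snapshot;
        pthread_mutex_lock(&free_lock);
        snapshot = free_count;               /* consistent read */
        pthread_mutex_unlock(&free_lock);
        return snapshot;                     /* may already be stale */
    }

    int main(void)
    {
        int free_target = 53;                /* illustrative figure */
        if (sample_free() < free_target)
            printf("would start scanning the inactive queue\n");
        return 0;
    }
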
+ */ + vm_page_lock_queues(); + + /* + * Start scanning the inactive queue for pages we can free. + * We keep scanning until we have enough free pages or + * we have scanned through the entire queue. If we + * encounter dirty pages, we start cleaning them. + */ + + pages_freed = 0; + m = (vm_page_t) queue_first(&vm_page_queue_inactive); + while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m)) { + vm_page_t next; + + s = splimp(); + simple_lock(&vm_page_queue_free_lock); + free = vm_page_free_count; + simple_unlock(&vm_page_queue_free_lock); + splx(s); + + if (free >= vm_page_free_target) + break; + + if (m->clean) { + next = (vm_page_t) queue_next(&m->pageq); + if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) { + vm_page_activate(m); + vm_stat.reactivations++; + } + else { + register vm_object_t object; + object = m->object; + if (!vm_object_lock_try(object)) { + /* + * Can't lock object - + * skip page. + */ + m = next; + continue; + } + pmap_page_protect(VM_PAGE_TO_PHYS(m), + VM_PROT_NONE); + vm_page_free(m); /* will dequeue */ + pages_freed++; + vm_object_unlock(object); + } + m = next; + } + else { + /* + * If a page is dirty, then it is either + * being washed (but not yet cleaned) + * or it is still in the laundry. If it is + * still in the laundry, then we start the + * cleaning operation. + */ + + if (m->laundry) { + /* + * Clean the page and remove it from the + * laundry. + * + * We set the busy bit to cause + * potential page faults on this page to + * block. + * + * And we set pageout-in-progress to keep + * the object from disappearing during + * pageout. This guarantees that the + * page won't move from the inactive + * queue. (However, any other page on + * the inactive queue may move!) + */ + + register vm_object_t object; + register vm_pager_t pager; + int pageout_status; + + object = m->object; + if (!vm_object_lock_try(object)) { + /* + * Skip page if we can't lock + * its object + */ + m = (vm_page_t) queue_next(&m->pageq); + continue; + } + + pmap_page_protect(VM_PAGE_TO_PHYS(m), + VM_PROT_NONE); + m->busy = TRUE; + vm_stat.pageouts++; + + /* + * Try to collapse the object before + * making a pager for it. We must + * unlock the page queues first. + */ + vm_page_unlock_queues(); + + vm_object_collapse(object); + + object->paging_in_progress++; + vm_object_unlock(object); + + /* + * Do a wakeup here in case the following + * operations block. + */ + thread_wakeup((int) &vm_page_free_count); + + /* + * If there is no pager for the page, + * use the default pager. If there's + * no place to put the page at the + * moment, leave it in the laundry and + * hope that there will be paging space + * later. + */ + + if ((pager = object->pager) == NULL) { + pager = vm_pager_allocate(PG_DFLT, + (caddr_t)0, + object->size, + VM_PROT_ALL); + if (pager != NULL) { + vm_object_setpager(object, + pager, 0, FALSE); + } + } + pageout_status = pager ? + vm_pager_put(pager, m, FALSE) : + VM_PAGER_FAIL; + vm_object_lock(object); + vm_page_lock_queues(); + next = (vm_page_t) queue_next(&m->pageq); + + switch (pageout_status) { + case VM_PAGER_OK: + case VM_PAGER_PEND: + m->laundry = FALSE; + break; + case VM_PAGER_BAD: + /* + * Page outside of range of object. + * Right now we essentially lose the + * changes by pretending it worked. + * XXX dubious, what should we do? 
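
Stripped of locking, queue surgery, and pager I/O, the inactive-queue loop above reduces to a per-page decision table. A compact model of just that classification (invented names):

    #include <stdio.h>

    enum disposition { REACTIVATE, FREE, LAUNDER, SKIP };

    /* One inactive page, as the scan above treats it: referenced clean
     * pages go back to the active queue, unreferenced clean pages are
     * freed, dirty pages still in the laundry get a cleaning started,
     * and dirty pages already being washed are left alone. */
    static enum disposition classify(int clean, int referenced, int laundry)
    {
        if (clean)
            return referenced ? REACTIVATE : FREE;
        return laundry ? LAUNDER : SKIP;
    }

    int main(void)
    {
        printf("%d\n", classify(1, 1, 0));   /* referenced clean: reactivate */
        printf("%d\n", classify(1, 0, 0));   /* unreferenced clean: free */
        printf("%d\n", classify(0, 0, 1));   /* dirty, in laundry: start I/O */
        return 0;
    }
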
+ */ + m->laundry = FALSE; + m->clean = TRUE; + pmap_clear_modify(VM_PAGE_TO_PHYS(m)); + break; + case VM_PAGER_FAIL: + /* + * If page couldn't be paged out, then + * reactivate the page so it doesn't + * clog the inactive list. (We will + * try paging out it again later). + */ + vm_page_activate(m); + break; + } + + pmap_clear_reference(VM_PAGE_TO_PHYS(m)); + + /* + * If the operation is still going, leave + * the page busy to block all other accesses. + * Also, leave the paging in progress + * indicator set so that we don't attempt an + * object collapse. + */ + if (pageout_status != VM_PAGER_PEND) { + m->busy = FALSE; + PAGE_WAKEUP(m); + object->paging_in_progress--; + } + thread_wakeup((int) object); + vm_object_unlock(object); + m = next; + } + else + m = (vm_page_t) queue_next(&m->pageq); + } + } + + /* + * Compute the page shortage. If we are still very low on memory + * be sure that we will move a minimal amount of pages from active + * to inactive. + */ + + page_shortage = vm_page_inactive_target - vm_page_inactive_count; + page_shortage -= vm_page_free_count; + + if ((page_shortage <= 0) && (pages_freed == 0)) + page_shortage = 1; + + while (page_shortage > 0) { + /* + * Move some more pages from active to inactive. + */ + + if (queue_empty(&vm_page_queue_active)) { + break; + } + m = (vm_page_t) queue_first(&vm_page_queue_active); + vm_page_deactivate(m); + page_shortage--; + } + + vm_page_pagesfreed += pages_freed; + vm_page_unlock_queues(); +} + +/* + * vm_pageout is the high level pageout daemon. + */ + +void vm_pageout() +{ + (void) spl0(); + + /* + * Initialize some paging parameters. + */ + + if (vm_page_free_min == 0) { + vm_page_free_min = vm_page_free_count / 20; + if (vm_page_free_min < 3) + vm_page_free_min = 3; + + if (vm_page_free_min > vm_page_free_min_sanity) + vm_page_free_min = vm_page_free_min_sanity; + } + + if (vm_page_free_reserved == 0) { + if ((vm_page_free_reserved = vm_page_free_min / 2) < 10) + vm_page_free_reserved = 10; + } + if (vm_pageout_free_min == 0) { + if ((vm_pageout_free_min = vm_page_free_reserved / 2) > 10) + vm_pageout_free_min = 10; + } + + if (vm_page_free_target == 0) + vm_page_free_target = (vm_page_free_min * 4) / 3; + + if (vm_page_inactive_target == 0) + vm_page_inactive_target = vm_page_free_min * 2; + + if (vm_page_free_target <= vm_page_free_min) + vm_page_free_target = vm_page_free_min + 1; + + if (vm_page_inactive_target <= vm_page_free_target) + vm_page_inactive_target = vm_page_free_target + 1; + + /* + * The pageout daemon is never done, so loop + * forever. + */ + + simple_lock(&vm_pages_needed_lock); + while (TRUE) { + thread_sleep((int) &vm_pages_needed, &vm_pages_needed_lock, + FALSE); + cnt.v_scan++; + vm_pageout_scan(); + vm_pager_sync(); + simple_lock(&vm_pages_needed_lock); + thread_wakeup((int) &vm_page_free_count); + } +} diff --git a/sys/vm/vm_pageout.h b/sys/vm/vm_pageout.h new file mode 100644 index 000000000000..638684ac394d --- /dev/null +++ b/sys/vm/vm_pageout.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 1991 Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_pageout.h 7.3 (Berkeley) 4/21/91 + * $Id: vm_pageout.h,v 1.2 1993/10/16 16:20:49 rgrimes Exp $ + */ + +/* + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Author: Avadis Tevanian, Jr. + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + */ + +/* + * Header file for pageout daemon. + */ + +/* + * Exported data structures. + */ + +extern int vm_pages_needed; /* should be some "event" structure */ +simple_lock_data_t vm_pages_needed_lock; + + +/* + * Exported routines. + */ + +/* + * Signal pageout-daemon and wait for it. + */ + +#define VM_WAIT { \ + simple_lock(&vm_pages_needed_lock); \ + thread_wakeup((int)&vm_pages_needed); \ + thread_sleep((int)&vm_page_free_count, \ + &vm_pages_needed_lock, FALSE); \ + } diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c new file mode 100644 index 000000000000..ac49c43c76ea --- /dev/null +++ b/sys/vm/vm_pager.c @@ -0,0 +1,284 @@ +/* + * Copyright (c) 1991 Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_pager.c 7.4 (Berkeley) 5/7/91 + * $Id: vm_pager.c,v 1.3 1993/10/16 16:20:50 rgrimes Exp $ + */ + +/* + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + */ + +/* + * Paging space routine stubs. Emulates a matchmaker-like interface + * for builtin pagers. 
+ */ + +#include "param.h" +#include "malloc.h" + +#include "vm.h" +#include "vm_page.h" +#include "vm_kern.h" + +#include "swappager.h" + +#if NSWAPPAGER > 0 +extern struct pagerops swappagerops; +#else +#define swappagerops NULL +#endif +#include "vnodepager.h" +#if NVNODEPAGER > 0 +extern struct pagerops vnodepagerops; +#else +#define vnodepagerops NULL +#endif +#include "devpager.h" +#if NDEVPAGER > 0 +extern struct pagerops devicepagerops; +#else +#define devicepagerops NULL +#endif + +struct pagerops *pagertab[] = { + &swappagerops, /* PG_SWAP */ + &vnodepagerops, /* PG_VNODE */ + &devicepagerops, /* PG_DEV */ +}; +int npagers = sizeof (pagertab) / sizeof (pagertab[0]); + +struct pagerops *dfltpagerops = NULL; /* default pager */ + +/* + * Kernel address space for mapping pages. + * Used by pagers where KVAs are needed for IO. + */ +#define PAGER_MAP_SIZE (256 * PAGE_SIZE) +vm_map_t pager_map; +vm_offset_t pager_sva, pager_eva; + +void +vm_pager_init() +{ + struct pagerops **pgops; + + /* + * Allocate a kernel submap for tracking get/put page mappings + */ + pager_map = kmem_suballoc(kernel_map, &pager_sva, &pager_eva, + PAGER_MAP_SIZE, FALSE); + /* + * Initialize known pagers + * If pgops is a null pointer skip over it. + */ + for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++) + if (*pgops) (*(*pgops)->pgo_init)(); + if (dfltpagerops == NULL) + panic("no default pager"); +} + +/* + * Allocate an instance of a pager of the given type. + */ +vm_pager_t +vm_pager_allocate(type, handle, size, prot) + int type; + caddr_t handle; + vm_size_t size; + vm_prot_t prot; +{ + vm_pager_t pager; + struct pagerops *ops; + + ops = (type == PG_DFLT) ? dfltpagerops : pagertab[type]; + return((*ops->pgo_alloc)(handle, size, prot)); +} + +void +vm_pager_deallocate(pager) + vm_pager_t pager; +{ + if (pager == NULL) + panic("vm_pager_deallocate: null pager"); + + VM_PAGER_DEALLOC(pager); +} + +vm_pager_get(pager, m, sync) + vm_pager_t pager; + vm_page_t m; + boolean_t sync; +{ + extern boolean_t vm_page_zero_fill(); + + if (pager == NULL) + return(vm_page_zero_fill(m) ? VM_PAGER_OK : VM_PAGER_FAIL); + return(VM_PAGER_GET(pager, m, sync)); +} + +vm_pager_put(pager, m, sync) + vm_pager_t pager; + vm_page_t m; + boolean_t sync; +{ + if (pager == NULL) + panic("vm_pager_put: null pager"); + return(VM_PAGER_PUT(pager, m, sync)); +} + +boolean_t +vm_pager_has_page(pager, offset) + vm_pager_t pager; + vm_offset_t offset; +{ + if (pager == NULL) + panic("vm_pager_has_page"); + return(VM_PAGER_HASPAGE(pager, offset)); +} + +/* + * Called by pageout daemon before going back to sleep. + * Gives pagers a chance to clean up any completed async pageing operations. 
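
Dispatch through pagertab above is a classic operations-vector pattern: each pager type exports a struct of function pointers, a config-time table collects them, and null slots are skipped at init. A minimal self-contained sketch of the same shape (all names invented):

    #include <stdio.h>

    struct ops {
        const char *name;
        void (*init)(void);
        int  (*putpage)(int page);
    };

    static void swap_init(void)    { printf("swap pager up\n"); }
    static int  swap_put(int page) { printf("put %d to swap\n", page); return 0; }

    static struct ops swap_ops = { "swap", swap_init, swap_put };

    /* A null slot models a pager compiled out of the kernel. */
    static struct ops *pagertab[] = { &swap_ops, NULL };
    static int npagers = sizeof(pagertab) / sizeof(pagertab[0]);

    int main(void)
    {
        struct ops **p;

        /* Same shape as vm_pager_init(): skip null slots, call init. */
        for (p = pagertab; p < &pagertab[npagers]; p++)
            if (*p)
                (*(*p)->init)();

        return (*pagertab[0]->putpage)(42);
    }
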
+ */ +void +vm_pager_sync() +{ + struct pagerops **pgops; + + for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++) + (*(*pgops)->pgo_putpage)(NULL, NULL, FALSE); +} + +vm_offset_t +vm_pager_map_page(m) + vm_page_t m; +{ + vm_offset_t kva; + +#ifdef DEBUG + if (!m->busy || m->active) + panic("vm_pager_map_page: page active or not busy"); + if (m->pagerowned) + printf("vm_pager_map_page: page %x already in pager\n", m); +#endif + kva = kmem_alloc_wait(pager_map, PAGE_SIZE); +#ifdef DEBUG + m->pagerowned = 1; +#endif + pmap_enter(vm_map_pmap(pager_map), kva, VM_PAGE_TO_PHYS(m), + VM_PROT_DEFAULT, TRUE); + return(kva); +} + +void +vm_pager_unmap_page(kva) + vm_offset_t kva; +{ +#ifdef DEBUG + vm_page_t m; + + m = PHYS_TO_VM_PAGE(pmap_extract(vm_map_pmap(pager_map), kva)); +#endif + kmem_free_wakeup(pager_map, kva, PAGE_SIZE); +#ifdef DEBUG + if (m->pagerowned) + m->pagerowned = 0; + else + printf("vm_pager_unmap_page: page %x(%x/%x) not owned\n", + m, kva, VM_PAGE_TO_PHYS(m)); +#endif +} + +vm_pager_t +vm_pager_lookup(list, handle) + register queue_head_t *list; + caddr_t handle; +{ + register vm_pager_t pager; + + pager = (vm_pager_t) queue_first(list); + while (!queue_end(list, (queue_entry_t)pager)) { + if (pager->pg_handle == handle) + return(pager); + pager = (vm_pager_t) queue_next(&pager->pg_list); + } + return(NULL); +} + +/* + * This routine gains a reference to the object. + * Explicit deallocation is necessary. + */ +pager_cache(object, should_cache) + vm_object_t object; + boolean_t should_cache; +{ + if (object == NULL) + return(KERN_INVALID_ARGUMENT); + + vm_object_cache_lock(); + vm_object_lock(object); + object->can_persist = should_cache; + vm_object_unlock(object); + vm_object_cache_unlock(); + + vm_object_deallocate(object); + + return(KERN_SUCCESS); +} diff --git a/sys/vm/vm_pager.h b/sys/vm/vm_pager.h new file mode 100644 index 000000000000..292c03da5ac5 --- /dev/null +++ b/sys/vm/vm_pager.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 1990 University of Utah. + * Copyright (c) 1991 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Systems Programming Group of the University of Utah Computer + * Science Department. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)vm_pager.h	7.2 (Berkeley) 4/20/91
+ * $Id: vm_pager.h,v 1.2 1993/10/16 16:20:51 rgrimes Exp $
+ */
+
+/*
+ * Pager routine interface definition.
+ * For BSD we use a cleaner version of the internal pager interface.
+ */
+
+#ifndef	_VM_PAGER_
+#define	_VM_PAGER_
+
+struct	pager_struct {
+	queue_head_t	pg_list;	/* links for list management */
+	caddr_t		pg_handle;	/* external handle (vp, dev, fp) */
+	int		pg_type;	/* type of pager */
+	struct pagerops	*pg_ops;	/* pager operations */
+	caddr_t		pg_data;	/* private pager data */
+};
+typedef	struct pager_struct *vm_pager_t;
+
+/* pager types */
+#define	PG_DFLT		-1
+#define	PG_SWAP		0
+#define	PG_VNODE	1
+#define	PG_DEVICE	2
+
+struct	pagerops {
+	void		(*pgo_init)();		/* initialize pager */
+	vm_pager_t	(*pgo_alloc)();		/* allocate pager */
+	void		(*pgo_dealloc)();	/* disassociate */
+	int		(*pgo_getpage)();	/* get (read) page */
+	int		(*pgo_putpage)();	/* put (write) page */
+	boolean_t	(*pgo_haspage)();	/* does pager have page? */
+};
+
+/*
+ * get/put return values
+ * OK	operation was successful
+ * BAD	specified data was out of the accepted range
+ * FAIL	specified data was in range, but doesn't exist
+ * PEND	operation was initiated but not completed
+ */
+#define	VM_PAGER_OK	0
+#define	VM_PAGER_BAD	1
+#define	VM_PAGER_FAIL	2
+#define	VM_PAGER_PEND	3
+
+#define	VM_PAGER_ALLOC(h, s, p)		(*(pg)->pg_ops->pgo_alloc)(h, s, p)
+#define	VM_PAGER_DEALLOC(pg)		(*(pg)->pg_ops->pgo_dealloc)(pg)
+#define	VM_PAGER_GET(pg, m, s)		(*(pg)->pg_ops->pgo_getpage)(pg, m, s)
+#define	VM_PAGER_PUT(pg, m, s)		(*(pg)->pg_ops->pgo_putpage)(pg, m, s)
+#define	VM_PAGER_HASPAGE(pg, o)		(*(pg)->pg_ops->pgo_haspage)(pg, o)
+
+#ifdef KERNEL
+vm_pager_t	vm_pager_allocate();
+void		vm_pager_deallocate();
+int		vm_pager_get();
+int		vm_pager_put();
+boolean_t	vm_pager_has_page();
+
+vm_offset_t	vm_pager_map_page();
+void		vm_pager_unmap_page();
+vm_pager_t	vm_pager_lookup();
+void		vm_pager_sync();
+
+extern struct pagerops *dfltpagerops;
+#endif
+
+#endif	/* _VM_PAGER_ */
diff --git a/sys/vm/vm_param.h b/sys/vm/vm_param.h
new file mode 100644
index 000000000000..32f35ce01360
--- /dev/null
+++ b/sys/vm/vm_param.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * The Mach Operating System project at Carnegie-Mellon University.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3.
All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)vm_param.h	7.2 (Berkeley) 4/21/91
+ * $Id: vm_param.h,v 1.2 1993/10/16 16:20:53 rgrimes Exp $
+ */
+
+/*
+ * Copyright (c) 1987, 1990 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Authors: Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
+ *  School of Computer Science
+ *  Carnegie Mellon University
+ *  Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+/*
+ *	Machine independent virtual memory parameters.
+ */
+
+#ifndef	_VM_PARAM_
+#define	_VM_PARAM_
+
+#ifdef KERNEL
+#include "machine/vmparam.h"
+#else
+#include <machine/vmparam.h>
+#endif
+
+/*
+ * This belongs in types.h, but breaks too many existing programs.
+ */
+typedef	int	boolean_t;
+#define	TRUE	1
+#define	FALSE	0
+
+/*
+ *	The machine independent pages are referred to as PAGES.  A page
+ *	is some number of hardware pages, depending on the target machine.
+ */
+
+/*
+ *	All references to the size of a page should be done with PAGE_SIZE
+ *	or PAGE_SHIFT.  The fact they are variables is hidden here so that
+ *	we can easily make them constant if we so desire.
+ */
+
+#define	PAGE_SIZE	page_size	/* size of page in addressable units */
+#define	PAGE_SHIFT	page_shift	/* number of bits to shift for pages */
+
+/*
+ *	Return values from the VM routines.
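
The atop/ptoa and round_page/trunc_page macros defined just below are pure shift-and-mask arithmetic once the page size is fixed. A standalone demonstration assuming a 4096-byte page (the kernel instead reads the page_size/page_shift variables set at boot):

    #include <stdio.h>

    /* Fixed illustrative values; the kernel treats these as variables. */
    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (PAGE_SIZE - 1)

    #define atop(x)        ((unsigned long)(x) >> PAGE_SHIFT)  /* address to page */
    #define ptoa(x)        ((unsigned long)(x) << PAGE_SHIFT)  /* page to address */
    #define round_page(x)  (((unsigned long)(x) + PAGE_MASK) & ~PAGE_MASK)
    #define trunc_page(x)  ((unsigned long)(x) & ~PAGE_MASK)

    int main(void)
    {
        printf("atop(8192)       = %lu\n", atop(8192));        /* 2 */
        printf("ptoa(3)          = %lu\n", ptoa(3));           /* 12288 */
        printf("round_page(1)    = %lu\n", round_page(1));     /* 4096 */
        printf("trunc_page(8191) = %lu\n", trunc_page(8191));  /* 4096 */
        return 0;
    }
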
+ */
+#define	KERN_SUCCESS		0
+#define	KERN_INVALID_ADDRESS	1
+#define	KERN_PROTECTION_FAILURE	2
+#define	KERN_NO_SPACE		3
+#define	KERN_INVALID_ARGUMENT	4
+#define	KERN_FAILURE		5
+#define	KERN_RESOURCE_SHORTAGE	6
+#define	KERN_NOT_RECEIVER	7
+#define	KERN_NO_ACCESS		8
+
+#ifdef	ASSEMBLER
+#else	ASSEMBLER
+/*
+ *	Convert addresses to pages and vice versa.
+ *	No rounding is used.
+ */
+
+#ifdef	KERNEL
+#define	atop(x)		(((unsigned)(x)) >> page_shift)
+#define	ptoa(x)		((vm_offset_t)((x) << page_shift))
+#endif	KERNEL
+
+/*
+ *	Round off or truncate to the nearest page.  These will work
+ *	for either addresses or counts (i.e., 1 byte rounds up to one
+ *	page, or page_size bytes).
+ */
+
+#ifdef	KERNEL
+#define	round_page(x)	((vm_offset_t)((((vm_offset_t)(x)) + page_mask) & ~page_mask))
+#define	trunc_page(x)	((vm_offset_t)(((vm_offset_t)(x)) & ~page_mask))
+#else	KERNEL
+#define	round_page(x)	((((vm_offset_t)(x) + (vm_page_size - 1)) / vm_page_size) * vm_page_size)
+#define	trunc_page(x)	((((vm_offset_t)(x)) / vm_page_size) * vm_page_size)
+#endif	KERNEL
+
+#ifdef	KERNEL
+extern vm_size_t	page_size;	/* machine independent page size */
+extern vm_size_t	page_mask;	/* page_size - 1; mask for
+					   offset within page */
+extern int		page_shift;	/* shift to use for page size */
+
+extern vm_size_t	mem_size;	/* size of physical memory (bytes) */
+extern vm_offset_t	first_addr;	/* first physical page */
+extern vm_offset_t	last_addr;	/* last physical page */
+#endif	KERNEL
+
+#endif	ASSEMBLER
+
+#endif	_VM_PARAM_
diff --git a/sys/vm/vm_prot.h b/sys/vm/vm_prot.h
new file mode 100644
index 000000000000..f830d811f947
--- /dev/null
+++ b/sys/vm/vm_prot.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * The Mach Operating System project at Carnegie-Mellon University.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_prot.h 7.2 (Berkeley) 4/21/91 + * $Id: vm_prot.h,v 1.2 1993/10/16 16:20:54 rgrimes Exp $ + */ + +/* + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + */ + +/* + * Virtual memory protection definitions. + */ + +#ifndef _VM_PROT_ +#define _VM_PROT_ + +/* + * Types defined: + * + * vm_prot_t VM protection values. + */ + +typedef int vm_prot_t; + +/* + * Protection values, defined as bits within the vm_prot_t type + */ + +#define VM_PROT_NONE ((vm_prot_t) 0x00) + +#define VM_PROT_READ ((vm_prot_t) 0x01) /* read permission */ +#define VM_PROT_WRITE ((vm_prot_t) 0x02) /* write permission */ +#define VM_PROT_EXECUTE ((vm_prot_t) 0x04) /* execute permission */ + +/* + * The default protection for newly-created virtual memory + */ + +#define VM_PROT_DEFAULT (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE) + +/* + * The maximum privileges possible, for parameter checking. + */ + +#define VM_PROT_ALL (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE) + +#endif _VM_PROT_ diff --git a/sys/vm/vm_statistics.h b/sys/vm/vm_statistics.h new file mode 100644 index 000000000000..c258b1a01af2 --- /dev/null +++ b/sys/vm/vm_statistics.h @@ -0,0 +1,110 @@ +/* + * Copyright (c) 1991 Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_statistics.h 7.2 (Berkeley) 4/21/91 + * $Id: vm_statistics.h,v 1.2 1993/10/16 16:20:55 rgrimes Exp $ + */ + +/* + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young, David Golub + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + */ + +/* + * Virtual memory statistics structure. + */ + +#ifndef _VM_STATISTICS_ +#define _VM_STATISTICS_ + +struct vm_statistics { + long pagesize; /* page size in bytes */ + long free_count; /* # of pages free */ + long active_count; /* # of pages active */ + long inactive_count; /* # of pages inactive */ + long wire_count; /* # of pages wired down */ + long zero_fill_count; /* # of zero fill pages */ + long reactivations; /* # of pages reactivated */ + long pageins; /* # of pageins */ + long pageouts; /* # of pageouts */ + long faults; /* # of faults */ + long cow_faults; /* # of copy-on-writes */ + long lookups; /* object cache lookups */ + long hits; /* object cache hits */ +}; + +typedef struct vm_statistics *vm_statistics_t; +typedef struct vm_statistics vm_statistics_data_t; + +#ifdef KERNEL +vm_statistics_data_t vm_stat; +#endif KERNEL + +/* + * Each machine dependent implementation is expected to + * keep certain statistics. They may do this anyway they + * so choose, but are expected to return the statistics + * in the following structure. 
+ */
+
+struct pmap_statistics {
+	long		resident_count;	/* # of pages mapped (total)*/
+	long		wired_count;	/* # of pages wired */
+};
+
+typedef struct pmap_statistics	*pmap_statistics_t;
+#endif	_VM_STATISTICS_
diff --git a/sys/vm/vm_swap.c b/sys/vm/vm_swap.c
new file mode 100644
index 000000000000..935d11cba89a
--- /dev/null
+++ b/sys/vm/vm_swap.c
@@ -0,0 +1,262 @@
+/*
+ * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)vm_swap.c	7.18 (Berkeley) 5/6/91
+ * $Id: vm_swap.c,v 1.4 1993/10/16 16:20:56 rgrimes Exp $
+ */
+
+#include "param.h"
+#include "systm.h"
+#include "buf.h"
+#include "conf.h"
+#include "proc.h"
+#include "namei.h"
+#include "dmap.h"		/* XXX */
+#include "vnode.h"
+#include "specdev.h"
+#include "file.h"
+#include "rlist.h"
+
+/*
+ * Indirect driver for multi-controller paging.
+ */
+
+int	nswap, nswdev;
+
+/*
+ * Set up swap devices.
+ * Initialize linked list of free swap
+ * headers. These do not actually point
+ * to buffers, but rather to pages that
+ * are being swapped in and out.
+ */
+swapinit()
+{
+	register int i;
+	register struct buf *sp = swbuf;
+	struct swdevt *swp;
+	int error;
+
+	/*
+	 * Count swap devices, and adjust total swap space available.
+	 * Some of this space will not be available until a swapon()
+	 * system call is issued, usually when the system goes multi-user.
+ */
+	nswdev = 0;
+	nswap = 0;
+	for (swp = swdevt; swp->sw_dev; swp++) {
+		nswdev++;
+		if (swp->sw_nblks > nswap)
+			nswap = swp->sw_nblks;
+	}
+	if (nswdev == 0)
+		panic("swapinit");
+	if (nswdev > 1)
+		nswap = ((nswap + dmmax - 1) / dmmax) * dmmax;
+	nswap *= nswdev;
+	if (bdevvp(swdevt[0].sw_dev, &swdevt[0].sw_vp))
+		panic("swapvp");
+	if (error = swfree(&proc0, 0)) {
+		printf("\nwarning: no swap space present (yet)\n");
+		/* printf("(swfree (..., 0) -> %d)\n", error); /* XXX */
+		/*panic("swapinit swfree 0");*/
+	}
+
+	/*
+	 * Now set up swap buffer headers.
+	 */
+	bswlist.av_forw = sp;
+	for (i = 0; i < nswbuf - 1; i++, sp++)
+		sp->av_forw = sp + 1;
+	sp->av_forw = NULL;
+}
+
+swstrategy(bp)
+	register struct buf *bp;
+{
+	int sz, off, seg, index;
+	register struct swdevt *sp;
+	struct vnode *vp;
+
+#ifdef GENERIC
+	/*
+	 * A mini-root gets copied into the front of the swap
+	 * and we run over top of the swap area just long
+	 * enough for us to do a mkfs and restor of the real
+	 * root (sure beats rewriting standalone restor).
+	 */
+#define	MINIROOTSIZE	4096
+	if (rootdev == dumpdev)
+		bp->b_blkno += MINIROOTSIZE;
+#endif
+	sz = howmany(bp->b_bcount, DEV_BSIZE);
+	if (bp->b_blkno + sz > nswap) {
+		bp->b_flags |= B_ERROR;
+		biodone(bp);
+		return;
+	}
+	if (nswdev > 1) {
+		off = bp->b_blkno % dmmax;
+		if (off+sz > dmmax) {
+			bp->b_flags |= B_ERROR;
+			biodone(bp);
+			return;
+		}
+		seg = bp->b_blkno / dmmax;
+		index = seg % nswdev;
+		seg /= nswdev;
+		bp->b_blkno = seg*dmmax + off;
+	} else
+		index = 0;
+	sp = &swdevt[index];
+	if ((bp->b_dev = sp->sw_dev) == 0)
+		panic("swstrategy");
+	if (sp->sw_vp == NULL) {
+		bp->b_flags |= B_ERROR;
+		biodone(bp);
+		return;
+	}
+	VHOLD(sp->sw_vp);
+	if ((bp->b_flags & B_READ) == 0) {
+		if (vp = bp->b_vp) {
+			vp->v_numoutput--;
+			if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
+				vp->v_flag &= ~VBWAIT;
+				wakeup((caddr_t)&vp->v_numoutput);
+			}
+		}
+		sp->sw_vp->v_numoutput++;
+	}
+	if (bp->b_vp != NULL)
+		brelvp(bp);
+	bp->b_vp = sp->sw_vp;
+	VOP_STRATEGY(bp);
+}
+
+/*
+ * System call swapon(name) enables swapping on device name,
+ * which must be in the swdevsw.  Return EBUSY
+ * if already swapping on this device.
+ */
+
+struct swapon_args {
+	char	*name;
+};
+
+/* ARGSUSED */
+swapon(p, uap, retval)
+	struct proc *p;
+	struct swapon_args *uap;
+	int *retval;
+{
+	register struct vnode *vp;
+	register struct swdevt *sp;
+	register struct nameidata *ndp;
+	dev_t dev;
+	int error;
+	struct nameidata nd;
+
+	if (error = suser(p->p_ucred, &p->p_acflag))
+		return (error);
+	ndp = &nd;
+	ndp->ni_nameiop = LOOKUP | FOLLOW;
+	ndp->ni_segflg = UIO_USERSPACE;
+	ndp->ni_dirp = uap->name;
+	if (error = namei(ndp, p))
+		return (error);
+	vp = ndp->ni_vp;
+	if (vp->v_type != VBLK) {
+		vrele(vp);
+		return (ENOTBLK);
+	}
+	dev = (dev_t)vp->v_rdev;
+	if (major(dev) >= nblkdev) {
+		vrele(vp);
+		return (ENXIO);
+	}
+	for (sp = &swdevt[0]; sp->sw_dev; sp++)
+		if (sp->sw_dev == dev) {
+			if (sp->sw_freed) {
+				vrele(vp);
+				return (EBUSY);
+			}
+			sp->sw_vp = vp;
+			if (error = swfree(p, sp - swdevt)) {
+				printf("swap failed! (unchanged)\n");
+				vrele(vp);
+				return (error);
+			}
+			return (0);
+		}
+	vrele(vp);
+	return (EINVAL);
+}
+
+/*
+ * Swfree(index) frees the index'th portion of the swap map.
+ * Each of the nswdev devices provides 1/nswdev'th of the swap
+ * space, which is laid out with blocks of dmmax pages circularly
+ * among the devices.
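
The remapping in swstrategy() above stripes logical swap blocks across the devices in runs of dmmax blocks, which is why swapinit() rounds nswap up to a dmmax multiple per device. Pulled out as a pure function, with example figures only:

    #include <stdio.h>

    /* Illustrative configuration: two swap devices, 512-block stripes. */
    static int nswdev = 2, dmmax = 512;

    /* The same arithmetic as swstrategy(): logical block -> (device, block). */
    static void remap(long blkno, int *dev, long *devblk)
    {
        long off = blkno % dmmax;        /* position within the stripe */
        long seg = blkno / dmmax;        /* which stripe */

        *dev    = (int)(seg % nswdev);   /* stripes rotate across devices */
        seg    /= nswdev;                /* stripe index on that device */
        *devblk = seg * dmmax + off;
    }

    int main(void)
    {
        long blknos[] = { 0, 511, 512, 1024, 1536 };
        int i, dev;
        long devblk;

        for (i = 0; i < 5; i++) {
            remap(blknos[i], &dev, &devblk);
            printf("swap block %4ld -> device %d, block %ld\n",
                   blknos[i], dev, devblk);
        }
        return 0;
    }

Consecutive dmmax-block runs land on alternating devices, so large pageouts spread their I/O across all the swap spindles.
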
+ */ +swfree(p, index) + struct proc *p; + int index; +{ + register struct swdevt *sp; + register swblk_t vsbase; + register long blk; + struct vnode *vp; + register swblk_t dvbase; + register int nblks; + int error; + + sp = &swdevt[index]; + nblks = sp->sw_nblks; + if (nblks <= 0) + return(ENXIO); + vp = sp->sw_vp; + if (error = VOP_OPEN(vp, FREAD|FWRITE, p->p_ucred, p)) + return (error); + sp->sw_freed = 1; + + /*printf("%d blocks from device %d/%d ", + sp->sw_nblks, major(sp->sw_dev), minor(sp->sw_dev));*/ + for (dvbase = 0; dvbase < nblks; dvbase += dmmax) { + blk = nblks - dvbase; + if ((vsbase = index*dmmax + dvbase*nswdev) >= nswap) + panic("swfree"); + if (blk > dmmax) + blk = dmmax; + rlist_free(&swapmap, vsbase, vsbase + blk - 1); + } + return (0); +} diff --git a/sys/vm/vm_unix.c b/sys/vm/vm_unix.c new file mode 100644 index 000000000000..f658f633164f --- /dev/null +++ b/sys/vm/vm_unix.c @@ -0,0 +1,136 @@ +/* + * Copyright (c) 1988 University of Utah. + * Copyright (c) 1991 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Systems Programming Group of the University of Utah Computer + * Science Department. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * from: Utah $Hdr: vm_unix.c 1.1 89/11/07$ + * from: @(#)vm_unix.c 7.2 (Berkeley) 4/20/91 + * $Id: vm_unix.c,v 1.3 1993/10/16 16:20:58 rgrimes Exp $ + */ + +/* + * Traditional sbrk/grow interface to VM + */ +#include "param.h" +#include "systm.h" +#include "proc.h" +#include "resourcevar.h" + +#include "vm.h" + +struct obreak_args { + char *nsiz; +}; + +/* ARGSUSED */ +obreak(p, uap, retval) + struct proc *p; + struct obreak_args *uap; + int *retval; +{ + register struct vmspace *vm = p->p_vmspace; + vm_offset_t new, old; + int rv; + register int diff; + + old = (vm_offset_t)vm->vm_daddr; + new = round_page(uap->nsiz); + if ((int)(new - old) > p->p_rlimit[RLIMIT_DATA].rlim_cur) + return(ENOMEM); + old = round_page(old + ctob(vm->vm_dsize)); + diff = new - old; + if (diff > 0) { + rv = vm_allocate(&vm->vm_map, &old, diff, FALSE); + if (rv != KERN_SUCCESS) { + uprintf("sbrk: grow failed, return = %d\n", rv); + return(ENOMEM); + } + vm->vm_dsize += btoc(diff); + } else if (diff < 0) { + diff = -diff; + rv = vm_deallocate(&vm->vm_map, new, diff); + if (rv != KERN_SUCCESS) { + uprintf("sbrk: shrink failed, return = %d\n", rv); + return(ENOMEM); + } + vm->vm_dsize -= btoc(diff); + } + return(0); +} + +/* + * Enlarge the "stack segment" to include the specified + * stack pointer for the process. + */ +grow(p, sp) + struct proc *p; + unsigned sp; +{ + register struct vmspace *vm = p->p_vmspace; + register int si; + + /* + * For user defined stacks (from sendsig). + */ + if (sp < (unsigned)vm->vm_maxsaddr) + return (0); + /* + * For common case of already allocated (from trap). + */ + if (sp >= (unsigned)vm->vm_maxsaddr + MAXSSIZ - ctob(vm->vm_ssize)) + return (1); + /* + * Really need to check vs limit and increment stack size if ok. + */ + si = clrnd(btoc(vm->vm_maxsaddr + MAXSSIZ - sp) - vm->vm_ssize); + if (vm->vm_ssize + si > btoc(p->p_rlimit[RLIMIT_STACK].rlim_cur)) + return (0); + vm->vm_ssize += si; + return (1); +} + +struct ovadvise_args { + int anom; +}; + +/* ARGSUSED */ +ovadvise(p, uap, retval) + struct proc *p; + struct ovadvise_args *uap; + int *retval; +{ + + return (EINVAL); +} diff --git a/sys/vm/vm_user.c b/sys/vm/vm_user.c new file mode 100644 index 000000000000..88ae9b526148 --- /dev/null +++ b/sys/vm/vm_user.c @@ -0,0 +1,257 @@ +/* + * Copyright (c) 1991 Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_user.c 7.3 (Berkeley) 4/21/91 + * $Id: vm_user.c,v 1.3 1993/10/16 16:20:59 rgrimes Exp $ + */ + +/* + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie the + * rights to redistribute these changes. + */ + +/* + * User-exported virtual memory functions. + */ + +#include "param.h" +#include "systm.h" +#include "proc.h" + +#include "vm.h" +#include "vm_page.h" + +simple_lock_data_t vm_alloc_lock; /* XXX */ + +#ifdef MACHVMCOMPAT +/* + * BSD style syscall interfaces to MACH calls + * All return MACH return values. 
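+ *
+ * (Editorial note, not in the original source: every wrapper below
+ * first overwrites its uap->map argument with the calling process's
+ * own map, so the user-supplied map is effectively ignored -- hence
+ * the XXX tags on those assignments.)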
+ */
+
+struct svm_allocate_args {
+	vm_map_t map;
+	vm_offset_t *addr;
+	vm_size_t size;
+	boolean_t anywhere;
+};
+
+/* ARGSUSED */
+svm_allocate(p, uap, retval)
+	struct proc *p;
+	struct svm_allocate_args *uap;
+	int *retval;
+{
+	vm_offset_t addr;
+	int rv;
+
+	uap->map = p->p_map;		/* XXX */
+
+	if (copyin((caddr_t)uap->addr, (caddr_t)&addr, sizeof (addr)))
+		rv = KERN_INVALID_ARGUMENT;
+	else
+		rv = vm_allocate(uap->map, &addr, uap->size, uap->anywhere);
+	if (rv == KERN_SUCCESS) {
+		if (copyout((caddr_t)&addr, (caddr_t)uap->addr, sizeof(addr)))
+			rv = KERN_INVALID_ARGUMENT;
+	}
+	return((int)rv);
+}
+
+struct svm_deallocate_args {
+	vm_map_t map;
+	vm_offset_t addr;
+	vm_size_t size;
+};
+
+/* ARGSUSED */
+svm_deallocate(p, uap, retval)
+	struct proc *p;
+	struct svm_deallocate_args *uap;
+	int *retval;
+{
+	int rv;
+
+	uap->map = p->p_map;		/* XXX */
+	rv = vm_deallocate(uap->map, uap->addr, uap->size);
+	return((int)rv);
+}
+
+struct svm_inherit_args {
+	vm_map_t map;
+	vm_offset_t addr;
+	vm_size_t size;
+	vm_inherit_t inherit;
+};
+
+/* ARGSUSED */
+svm_inherit(p, uap, retval)
+	struct proc *p;
+	struct svm_inherit_args *uap;
+	int *retval;
+{
+	int rv;
+
+	uap->map = p->p_map;		/* XXX */
+	rv = vm_inherit(uap->map, uap->addr, uap->size, uap->inherit);
+	return((int)rv);
+}
+
+struct svm_protect_args {
+	vm_map_t map;
+	vm_offset_t addr;
+	vm_size_t size;
+	boolean_t setmax;
+	vm_prot_t prot;
+};
+
+/* ARGSUSED */
+svm_protect(p, uap, retval)
+	struct proc *p;
+	struct svm_protect_args *uap;
+	int *retval;
+{
+	int rv;
+
+	uap->map = p->p_map;		/* XXX */
+	rv = vm_protect(uap->map, uap->addr, uap->size, uap->setmax, uap->prot);
+	return((int)rv);
+}
+#endif
+
+/*
+ * vm_allocate allocates "zero fill" memory in the specified
+ * map.
+ */
+vm_allocate(map, addr, size, anywhere)
+	register vm_map_t map;
+	register vm_offset_t *addr;
+	register vm_size_t size;
+	boolean_t anywhere;
+{
+	int result;
+
+	if (map == NULL)
+		return(KERN_INVALID_ARGUMENT);
+	if (size == 0) {
+		*addr = 0;
+		return(KERN_SUCCESS);
+	}
+
+	if (anywhere)
+		*addr = vm_map_min(map);
+	else
+		*addr = trunc_page(*addr);
+	size = round_page(size);
+
+	result = vm_map_find(map, NULL, (vm_offset_t) 0, addr,
+			size, anywhere);
+
+	return(result);
+}
+
+/*
+ * vm_deallocate deallocates the specified range of addresses in the
+ * specified address map.
+ */
+vm_deallocate(map, start, size)
+	register vm_map_t map;
+	vm_offset_t start;
+	vm_size_t size;
+{
+	if (map == NULL)
+		return(KERN_INVALID_ARGUMENT);
+
+	if (size == (vm_offset_t) 0)
+		return(KERN_SUCCESS);
+
+	return(vm_map_remove(map, trunc_page(start), round_page(start+size)));
+}
+
+/*
+ * vm_inherit sets the inheritance of the specified range in the
+ * specified map.
+ */
+vm_inherit(map, start, size, new_inheritance)
+	register vm_map_t map;
+	vm_offset_t start;
+	vm_size_t size;
+	vm_inherit_t new_inheritance;
+{
+	if (map == NULL)
+		return(KERN_INVALID_ARGUMENT);
+
+	return(vm_map_inherit(map, trunc_page(start), round_page(start+size), new_inheritance));
+}
+
+/*
+ * vm_protect sets the protection of the specified range in the
+ * specified map.
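+ *
+ * (An editorial sketch of the shared page rounding follows; it is an
+ * addition by the editor, not part of the original 1.0 source.)
+ */
+
+#ifdef notdef
+/*
+ * Editor's sketch: vm_deallocate(), vm_inherit() and the vm_protect()
+ * below all clip their range the same way -- start truncated down to
+ * a page boundary, end rounded up to one -- so an operation touching
+ * any byte of a page affects that whole page.  With 4K pages, start
+ * 0x1234 and size 0x100 become the range [0x1000, 0x2000).  The
+ * helper name is illustrative only.
+ */
+static void
+vm_user_clip(start, size, lo, hi)
+	vm_offset_t start;
+	vm_size_t size;
+	vm_offset_t *lo, *hi;
+{
+	*lo = trunc_page(start);
+	*hi = round_page(start + size);
+}
+#endif /* notdef */
+
+/*
+ * (End of editorial sketch; the original vm_protect() follows.)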
+ */ + +vm_protect(map, start, size, set_maximum, new_protection) + register vm_map_t map; + vm_offset_t start; + vm_size_t size; + boolean_t set_maximum; + vm_prot_t new_protection; +{ + if (map == NULL) + return(KERN_INVALID_ARGUMENT); + + return(vm_map_protect(map, trunc_page(start), round_page(start+size), new_protection, set_maximum)); +} diff --git a/sys/vm/vm_user.h b/sys/vm/vm_user.h new file mode 100644 index 000000000000..a8f13fd34c70 --- /dev/null +++ b/sys/vm/vm_user.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 1991 Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * The Mach Operating System project at Carnegie-Mellon University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)vm_user.h 7.2 (Berkeley) 4/21/91 + * $Id: vm_user.h,v 1.2 1993/10/16 16:21:00 rgrimes Exp $ + */ + +/* + * Copyright (c) 1987, 1990 Carnegie-Mellon University. + * All rights reserved. + * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young + * + * Permission to use, copy, modify and distribute this software and + * its documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
+ *  School of Computer Science
+ *  Carnegie Mellon University
+ *  Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+/*
+ * Kernel memory management definitions.
+ */
+
+#ifndef	_VM_USER_
+#define	_VM_USER_
+
+int	vm_allocate();
+int	vm_deallocate();
+int	vm_inherit();
+int	vm_protect();
+int	vm_statistics();
+
+#endif /* _VM_USER_ */
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
new file mode 100644
index 000000000000..41c2872e4d39
--- /dev/null
+++ b/sys/vm/vnode_pager.c
@@ -0,0 +1,484 @@
+/*
+ * Copyright (c) 1990 University of Utah.
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
+ *	$Id: vnode_pager.c,v 1.2 1993/10/16 16:21:02 rgrimes Exp $
+ */
+
+/*
+ * Page to/from files (vnodes).
+ * + * TODO: + * pageouts + * fix credential use (uses current process credentials now) + */ +#include "vnodepager.h" +#if NVNODEPAGER > 0 + +#include "param.h" +#include "proc.h" +#include "malloc.h" +#include "vnode.h" +#include "uio.h" +#include "mount.h" + +#include "vm_param.h" +#include "lock.h" +#include "queue.h" +#include "vm_prot.h" +#include "vm_object.h" +#include "vm_page.h" +#include "vnode_pager.h" + +queue_head_t vnode_pager_list; /* list of managed vnodes */ + +#ifdef DEBUG +int vpagerdebug = 0x00; +#define VDB_FOLLOW 0x01 +#define VDB_INIT 0x02 +#define VDB_IO 0x04 +#define VDB_FAIL 0x08 +#define VDB_ALLOC 0x10 +#define VDB_SIZE 0x20 +#endif + +void +vnode_pager_init() +{ +#ifdef DEBUG + if (vpagerdebug & VDB_FOLLOW) + printf("vnode_pager_init()\n"); +#endif + queue_init(&vnode_pager_list); +} + +/* + * Allocate (or lookup) pager for a vnode. + * Handle is a vnode pointer. + */ +vm_pager_t +vnode_pager_alloc(handle, size, prot) + caddr_t handle; + vm_size_t size; + vm_prot_t prot; +{ + register vm_pager_t pager; + register vn_pager_t vnp; + vm_object_t object; + struct vattr vattr; + struct vnode *vp; + struct proc *p = curproc; /* XXX */ + +#ifdef DEBUG + if (vpagerdebug & (VDB_FOLLOW|VDB_ALLOC)) + printf("vnode_pager_alloc(%x, %x, %x)\n", handle, size, prot); +#endif + /* + * Pageout to vnode, no can do yet. + */ + if (handle == NULL) + return(NULL); + + /* + * Vnodes keep a pointer to any associated pager so no need to + * lookup with vm_pager_lookup. + */ + vp = (struct vnode *)handle; + pager = (vm_pager_t)vp->v_vmdata; + if (pager == NULL) { + /* + * Allocate pager structures + */ + pager = (vm_pager_t)malloc(sizeof *pager, M_VMPAGER, M_WAITOK); + if (pager == NULL) + return(NULL); + vnp = (vn_pager_t)malloc(sizeof *vnp, M_VMPGDATA, M_WAITOK); + if (vnp == NULL) { + free((caddr_t)pager, M_VMPAGER); + return(NULL); + } + /* + * And an object of the appropriate size + */ + if (VOP_GETATTR(vp, &vattr, p->p_ucred, p) == 0) { + object = vm_object_allocate(round_page(vattr.va_size)); + vm_object_enter(object, pager); + vm_object_setpager(object, pager, 0, TRUE); + } else { + free((caddr_t)vnp, M_VMPGDATA); + free((caddr_t)pager, M_VMPAGER); + return(NULL); + } + /* + * Hold a reference to the vnode and initialize pager data. + */ + VREF(vp); + vnp->vnp_flags = 0; + vnp->vnp_vp = vp; + vnp->vnp_size = vattr.va_size; + queue_enter(&vnode_pager_list, pager, vm_pager_t, pg_list); + pager->pg_handle = handle; + pager->pg_type = PG_VNODE; + pager->pg_ops = &vnodepagerops; + pager->pg_data = (caddr_t)vnp; + vp->v_vmdata = (caddr_t)pager; + } else { + /* + * vm_object_lookup() will remove the object from the + * cache if found and also gain a reference to the object. 
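+		 *
+		 * (Editorial note, not in the original source: in this
+		 * VM system an otherwise-unreferenced persistent object
+		 * sits on the cache queue; the lookup revives it, and
+		 * the reference it gains becomes this caller's, so the
+		 * pager returned below is immediately usable.)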
+ */ + object = vm_object_lookup(pager); +#ifdef DEBUG + vnp = (vn_pager_t)pager->pg_data; +#endif + } +#ifdef DEBUG + if (vpagerdebug & VDB_ALLOC) + printf("vnode_pager_setup: vp %x sz %x pager %x object %x\n", + vp, vnp->vnp_size, pager, object); +#endif + return(pager); +} + +void +vnode_pager_dealloc(pager) + vm_pager_t pager; +{ + register vn_pager_t vnp = (vn_pager_t)pager->pg_data; + register struct vnode *vp; + struct proc *p = curproc; /* XXX */ + +#ifdef DEBUG + if (vpagerdebug & VDB_FOLLOW) + printf("vnode_pager_dealloc(%x)\n", pager); +#endif + if (vp = vnp->vnp_vp) { + vp->v_vmdata = NULL; + vp->v_flag &= ~VTEXT; +#if 0 + /* can hang if done at reboot on NFS FS */ + (void) VOP_FSYNC(vp, p->p_ucred, p); +#endif + vrele(vp); + } + queue_remove(&vnode_pager_list, pager, vm_pager_t, pg_list); + free((caddr_t)vnp, M_VMPGDATA); + free((caddr_t)pager, M_VMPAGER); +} + +vnode_pager_getpage(pager, m, sync) + vm_pager_t pager; + vm_page_t m; + boolean_t sync; +{ + +#ifdef DEBUG + if (vpagerdebug & VDB_FOLLOW) + printf("vnode_pager_getpage(%x, %x)\n", pager, m); +#endif + return(vnode_pager_io((vn_pager_t)pager->pg_data, m, UIO_READ)); +} + +boolean_t +vnode_pager_putpage(pager, m, sync) + vm_pager_t pager; + vm_page_t m; + boolean_t sync; +{ + int err; + +#ifdef DEBUG + if (vpagerdebug & VDB_FOLLOW) + printf("vnode_pager_putpage(%x, %x)\n", pager, m); +#endif + if (pager == NULL) + return; + err = vnode_pager_io((vn_pager_t)pager->pg_data, m, UIO_WRITE); + if (err == VM_PAGER_OK) { + m->clean = TRUE; /* XXX - wrong place */ + pmap_clear_modify(VM_PAGE_TO_PHYS(m)); /* XXX - wrong place */ + } + return(err); +} + +boolean_t +vnode_pager_haspage(pager, offset) + vm_pager_t pager; + vm_offset_t offset; +{ + register vn_pager_t vnp = (vn_pager_t)pager->pg_data; + daddr_t bn; + int err; + +#ifdef DEBUG + if (vpagerdebug & VDB_FOLLOW) + printf("vnode_pager_haspage(%x, %x)\n", pager, offset); +#endif + + /* + * Offset beyond end of file, do not have the page + */ + if (offset >= vnp->vnp_size) { +#ifdef DEBUG + if (vpagerdebug & (VDB_FAIL|VDB_SIZE)) + printf("vnode_pager_haspage: pg %x, off %x, size %x\n", + pager, offset, vnp->vnp_size); +#endif + return(FALSE); + } + + /* + * Read the index to find the disk block to read + * from. If there is no block, report that we don't + * have this data. + * + * Assumes that the vnode has whole page or nothing. + */ + err = VOP_BMAP(vnp->vnp_vp, + offset / vnp->vnp_vp->v_mount->mnt_stat.f_bsize, + (struct vnode **)0, &bn); + if (err) { +#ifdef DEBUG + if (vpagerdebug & VDB_FAIL) + printf("vnode_pager_haspage: BMAP err %d, pg %x, off %x\n", + err, pager, offset); +#endif + return(TRUE); + } + return((long)bn < 0 ? FALSE : TRUE); +} + +/* + * (XXX) + * Lets the VM system know about a change in size for a file. + * If this vnode is mapped into some address space (i.e. we have a pager + * for it) we adjust our own internal size and flush any cached pages in + * the associated object that are affected by the size change. + * + * Note: this routine may be invoked as a result of a pager put + * operation (possibly at object termination time), so we must be careful. 
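+ *
+ * (Editorial illustration, not in the original source: with 4K pages,
+ * shrinking a mapped file from 0x3000 to 0x1800 bytes, i.e.
+ *
+ *	vnode_pager_setsize(vp, 0x1800);
+ *
+ * removes the cached pages whose offsets fall in [0x1800, 0x3000):
+ * the page at 0x2000 is tossed, while the page at 0x1000, which still
+ * covers the new end of file, survives.)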
+ */ +vnode_pager_setsize(vp, nsize) + struct vnode *vp; + u_long nsize; +{ + register vn_pager_t vnp; + register vm_object_t object; + vm_pager_t pager; + + /* + * Not a mapped vnode + */ + if (vp == NULL || vp->v_type != VREG || vp->v_vmdata == NULL) + return; + /* + * Hasn't changed size + */ + pager = (vm_pager_t)vp->v_vmdata; + vnp = (vn_pager_t)pager->pg_data; + if (nsize == vnp->vnp_size) + return; + /* + * No object. + * This can happen during object termination since + * vm_object_page_clean is called after the object + * has been removed from the hash table, and clean + * may cause vnode write operations which can wind + * up back here. + */ + object = vm_object_lookup(pager); + if (object == NULL) + return; + +#ifdef DEBUG + if (vpagerdebug & (VDB_FOLLOW|VDB_SIZE)) + printf("vnode_pager_setsize: vp %x obj %x osz %d nsz %d\n", + vp, object, vnp->vnp_size, nsize); +#endif + /* + * File has shrunk. + * Toss any cached pages beyond the new EOF. + */ + if (nsize < vnp->vnp_size) { + vm_object_lock(object); + vm_object_page_remove(object, + (vm_offset_t)nsize, vnp->vnp_size); + vm_object_unlock(object); + } + vnp->vnp_size = (vm_offset_t)nsize; + vm_object_deallocate(object); +} + +vnode_pager_umount(mp) + register struct mount *mp; +{ + register vm_pager_t pager, npager; + struct vnode *vp; + + pager = (vm_pager_t) queue_first(&vnode_pager_list); + while (!queue_end(&vnode_pager_list, (queue_entry_t)pager)) { + /* + * Save the next pointer now since uncaching may + * terminate the object and render pager invalid + */ + vp = ((vn_pager_t)pager->pg_data)->vnp_vp; + npager = (vm_pager_t) queue_next(&pager->pg_list); + if (mp == (struct mount *)0 || vp->v_mount == mp) + (void) vnode_pager_uncache(vp); + pager = npager; + } +} + +/* + * Remove vnode associated object from the object cache. + * + * Note: this routine may be invoked as a result of a pager put + * operation (possibly at object termination time), so we must be careful. + */ +boolean_t +vnode_pager_uncache(vp) + register struct vnode *vp; +{ + register vm_object_t object; + boolean_t uncached, locked; + vm_pager_t pager; + + /* + * Not a mapped vnode + */ + pager = (vm_pager_t)vp->v_vmdata; + if (pager == NULL) + return (TRUE); + /* + * Unlock the vnode if it is currently locked. + * We do this since uncaching the object may result + * in its destruction which may initiate paging + * activity which may necessitate locking the vnode. + */ + locked = VOP_ISLOCKED(vp); + if (locked) + VOP_UNLOCK(vp); + /* + * Must use vm_object_lookup() as it actually removes + * the object from the cache list. + */ + object = vm_object_lookup(pager); + if (object) { + uncached = (object->ref_count <= 1); + pager_cache(object, FALSE); + } else + uncached = TRUE; + if (locked) + VOP_LOCK(vp); + return(uncached); +} + +vnode_pager_io(vnp, m, rw) + register vn_pager_t vnp; + vm_page_t m; + enum uio_rw rw; +{ + struct uio auio; + struct iovec aiov; + vm_offset_t kva, foff; + int error, size; + struct proc *p = curproc; /* XXX */ + +#ifdef DEBUG + if (vpagerdebug & VDB_FOLLOW) + printf("vnode_pager_io(%x, %x, %c): vnode %x\n", + vnp, m, rw == UIO_READ ? 
'R' : 'W', vnp->vnp_vp); +#endif + foff = m->offset + m->object->paging_offset; + /* + * Return failure if beyond current EOF + */ + if (foff >= vnp->vnp_size) { +#ifdef DEBUG + if (vpagerdebug & VDB_SIZE) + printf("vnode_pager_io: vp %x, off %d size %d\n", + vnp->vnp_vp, foff, vnp->vnp_size); +#endif + return(VM_PAGER_BAD); + } + if (foff + PAGE_SIZE > vnp->vnp_size) + size = vnp->vnp_size - foff; + else + size = PAGE_SIZE; + /* + * Allocate a kernel virtual address and initialize so that + * we can use VOP_READ/WRITE routines. + */ + kva = vm_pager_map_page(m); + aiov.iov_base = (caddr_t)kva; + aiov.iov_len = size; + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_offset = foff; + auio.uio_segflg = UIO_SYSSPACE; + auio.uio_rw = rw; + auio.uio_resid = size; + auio.uio_procp = (struct proc *)0; +#ifdef DEBUG + if (vpagerdebug & VDB_IO) + printf("vnode_pager_io: vp %x kva %x foff %x size %x", + vnp->vnp_vp, kva, foff, size); +#endif + if (rw == UIO_READ) + error = VOP_READ(vnp->vnp_vp, &auio, 0, p->p_ucred); + else + error = VOP_WRITE(vnp->vnp_vp, &auio, 0, p->p_ucred); +#ifdef DEBUG + if (vpagerdebug & VDB_IO) { + if (error || auio.uio_resid) + printf(" returns error %x, resid %x", + error, auio.uio_resid); + printf("\n"); + } +#endif + if (!error) { + register int count = size - auio.uio_resid; + + if (count == 0) + error = EINVAL; + else if (count != PAGE_SIZE && rw == UIO_READ) + bzero(kva + count, PAGE_SIZE - count); + } + vm_pager_unmap_page(kva); + return (error ? VM_PAGER_FAIL : VM_PAGER_OK); +} +#endif diff --git a/sys/vm/vnode_pager.h b/sys/vm/vnode_pager.h new file mode 100644 index 000000000000..020543576a55 --- /dev/null +++ b/sys/vm/vnode_pager.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 1990 University of Utah. + * Copyright (c) 1991 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Systems Programming Group of the University of Utah Computer + * Science Department. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)vnode_pager.h	7.1 (Berkeley) 12/5/90
+ *	$Id: vnode_pager.h,v 1.2 1993/10/16 16:21:03 rgrimes Exp $
+ */
+
+#ifndef	_VNODE_PAGER_
+#define	_VNODE_PAGER_	1
+
+/*
+ * VNODE pager private data.
+ */
+struct vnpager {
+	int		vnp_flags;	/* flags */
+	struct vnode	*vnp_vp;	/* vnode */
+	vm_size_t	vnp_size;	/* vnode current size */
+};
+typedef struct vnpager	*vn_pager_t;
+
+#define VN_PAGER_NULL	((vn_pager_t)0)
+
+#define	VNP_PAGING	0x01	/* vnode used for pageout */
+#define	VNP_CACHED	0x02	/* vnode is cached */
+
+#ifdef KERNEL
+
+void		vnode_pager_init();
+vm_pager_t	vnode_pager_alloc();
+void		vnode_pager_dealloc();
+int		vnode_pager_getpage(), vnode_pager_putpage();
+boolean_t	vnode_pager_haspage();
+
+struct pagerops vnodepagerops = {
+	vnode_pager_init,
+	vnode_pager_alloc,
+	vnode_pager_dealloc,
+	vnode_pager_getpage,
+	vnode_pager_putpage,
+	vnode_pager_haspage
+};
+
+#endif /* KERNEL */
+
+#endif /* _VNODE_PAGER_ */
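
/*
 * Editorial addendum, not part of the original diff: vnodepagerops is
 * the per-type dispatch vector for this pager.  vnode_pager_alloc()
 * stashes its address in pg_ops when a pager is created, and the
 * generic pager layer (vm_pager.c, elsewhere in this commit) routes
 * vm_pager_get/put/haspage requests on a PG_VNODE pager through that
 * table, landing in vnode_pager_getpage(), vnode_pager_putpage() and
 * vnode_pager_haspage() above.
 */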
