aboutsummaryrefslogtreecommitdiff
path: root/sys/vm
diff options
context:
space:
mode:
authorJohn Dyson <dyson@FreeBSD.org>1997-12-29 00:25:11 +0000
committerJohn Dyson <dyson@FreeBSD.org>1997-12-29 00:25:11 +0000
commit2be70f79f6dcc03377819b327700531ce5455896 (patch)
treee16c806fdb19ecadb7a3d2c2fd2ffb344ef606f3 /sys/vm
parentd0cc10a88b4696dafc997d9a2acef1ef25ab1def (diff)
downloadsrc-2be70f79f6dcc03377819b327700531ce5455896.tar.gz
src-2be70f79f6dcc03377819b327700531ce5455896.zip
Lots of improvements, including restructuring the caching and management
of vnodes and objects. There are some metadata performance improvements that come along with this. There are also a few prototypes added when the need is noticed. Changes include: 1) Cleaning up vref, vget. 2) Removal of the object cache. 3) Nuke vnode_pager_uncache and friends, because they aren't needed anymore. 4) Correct some missing LK_RETRY's in vn_lock. 5) Correct the page range in the code for msync. Be gentle, and please give me feedback asap.
Notes
Notes: svn path=/head/; revision=32071
Diffstat (limited to 'sys/vm')
-rw-r--r--sys/vm/vm_map.c14
-rw-r--r--sys/vm/vm_object.c142
-rw-r--r--sys/vm/vm_object.h9
-rw-r--r--sys/vm/vm_page.c11
-rw-r--r--sys/vm/vm_pageout.c19
-rw-r--r--sys/vm/vm_pager.c24
-rw-r--r--sys/vm/vnode_pager.c81
7 files changed, 79 insertions, 221 deletions
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 1fe8cc351641..b20b433b4a2c 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.c,v 1.100 1997/12/19 15:31:13 dyson Exp $
+ * $Id: vm_map.c,v 1.101 1997/12/25 20:55:15 dyson Exp $
*/
/*
@@ -1725,15 +1725,19 @@ vm_map_clean(map, start, end, syncio, invalidate)
* idea.
*/
if (current->protection & VM_PROT_WRITE) {
- vm_object_page_clean(object,
+ if (object->type == OBJT_VNODE)
+ vn_lock(object->handle, LK_EXCLUSIVE, curproc);
+ vm_object_page_clean(object,
OFF_TO_IDX(offset),
- OFF_TO_IDX(offset + size),
- (syncio||invalidate)?1:0, TRUE);
+ OFF_TO_IDX(offset + size + PAGE_MASK),
+ (syncio||invalidate)?1:0);
if (invalidate)
vm_object_page_remove(object,
OFF_TO_IDX(offset),
- OFF_TO_IDX(offset + size),
+ OFF_TO_IDX(offset + size + PAGE_MASK),
FALSE);
+ if (object->type == OBJT_VNODE)
+ VOP_UNLOCK(object->handle, 0, curproc);
}
}
start += size;
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 30f26c9c46a2..221d7fd59799 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.c,v 1.101 1997/11/18 11:02:19 bde Exp $
+ * $Id: vm_object.c,v 1.102 1997/12/19 09:03:14 dyson Exp $
*/
/*
@@ -95,7 +95,6 @@ static void vm_object_qcollapse __P((vm_object_t object));
static void vm_object_deactivate_pages __P((vm_object_t));
#endif
static void vm_object_terminate __P((vm_object_t));
-static void vm_object_cache_trim __P((void));
/*
* Virtual memory objects maintain the actual data
@@ -123,9 +122,6 @@ static void vm_object_cache_trim __P((void));
*
*/
-int vm_object_cache_max;
-struct object_q vm_object_cached_list;
-static int vm_object_cached; /* size of cached list */
struct object_q vm_object_list;
struct simplelock vm_object_list_lock;
static long vm_object_count; /* count of all objects */
@@ -187,15 +183,10 @@ _vm_object_allocate(type, size, object)
void
vm_object_init()
{
- TAILQ_INIT(&vm_object_cached_list);
TAILQ_INIT(&vm_object_list);
simple_lock_init(&vm_object_list_lock);
vm_object_count = 0;
- vm_object_cache_max = 84;
- if (cnt.v_page_count > 1000)
- vm_object_cache_max += (cnt.v_page_count - 1000) / 4;
-
kernel_object = &kernel_object_store;
_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
kernel_object);
@@ -245,14 +236,19 @@ vm_object_reference(object)
{
if (object == NULL)
return;
-
if (object->ref_count == 0) {
- if ((object->flags & OBJ_CANPERSIST) == 0)
- panic("vm_object_reference: non-persistent object with 0 ref_count");
- TAILQ_REMOVE(&vm_object_cached_list, object, cached_list);
- vm_object_cached--;
+ panic("vm_object_reference: attempting to reference deallocated obj");
}
object->ref_count++;
+ if ((object->type == OBJT_VNODE) && (object->flags & OBJ_VFS_REF)) {
+ struct vnode *vp;
+ vp = (struct vnode *)object->handle;
+ simple_lock(&vp->v_interlock);
+ if (vp->v_flag & VOBJREF)
+ vp->v_flag |= VOBJREF;
+ ++vp->v_usecount;
+ simple_unlock(&vp->v_interlock);
+ }
}
/*
@@ -271,19 +267,51 @@ vm_object_deallocate(object)
vm_object_t object;
{
vm_object_t temp;
+ struct vnode *vp;
while (object != NULL) {
- if (object->ref_count == 0)
+ if (object->ref_count == 0) {
panic("vm_object_deallocate: object deallocated too many times");
+ } else if (object->ref_count > 2) {
+ object->ref_count--;
+ return;
+ }
+
+ /*
+ * Here on ref_count of one or two, which are special cases for
+ * objects.
+ */
+ vp = NULL;
+ if (object->type == OBJT_VNODE) {
+ vp = (struct vnode *)object->handle;
+ if (vp->v_flag & VOBJREF) {
+ if (object->ref_count < 2) {
+ panic("vm_object_deallocate: "
+ "not enough references for OBJT_VNODE: %d",
+ object->ref_count);
+ } else {
+
+ /*
+ * Freeze optimized copies.
+ */
+ vm_freeze_copyopts(object, 0, object->size);
+
+ /*
+ * Loose our reference to the vnode.
+ */
+ vp->v_flag &= ~VOBJREF;
+ vrele(vp);
+ }
+ }
+ }
/*
* Lose the reference
*/
- object->ref_count--;
- if (object->ref_count != 0) {
- if ((object->ref_count == 1) &&
- (object->handle == NULL) &&
+ if (object->ref_count == 2) {
+ object->ref_count--;
+ if ((object->handle == NULL) &&
(object->type == OBJT_DEFAULT ||
object->type == OBJT_SWAP)) {
vm_object_t robject;
@@ -328,45 +356,15 @@ vm_object_deallocate(object)
return;
}
- if (object->type == OBJT_VNODE) {
- struct vnode *vp = object->handle;
-
- vp->v_flag &= ~VTEXT;
- }
-
- /*
- * See if this object can persist and has some resident
- * pages. If so, enter it in the cache.
- */
- if (object->flags & OBJ_CANPERSIST) {
- if (object->resident_page_count != 0) {
-#if 0
- vm_object_page_clean(object, 0, 0 ,TRUE, TRUE);
-#endif
- TAILQ_INSERT_TAIL(&vm_object_cached_list, object,
- cached_list);
- vm_object_cached++;
-
- vm_object_cache_trim();
- return;
- } else {
- object->flags &= ~OBJ_CANPERSIST;
- }
- }
-
/*
* Make sure no one uses us.
*/
object->flags |= OBJ_DEAD;
- if (object->type == OBJT_VNODE) {
- struct vnode *vp = object->handle;
- if (vp->v_flag & VVMIO) {
- object->ref_count++;
- vm_freeze_copyopts(object, 0, object->size);
- object->ref_count--;
- }
- }
+ if (vp)
+ vp->v_flag &= ~VTEXT;
+
+ object->ref_count--;
temp = object->backing_object;
if (temp) {
@@ -414,16 +412,8 @@ vm_object_terminate(object)
*/
if (object->type == OBJT_VNODE) {
struct vnode *vp = object->handle;
- struct proc *cp = curproc; /* XXX */
- int waslocked;
-
- waslocked = VOP_ISLOCKED(vp);
- if (!waslocked)
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, cp);
- vm_object_page_clean(object, 0, 0, TRUE, FALSE);
+ vm_object_page_clean(object, 0, 0, TRUE);
vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
- if (!waslocked)
- VOP_UNLOCK(vp, 0, cp);
}
/*
@@ -468,12 +458,11 @@ vm_object_terminate(object)
*/
void
-vm_object_page_clean(object, start, end, syncio, lockflag)
+vm_object_page_clean(object, start, end, syncio)
vm_object_t object;
vm_pindex_t start;
vm_pindex_t end;
boolean_t syncio;
- boolean_t lockflag;
{
register vm_page_t p, np, tp;
register vm_offset_t tstart, tend;
@@ -496,8 +485,6 @@ vm_object_page_clean(object, start, end, syncio, lockflag)
vp = object->handle;
- if (lockflag)
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, pproc);
object->flags |= OBJ_CLEANING;
tstart = start;
@@ -614,8 +601,6 @@ rescan:
VOP_FSYNC(vp, NULL, syncio, curproc);
- if (lockflag)
- VOP_UNLOCK(vp, 0, pproc);
object->flags &= ~OBJ_CLEANING;
return;
}
@@ -644,23 +629,6 @@ vm_object_deactivate_pages(object)
#endif
/*
- * Trim the object cache to size.
- */
-static void
-vm_object_cache_trim()
-{
- register vm_object_t object;
-
- while (vm_object_cached > vm_object_cache_max) {
- object = TAILQ_FIRST(&vm_object_cached_list);
-
- vm_object_reference(object);
- pager_cache(object, FALSE);
- }
-}
-
-
-/*
* vm_object_pmap_copy:
*
* Makes all physical pages in the specified
@@ -1554,8 +1522,6 @@ DB_SHOW_COMMAND(object, vm_object_print_static)
db_printf("offset=0x%x, backing_object=(0x%x)+0x%x\n",
(int) object->paging_offset,
(int) object->backing_object, (int) object->backing_object_offset);
- db_printf("cache: next=%p, prev=%p\n",
- TAILQ_NEXT(object, cached_list), TAILQ_PREV(object, cached_list));
if (!full)
return;
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 305043b8229b..a13a5bf12d5d 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.h,v 1.38 1997/09/21 04:24:24 dyson Exp $
+ * $Id: vm_object.h,v 1.39 1997/12/19 09:03:16 dyson Exp $
*/
/*
@@ -84,7 +84,6 @@ typedef enum obj_type objtype_t;
struct vm_object {
TAILQ_ENTRY(vm_object) object_list; /* list of all objects */
- TAILQ_ENTRY(vm_object) cached_list; /* list of cached (persistent) objects */
TAILQ_HEAD(, vm_object) shadow_head; /* objects that this is a shadow for */
TAILQ_ENTRY(vm_object) shadow_list; /* chain of shadow objects */
TAILQ_HEAD(, vm_page) memq; /* list of resident pages */
@@ -142,12 +141,9 @@ struct vm_object {
#define OFF_TO_IDX(off) ((vm_pindex_t)(((vm_ooffset_t)(off)) >> PAGE_SHIFT))
#ifdef KERNEL
-extern int vm_object_cache_max;
TAILQ_HEAD(object_q, vm_object);
-extern struct object_q vm_object_cached_list; /* list of objects persisting */
-
extern struct object_q vm_object_list; /* list of allocated objects */
/* lock for object list and count */
@@ -170,13 +166,12 @@ vm_object_pip_wakeup(vm_object_t object)
vm_object_t vm_object_allocate __P((objtype_t, vm_size_t));
void _vm_object_allocate __P((objtype_t, vm_size_t, vm_object_t));
-void vm_object_cache_clear __P((void));
boolean_t vm_object_coalesce __P((vm_object_t, vm_pindex_t, vm_size_t, vm_size_t));
void vm_object_collapse __P((vm_object_t));
void vm_object_copy __P((vm_object_t, vm_pindex_t, vm_object_t *, vm_pindex_t *, boolean_t *));
void vm_object_deallocate __P((vm_object_t));
void vm_object_init __P((void));
-void vm_object_page_clean __P((vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t, boolean_t));
+void vm_object_page_clean __P((vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t));
void vm_object_page_remove __P((vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t));
void vm_object_pmap_copy __P((vm_object_t, vm_pindex_t, vm_pindex_t));
void vm_object_pmap_copy_1 __P((vm_object_t, vm_pindex_t, vm_pindex_t));
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 5571b7083038..3b365264ac1e 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.82 1997/10/10 18:18:47 phk Exp $
+ * $Id: vm_page.c,v 1.83 1997/11/06 08:35:50 dyson Exp $
*/
/*
@@ -73,6 +73,7 @@
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
+#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
@@ -1357,7 +1358,9 @@ again1:
vm_page_test_dirty(m);
if (m->dirty) {
if (m->object->type == OBJT_VNODE) {
- vm_object_page_clean(m->object, 0, 0, TRUE, TRUE);
+ vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
+ vm_object_page_clean(m->object, 0, 0, TRUE);
+ VOP_UNLOCK(m->object->handle, 0, curproc);
goto again1;
} else if (m->object->type == OBJT_SWAP ||
m->object->type == OBJT_DEFAULT) {
@@ -1389,7 +1392,9 @@ again1:
vm_page_test_dirty(m);
if (m->dirty) {
if (m->object->type == OBJT_VNODE) {
- vm_object_page_clean(m->object, 0, 0, TRUE, TRUE);
+ vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
+ vm_object_page_clean(m->object, 0, 0, TRUE);
+ VOP_UNLOCK(m->object->handle, 0, curproc);
goto again1;
} else if (m->object->type == OBJT_SWAP ||
m->object->type == OBJT_DEFAULT) {
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 51b04ac138a8..99ee5a424ae6 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.c,v 1.103 1997/12/06 02:23:33 dyson Exp $
+ * $Id: vm_pageout.c,v 1.104 1997/12/24 15:05:25 dyson Exp $
*/
/*
@@ -1323,23 +1323,6 @@ vm_daemon()
(vm_pindex_t)(limit >> PAGE_SHIFT) );
}
}
-
- /*
- * we remove cached objects that have no RSS...
- */
-restart:
- object = TAILQ_FIRST(&vm_object_cached_list);
- while (object) {
- /*
- * if there are no resident pages -- get rid of the object
- */
- if (object->resident_page_count == 0) {
- vm_object_reference(object);
- pager_cache(object, FALSE);
- goto restart;
- }
- object = TAILQ_NEXT(object, cached_list);
- }
}
}
#endif
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index bfc2cc13bccd..44a3bc0fd802 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pager.c,v 1.29 1997/09/01 03:17:28 bde Exp $
+ * $Id: vm_pager.c,v 1.30 1997/10/12 20:26:31 phk Exp $
*/
/*
@@ -252,28 +252,6 @@ vm_pager_object_lookup(pg_list, handle)
}
/*
- * This routine loses a reference to the object -
- * thus a reference must be gained before calling.
- */
-int
-pager_cache(object, should_cache)
- vm_object_t object;
- boolean_t should_cache;
-{
- if (object == NULL)
- return (KERN_INVALID_ARGUMENT);
-
- if (should_cache)
- object->flags |= OBJ_CANPERSIST;
- else
- object->flags &= ~OBJ_CANPERSIST;
-
- vm_object_deallocate(object);
-
- return (KERN_SUCCESS);
-}
-
-/*
* initialize a physical buffer
*/
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 8ea272888bc4..360188a5a867 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -38,7 +38,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
- * $Id: vnode_pager.c,v 1.76 1997/12/02 21:07:20 phk Exp $
+ * $Id: vnode_pager.c,v 1.77 1997/12/19 09:03:17 dyson Exp $
*/
/*
@@ -132,6 +132,9 @@ vnode_pager_alloc(void *handle, vm_size_t size, vm_prot_t prot,
tsleep(object, PVM, "vadead", 0);
}
+ if (vp->v_usecount == 0)
+ panic("vnode_pager_alloc: no vnode reference");
+
if (object == NULL) {
/*
* And an object of the appropriate size
@@ -142,12 +145,6 @@ vnode_pager_alloc(void *handle, vm_size_t size, vm_prot_t prot,
else
object->flags = 0;
- if (vp->v_usecount == 0)
- panic("vnode_pager_alloc: no vnode reference");
- /*
- * Hold a reference to the vnode and initialize object data.
- */
- vp->v_usecount++;
object->un_pager.vnp.vnp_size = (vm_ooffset_t) size * PAGE_SIZE;
object->handle = handle;
@@ -193,7 +190,6 @@ vnode_pager_dealloc(object)
vp->v_object = NULL;
vp->v_flag &= ~(VTEXT | VVMIO);
- vrele(vp);
}
static boolean_t
@@ -321,75 +317,6 @@ vnode_pager_setsize(vp, nsize)
}
void
-vnode_pager_umount(mp)
- register struct mount *mp;
-{
- struct proc *p = curproc; /* XXX */
- struct vnode *vp, *nvp;
-
-loop:
- for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
- /*
- * Vnode can be reclaimed by getnewvnode() while we
- * traverse the list.
- */
- if (vp->v_mount != mp)
- goto loop;
-
- /*
- * Save the next pointer now since uncaching may terminate the
- * object and render vnode invalid
- */
- nvp = vp->v_mntvnodes.le_next;
-
- if (vp->v_object != NULL) {
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- vnode_pager_uncache(vp, p);
- VOP_UNLOCK(vp, 0, p);
- }
- }
-}
-
-/*
- * Remove vnode associated object from the object cache.
- * This routine must be called with the vnode locked.
- *
- * XXX unlock the vnode.
- * We must do this since uncaching the object may result in its
- * destruction which may initiate paging activity which may necessitate
- * re-locking the vnode.
- */
-void
-vnode_pager_uncache(vp, p)
- struct vnode *vp;
- struct proc *p;
-{
- vm_object_t object;
-
- /*
- * Not a mapped vnode
- */
- object = vp->v_object;
- if (object == NULL)
- return;
-
- vm_object_reference(object);
- vm_freeze_copyopts(object, 0, object->size);
-
- /*
- * XXX We really should handle locking on
- * VBLK devices...
- */
- if (vp->v_type != VBLK)
- VOP_UNLOCK(vp, 0, p);
- pager_cache(object, FALSE);
- if (vp->v_type != VBLK)
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- return;
-}
-
-
-void
vnode_pager_freepage(m)
vm_page_t m;
{