diff options
author | John Dyson <dyson@FreeBSD.org> | 1997-12-29 00:25:11 +0000 |
---|---|---|
committer | John Dyson <dyson@FreeBSD.org> | 1997-12-29 00:25:11 +0000 |
commit | 2be70f79f6dcc03377819b327700531ce5455896 (patch) | |
tree | e16c806fdb19ecadb7a3d2c2fd2ffb344ef606f3 | |
parent | d0cc10a88b4696dafc997d9a2acef1ef25ab1def (diff) | |
download | src-2be70f79f6dcc03377819b327700531ce5455896.tar.gz src-2be70f79f6dcc03377819b327700531ce5455896.zip |
Lots of improvements, including restructuring the caching and management
of vnodes and objects. There are some metadata performance improvements
that come along with this. There are also a few prototypes added when
the need is noticed. Changes include:
1) Cleaning up vref, vget.
2) Removal of the object cache.
3) Nuke vnode_pager_uncache and friends, because they aren't needed anymore.
4) Correct some missing LK_RETRY's in vn_lock.
5) Correct the page range in the code for msync.
Be gentle, and please give me feedback asap.
Notes
Notes:
svn path=/head/; revision=32071
-rw-r--r-- | sys/fs/specfs/spec_vnops.c | 13 | ||||
-rw-r--r-- | sys/kern/kern_synch.c | 4 | ||||
-rw-r--r-- | sys/kern/vfs_export.c | 173 | ||||
-rw-r--r-- | sys/kern/vfs_extattr.c | 10 | ||||
-rw-r--r-- | sys/kern/vfs_lookup.c | 8 | ||||
-rw-r--r-- | sys/kern/vfs_subr.c | 173 | ||||
-rw-r--r-- | sys/kern/vfs_syscalls.c | 10 | ||||
-rw-r--r-- | sys/kern/vfs_vnops.c | 6 | ||||
-rw-r--r-- | sys/miscfs/specfs/spec_vnops.c | 13 | ||||
-rw-r--r-- | sys/nfs/nfs_serv.c | 4 | ||||
-rw-r--r-- | sys/nfs/nfs_vnops.c | 3 | ||||
-rw-r--r-- | sys/nfsclient/nfs_vnops.c | 3 | ||||
-rw-r--r-- | sys/nfsserver/nfs_serv.c | 4 | ||||
-rw-r--r-- | sys/sys/vnode.h | 11 | ||||
-rw-r--r-- | sys/ufs/ffs/ffs_vfsops.c | 6 | ||||
-rw-r--r-- | sys/vm/vm_map.c | 14 | ||||
-rw-r--r-- | sys/vm/vm_object.c | 142 | ||||
-rw-r--r-- | sys/vm/vm_object.h | 9 | ||||
-rw-r--r-- | sys/vm/vm_page.c | 11 | ||||
-rw-r--r-- | sys/vm/vm_pageout.c | 19 | ||||
-rw-r--r-- | sys/vm/vm_pager.c | 24 | ||||
-rw-r--r-- | sys/vm/vnode_pager.c | 81 |
22 files changed, 265 insertions, 476 deletions
diff --git a/sys/fs/specfs/spec_vnops.c b/sys/fs/specfs/spec_vnops.c index b771aa8a4073..6da09a60ec2c 100644 --- a/sys/fs/specfs/spec_vnops.c +++ b/sys/fs/specfs/spec_vnops.c @@ -31,7 +31,7 @@ * SUCH DAMAGE. * * @(#)spec_vnops.c 8.14 (Berkeley) 5/21/95 - * $Id: spec_vnops.c,v 1.50 1997/10/26 20:55:24 phk Exp $ + * $Id: spec_vnops.c,v 1.51 1997/10/27 13:33:42 bde Exp $ */ #include <sys/param.h> @@ -640,16 +640,9 @@ spec_close(ap) * sum of the reference counts on all the aliased * vnodes descends to one, we are on last close. */ - if ((vcount(vp) > (vp->v_object?2:1)) && - (vp->v_flag & VXLOCK) == 0) + if ((vcount(vp) > 1) && (vp->v_flag & VXLOCK) == 0) return (0); - if (vp->v_object) { - vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); - vnode_pager_uncache(vp, p); - VOP_UNLOCK(vp, 0, p); - } - devclose = bdevsw[major(dev)]->d_close; mode = S_IFBLK; break; @@ -796,7 +789,7 @@ spec_getpages(ap) /* We definitely need to be at splbio here. */ while ((bp->b_flags & B_DONE) == 0) - tsleep(bp, PVM, "vnread", 0); + tsleep(bp, PVM, "spread", 0); splx(s); diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c index c09efb1bedf1..6ecab6fdd23d 100644 --- a/sys/kern/kern_synch.c +++ b/sys/kern/kern_synch.c @@ -36,7 +36,7 @@ * SUCH DAMAGE. * * @(#)kern_synch.c 8.9 (Berkeley) 5/19/95 - * $Id: kern_synch.c,v 1.41 1997/11/22 08:35:38 bde Exp $ + * $Id: kern_synch.c,v 1.42 1997/11/25 07:07:44 julian Exp $ */ #include "opt_ktrace.h" @@ -66,6 +66,8 @@ int lbolt; /* once a second sleep address */ static void endtsleep __P((void *)); static void updatepri __P((struct proc *p)); +static void roundrobin __P((void *arg)); +static void schedcpu __P((void *arg)); #define MAXIMUM_SCHEDULE_QUANTUM (1000000) /* arbitrary limit */ #ifndef DEFAULT_SCHEDULE_QUANTUM diff --git a/sys/kern/vfs_export.c b/sys/kern/vfs_export.c index c2e536934d9a..f39c17d712dd 100644 --- a/sys/kern/vfs_export.c +++ b/sys/kern/vfs_export.c @@ -36,7 +36,7 @@ * SUCH DAMAGE. 
* * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 - * $Id: vfs_subr.c,v 1.115 1997/12/15 03:09:32 wollman Exp $ + * $Id: vfs_subr.c,v 1.116 1997/12/19 09:03:28 dyson Exp $ */ /* @@ -57,6 +57,7 @@ #include <sys/poll.h> #include <sys/domain.h> #include <sys/dirent.h> +#include <sys/vmmeter.h> #include <machine/limits.h> @@ -130,7 +131,7 @@ void vntblinit() { - desiredvnodes = maxproc + vm_object_cache_max; + desiredvnodes = maxproc + cnt.v_page_count / 4; simple_lock_init(&mntvnode_slock); simple_lock_init(&mntid_slock); simple_lock_init(&spechash_slock); @@ -394,9 +395,9 @@ getnewvnode(tag, mp, vops, vpp) simple_unlock(&vnode_free_list_slock); cache_purge(vp); vp->v_lease = NULL; - if (vp->v_type != VBAD) + if (vp->v_type != VBAD) { vgonel(vp, p); - else { + } else { simple_unlock(&vp->v_interlock); } @@ -588,8 +589,10 @@ bgetvp(vp, bp) { int s; +#if defined(DIAGNOSTIC) if (bp->b_vp) panic("bgetvp: not free"); +#endif vhold(vp); bp->b_vp = vp; if (vp->v_type == VBLK || vp->v_type == VCHR) @@ -614,8 +617,11 @@ brelvp(bp) struct vnode *vp; int s; +#if defined(DIAGNOSTIC) if (bp->b_vp == (struct vnode *) 0) panic("brelvp: NULL"); +#endif + /* * Delete from old vnode list, if on one. */ @@ -846,19 +852,18 @@ vget(vp, flags, p) tsleep((caddr_t)vp, PINOD, "vget", 0); return (ENOENT); } + vp->v_usecount++; + if (VSHOULDBUSY(vp)) vbusy(vp); /* * Create the VM object, if needed */ - if ((vp->v_type == VREG) && + if (((vp->v_type == VREG) || (vp->v_type == VBLK)) && ((vp->v_object == NULL) || (vp->v_object->flags & OBJ_VFS_REF) == 0 || (vp->v_object->flags & OBJ_DEAD))) { - /* - * XXX vfs_object_create probably needs the interlock. - */ simple_unlock(&vp->v_interlock); vfs_object_create(vp, curproc, curproc->p_ucred, 0); simple_lock(&vp->v_interlock); @@ -871,119 +876,88 @@ vget(vp, flags, p) simple_unlock(&vp->v_interlock); return (0); } -/* #ifdef DIAGNOSTIC */ + /* - * Vnode reference, just increment the count + * Vnode put/release. 
+ * If count drops to zero, call inactive routine and return to freelist. */ void -vref(vp) +vrele(vp) struct vnode *vp; { + struct proc *p = curproc; /* XXX */ + +#ifdef DIAGNOSTIC + if (vp == NULL) + panic("vrele: null vp"); +#endif simple_lock(&vp->v_interlock); - if (vp->v_usecount <= 0) - panic("vref used where vget required"); - vp->v_usecount++; + if (vp->v_usecount > 1) { - if ((vp->v_type == VREG) && - ((vp->v_object == NULL) || - ((vp->v_object->flags & OBJ_VFS_REF) == 0) || - (vp->v_object->flags & OBJ_DEAD))) { - /* - * We need to lock to VP during the time that - * the object is created. This is necessary to - * keep the system from re-entrantly doing it - * multiple times. - * XXX vfs_object_create probably needs the interlock? - */ + vp->v_usecount--; simple_unlock(&vp->v_interlock); - vfs_object_create(vp, curproc, curproc->p_ucred, 0); - return; + + } else if (vp->v_usecount == 1) { + + vp->v_usecount--; + + if (VSHOULDFREE(vp)) + vfree(vp); + /* + * If we are doing a vput, the node is already locked, and we must + * call VOP_INACTIVE with the node locked. So, in the case of + * vrele, we explicitly lock the vnode before calling VOP_INACTIVE. + */ + if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) { + VOP_INACTIVE(vp, p); + } + + } else { +#ifdef DIAGNOSTIC + vprint("vrele: negative ref count", vp); +#endif + panic("vrele: negative ref cnt"); } - simple_unlock(&vp->v_interlock); } -/* - * Vnode put/release. - * If count drops to zero, call inactive routine and return to freelist. 
- */ -static void -vputrele(vp, put) +void +vput(vp) struct vnode *vp; - int put; { struct proc *p = curproc; /* XXX */ #ifdef DIAGNOSTIC if (vp == NULL) - panic("vputrele: null vp"); + panic("vput: null vp"); #endif simple_lock(&vp->v_interlock); - if ((vp->v_usecount == 2) && - vp->v_object && - (vp->v_object->flags & OBJ_VFS_REF)) { + if (vp->v_usecount > 1) { - vm_freeze_copyopts(vp->v_object, 0, vp->v_object->size); vp->v_usecount--; - vp->v_object->flags &= ~OBJ_VFS_REF; - if (put) { - VOP_UNLOCK(vp, LK_INTERLOCK, p); - } else { - simple_unlock(&vp->v_interlock); - } - vm_object_deallocate(vp->v_object); - return; - } + VOP_UNLOCK(vp, LK_INTERLOCK, p); - if (vp->v_usecount > 1) { - vp->v_usecount--; - if (put) { - VOP_UNLOCK(vp, LK_INTERLOCK, p); - } else { - simple_unlock(&vp->v_interlock); - } - return; - } + } else if (vp->v_usecount == 1) { - if (vp->v_usecount < 1) { -#ifdef DIAGNOSTIC - vprint("vputrele: negative ref count", vp); -#endif - panic("vputrele: negative ref cnt"); - } + vp->v_usecount--; - vp->v_usecount--; - if (VSHOULDFREE(vp)) - vfree(vp); + if (VSHOULDFREE(vp)) + vfree(vp); /* * If we are doing a vput, the node is already locked, and we must * call VOP_INACTIVE with the node locked. So, in the case of * vrele, we explicitly lock the vnode before calling VOP_INACTIVE. 
*/ - if (put) { simple_unlock(&vp->v_interlock); VOP_INACTIVE(vp, p); - } else if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) { - VOP_INACTIVE(vp, p); - } -} -/* - * vput(), just unlock and vrele() - */ -void -vput(vp) - struct vnode *vp; -{ - vputrele(vp, 1); -} - -void -vrele(vp) - struct vnode *vp; -{ - vputrele(vp, 0); + } else { +#ifdef DIAGNOSTIC + vprint("vput: negative ref count", vp); +#endif + panic("vput: negative ref cnt"); + } } /* @@ -1152,15 +1126,6 @@ vclean(vp, flags, p) VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p); object = vp->v_object; - irefed = 0; - if (object && ((object->flags & OBJ_DEAD) == 0)) { - if (object->ref_count == 0) { - vm_object_reference(object); - irefed = 1; - } - ++object->ref_count; - pager_cache(object, FALSE); - } /* * Clean out any buffers associated with the vnode. @@ -1168,7 +1133,8 @@ vclean(vp, flags, p) if (flags & DOCLOSE) vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0); - if (irefed) { + if (vp->v_object && (vp->v_object->flags & OBJ_VFS_REF)) { + vp->v_object->flags &= ~OBJ_VFS_REF; vm_object_deallocate(object); } @@ -1352,6 +1318,7 @@ vgonel(vp, p) * Clean out the filesystem specific data. */ vclean(vp, DOCLOSE, p); + /* * Delete from old mount point vnode list, if on one. 
*/ @@ -2031,7 +1998,9 @@ loop: continue; if (vp->v_object && (vp->v_object->flags & OBJ_MIGHTBEDIRTY)) { - vm_object_page_clean(vp->v_object, 0, 0, TRUE, TRUE); + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc); + vm_object_page_clean(vp->v_object, 0, 0, TRUE); + VOP_UNLOCK(vp, 0, curproc); } } } @@ -2053,6 +2022,9 @@ vfs_object_create(vp, p, cred, waslocked) vm_object_t object; int error = 0; + if ((vp->v_type != VREG) && (vp->v_type != VBLK)) + return 0; + retry: if ((object = vp->v_object) == NULL) { if (vp->v_type == VREG) { @@ -2060,6 +2032,7 @@ retry: goto retn; (void) vnode_pager_alloc(vp, OFF_TO_IDX(round_page(vat.va_size)), 0, 0); + vp->v_object->flags |= OBJ_VFS_REF; } else { /* * This simply allocates the biggest object possible @@ -2067,8 +2040,8 @@ retry: * cause any problems (yet). */ (void) vnode_pager_alloc(vp, INT_MAX, 0, 0); + vp->v_object->flags |= OBJ_VFS_REF; } - vp->v_object->flags |= OBJ_VFS_REF; } else { if (object->flags & OBJ_DEAD) { if (waslocked) @@ -2079,8 +2052,8 @@ retry: goto retry; } if ((object->flags & OBJ_VFS_REF) == 0) { - object->flags |= OBJ_VFS_REF; vm_object_reference(object); + object->flags |= OBJ_VFS_REF; } } if (vp->v_object) diff --git a/sys/kern/vfs_extattr.c b/sys/kern/vfs_extattr.c index 6d71a6c447a7..69751c4aff51 100644 --- a/sys/kern/vfs_extattr.c +++ b/sys/kern/vfs_extattr.c @@ -36,7 +36,7 @@ * SUCH DAMAGE. 
* * @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94 - * $Id: vfs_syscalls.c,v 1.86 1997/12/16 17:40:31 eivind Exp $ + * $Id: vfs_syscalls.c,v 1.87 1997/12/27 02:56:23 bde Exp $ */ /* For 4.3 integer FS ID compatibility */ @@ -430,7 +430,6 @@ dounmount(mp, flags, p) mp->mnt_flag &=~ MNT_ASYNC; vfs_msync(mp, MNT_NOWAIT); - vnode_pager_umount(mp); /* release cached vnodes */ cache_purgevfs(mp); /* remove cache entries for this file sys */ if (((mp->mnt_flag & MNT_RDONLY) || (error = VFS_SYNC(mp, MNT_WAIT, p->p_ucred, p)) == 0) || @@ -1263,8 +1262,6 @@ unlink(p, uap) */ if (vp->v_flag & VROOT) error = EBUSY; - else - (void) vnode_pager_uncache(vp, p); } if (!error) { @@ -2160,9 +2157,9 @@ fsync(p, uap) if (error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) return (error); vp = (struct vnode *)fp->f_data; - vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + vn_lock(vp, LK_EXCLUSIVE, p); if (vp->v_object) { - vm_object_page_clean(vp->v_object, 0, 0 ,0, FALSE); + vm_object_page_clean(vp->v_object, 0, 0 ,0); } error = VOP_FSYNC(vp, fp->f_cred, (vp->v_mount && (vp->v_mount->mnt_flag & MNT_ASYNC)) ? @@ -2242,7 +2239,6 @@ out: VOP_LEASE(fromnd.ni_dvp, p, p->p_ucred, LEASE_WRITE); if (tvp) { VOP_LEASE(tvp, p, p->p_ucred, LEASE_WRITE); - (void) vnode_pager_uncache(tvp, p); } error = VOP_RENAME(fromnd.ni_dvp, fromnd.ni_vp, &fromnd.ni_cnd, tond.ni_dvp, tond.ni_vp, &tond.ni_cnd); diff --git a/sys/kern/vfs_lookup.c b/sys/kern/vfs_lookup.c index fcfd1da21e39..8ceedd4de7e1 100644 --- a/sys/kern/vfs_lookup.c +++ b/sys/kern/vfs_lookup.c @@ -36,7 +36,7 @@ * SUCH DAMAGE. 
* * @(#)vfs_lookup.c 8.4 (Berkeley) 2/16/94 - * $Id: vfs_lookup.c,v 1.20 1997/09/21 04:23:01 dyson Exp $ + * $Id: vfs_lookup.c,v 1.21 1997/12/27 02:56:22 bde Exp $ */ #include "opt_ktrace.h" @@ -537,6 +537,9 @@ nextname: } if (!wantparent) vrele(ndp->ni_dvp); + + vfs_object_create(dp, ndp->ni_cnd.cn_proc, ndp->ni_cnd.cn_cred, 1); + if ((cnp->cn_flags & LOCKLEAF) == 0) VOP_UNLOCK(dp, 0, p); return (0); @@ -683,6 +686,9 @@ relookup(dvp, vpp, cnp) if (!wantparent) vrele(dvp); + + vfs_object_create(dp, cnp->cn_proc, cnp->cn_cred, 1); + if ((cnp->cn_flags & LOCKLEAF) == 0) VOP_UNLOCK(dp, 0, p); return (0); diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c index c2e536934d9a..f39c17d712dd 100644 --- a/sys/kern/vfs_subr.c +++ b/sys/kern/vfs_subr.c @@ -36,7 +36,7 @@ * SUCH DAMAGE. * * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 - * $Id: vfs_subr.c,v 1.115 1997/12/15 03:09:32 wollman Exp $ + * $Id: vfs_subr.c,v 1.116 1997/12/19 09:03:28 dyson Exp $ */ /* @@ -57,6 +57,7 @@ #include <sys/poll.h> #include <sys/domain.h> #include <sys/dirent.h> +#include <sys/vmmeter.h> #include <machine/limits.h> @@ -130,7 +131,7 @@ void vntblinit() { - desiredvnodes = maxproc + vm_object_cache_max; + desiredvnodes = maxproc + cnt.v_page_count / 4; simple_lock_init(&mntvnode_slock); simple_lock_init(&mntid_slock); simple_lock_init(&spechash_slock); @@ -394,9 +395,9 @@ getnewvnode(tag, mp, vops, vpp) simple_unlock(&vnode_free_list_slock); cache_purge(vp); vp->v_lease = NULL; - if (vp->v_type != VBAD) + if (vp->v_type != VBAD) { vgonel(vp, p); - else { + } else { simple_unlock(&vp->v_interlock); } @@ -588,8 +589,10 @@ bgetvp(vp, bp) { int s; +#if defined(DIAGNOSTIC) if (bp->b_vp) panic("bgetvp: not free"); +#endif vhold(vp); bp->b_vp = vp; if (vp->v_type == VBLK || vp->v_type == VCHR) @@ -614,8 +617,11 @@ brelvp(bp) struct vnode *vp; int s; +#if defined(DIAGNOSTIC) if (bp->b_vp == (struct vnode *) 0) panic("brelvp: NULL"); +#endif + /* * Delete from old vnode list, if on one. 
*/ @@ -846,19 +852,18 @@ vget(vp, flags, p) tsleep((caddr_t)vp, PINOD, "vget", 0); return (ENOENT); } + vp->v_usecount++; + if (VSHOULDBUSY(vp)) vbusy(vp); /* * Create the VM object, if needed */ - if ((vp->v_type == VREG) && + if (((vp->v_type == VREG) || (vp->v_type == VBLK)) && ((vp->v_object == NULL) || (vp->v_object->flags & OBJ_VFS_REF) == 0 || (vp->v_object->flags & OBJ_DEAD))) { - /* - * XXX vfs_object_create probably needs the interlock. - */ simple_unlock(&vp->v_interlock); vfs_object_create(vp, curproc, curproc->p_ucred, 0); simple_lock(&vp->v_interlock); @@ -871,119 +876,88 @@ vget(vp, flags, p) simple_unlock(&vp->v_interlock); return (0); } -/* #ifdef DIAGNOSTIC */ + /* - * Vnode reference, just increment the count + * Vnode put/release. + * If count drops to zero, call inactive routine and return to freelist. */ void -vref(vp) +vrele(vp) struct vnode *vp; { + struct proc *p = curproc; /* XXX */ + +#ifdef DIAGNOSTIC + if (vp == NULL) + panic("vrele: null vp"); +#endif simple_lock(&vp->v_interlock); - if (vp->v_usecount <= 0) - panic("vref used where vget required"); - vp->v_usecount++; + if (vp->v_usecount > 1) { - if ((vp->v_type == VREG) && - ((vp->v_object == NULL) || - ((vp->v_object->flags & OBJ_VFS_REF) == 0) || - (vp->v_object->flags & OBJ_DEAD))) { - /* - * We need to lock to VP during the time that - * the object is created. This is necessary to - * keep the system from re-entrantly doing it - * multiple times. - * XXX vfs_object_create probably needs the interlock? - */ + vp->v_usecount--; simple_unlock(&vp->v_interlock); - vfs_object_create(vp, curproc, curproc->p_ucred, 0); - return; + + } else if (vp->v_usecount == 1) { + + vp->v_usecount--; + + if (VSHOULDFREE(vp)) + vfree(vp); + /* + * If we are doing a vput, the node is already locked, and we must + * call VOP_INACTIVE with the node locked. So, in the case of + * vrele, we explicitly lock the vnode before calling VOP_INACTIVE. 
+ */ + if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) { + VOP_INACTIVE(vp, p); + } + + } else { +#ifdef DIAGNOSTIC + vprint("vrele: negative ref count", vp); +#endif + panic("vrele: negative ref cnt"); } - simple_unlock(&vp->v_interlock); } -/* - * Vnode put/release. - * If count drops to zero, call inactive routine and return to freelist. - */ -static void -vputrele(vp, put) +void +vput(vp) struct vnode *vp; - int put; { struct proc *p = curproc; /* XXX */ #ifdef DIAGNOSTIC if (vp == NULL) - panic("vputrele: null vp"); + panic("vput: null vp"); #endif simple_lock(&vp->v_interlock); - if ((vp->v_usecount == 2) && - vp->v_object && - (vp->v_object->flags & OBJ_VFS_REF)) { + if (vp->v_usecount > 1) { - vm_freeze_copyopts(vp->v_object, 0, vp->v_object->size); vp->v_usecount--; - vp->v_object->flags &= ~OBJ_VFS_REF; - if (put) { - VOP_UNLOCK(vp, LK_INTERLOCK, p); - } else { - simple_unlock(&vp->v_interlock); - } - vm_object_deallocate(vp->v_object); - return; - } + VOP_UNLOCK(vp, LK_INTERLOCK, p); - if (vp->v_usecount > 1) { - vp->v_usecount--; - if (put) { - VOP_UNLOCK(vp, LK_INTERLOCK, p); - } else { - simple_unlock(&vp->v_interlock); - } - return; - } + } else if (vp->v_usecount == 1) { - if (vp->v_usecount < 1) { -#ifdef DIAGNOSTIC - vprint("vputrele: negative ref count", vp); -#endif - panic("vputrele: negative ref cnt"); - } + vp->v_usecount--; - vp->v_usecount--; - if (VSHOULDFREE(vp)) - vfree(vp); + if (VSHOULDFREE(vp)) + vfree(vp); /* * If we are doing a vput, the node is already locked, and we must * call VOP_INACTIVE with the node locked. So, in the case of * vrele, we explicitly lock the vnode before calling VOP_INACTIVE. 
*/ - if (put) { simple_unlock(&vp->v_interlock); VOP_INACTIVE(vp, p); - } else if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) { - VOP_INACTIVE(vp, p); - } -} -/* - * vput(), just unlock and vrele() - */ -void -vput(vp) - struct vnode *vp; -{ - vputrele(vp, 1); -} - -void -vrele(vp) - struct vnode *vp; -{ - vputrele(vp, 0); + } else { +#ifdef DIAGNOSTIC + vprint("vput: negative ref count", vp); +#endif + panic("vput: negative ref cnt"); + } } /* @@ -1152,15 +1126,6 @@ vclean(vp, flags, p) VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p); object = vp->v_object; - irefed = 0; - if (object && ((object->flags & OBJ_DEAD) == 0)) { - if (object->ref_count == 0) { - vm_object_reference(object); - irefed = 1; - } - ++object->ref_count; - pager_cache(object, FALSE); - } /* * Clean out any buffers associated with the vnode. @@ -1168,7 +1133,8 @@ vclean(vp, flags, p) if (flags & DOCLOSE) vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0); - if (irefed) { + if (vp->v_object && (vp->v_object->flags & OBJ_VFS_REF)) { + vp->v_object->flags &= ~OBJ_VFS_REF; vm_object_deallocate(object); } @@ -1352,6 +1318,7 @@ vgonel(vp, p) * Clean out the filesystem specific data. */ vclean(vp, DOCLOSE, p); + /* * Delete from old mount point vnode list, if on one. 
*/ @@ -2031,7 +1998,9 @@ loop: continue; if (vp->v_object && (vp->v_object->flags & OBJ_MIGHTBEDIRTY)) { - vm_object_page_clean(vp->v_object, 0, 0, TRUE, TRUE); + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc); + vm_object_page_clean(vp->v_object, 0, 0, TRUE); + VOP_UNLOCK(vp, 0, curproc); } } } @@ -2053,6 +2022,9 @@ vfs_object_create(vp, p, cred, waslocked) vm_object_t object; int error = 0; + if ((vp->v_type != VREG) && (vp->v_type != VBLK)) + return 0; + retry: if ((object = vp->v_object) == NULL) { if (vp->v_type == VREG) { @@ -2060,6 +2032,7 @@ retry: goto retn; (void) vnode_pager_alloc(vp, OFF_TO_IDX(round_page(vat.va_size)), 0, 0); + vp->v_object->flags |= OBJ_VFS_REF; } else { /* * This simply allocates the biggest object possible @@ -2067,8 +2040,8 @@ retry: * cause any problems (yet). */ (void) vnode_pager_alloc(vp, INT_MAX, 0, 0); + vp->v_object->flags |= OBJ_VFS_REF; } - vp->v_object->flags |= OBJ_VFS_REF; } else { if (object->flags & OBJ_DEAD) { if (waslocked) @@ -2079,8 +2052,8 @@ retry: goto retry; } if ((object->flags & OBJ_VFS_REF) == 0) { - object->flags |= OBJ_VFS_REF; vm_object_reference(object); + object->flags |= OBJ_VFS_REF; } } if (vp->v_object) diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c index 6d71a6c447a7..69751c4aff51 100644 --- a/sys/kern/vfs_syscalls.c +++ b/sys/kern/vfs_syscalls.c @@ -36,7 +36,7 @@ * SUCH DAMAGE. 
* * @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94 - * $Id: vfs_syscalls.c,v 1.86 1997/12/16 17:40:31 eivind Exp $ + * $Id: vfs_syscalls.c,v 1.87 1997/12/27 02:56:23 bde Exp $ */ /* For 4.3 integer FS ID compatibility */ @@ -430,7 +430,6 @@ dounmount(mp, flags, p) mp->mnt_flag &=~ MNT_ASYNC; vfs_msync(mp, MNT_NOWAIT); - vnode_pager_umount(mp); /* release cached vnodes */ cache_purgevfs(mp); /* remove cache entries for this file sys */ if (((mp->mnt_flag & MNT_RDONLY) || (error = VFS_SYNC(mp, MNT_WAIT, p->p_ucred, p)) == 0) || @@ -1263,8 +1262,6 @@ unlink(p, uap) */ if (vp->v_flag & VROOT) error = EBUSY; - else - (void) vnode_pager_uncache(vp, p); } if (!error) { @@ -2160,9 +2157,9 @@ fsync(p, uap) if (error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) return (error); vp = (struct vnode *)fp->f_data; - vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + vn_lock(vp, LK_EXCLUSIVE, p); if (vp->v_object) { - vm_object_page_clean(vp->v_object, 0, 0 ,0, FALSE); + vm_object_page_clean(vp->v_object, 0, 0 ,0); } error = VOP_FSYNC(vp, fp->f_cred, (vp->v_mount && (vp->v_mount->mnt_flag & MNT_ASYNC)) ? @@ -2242,7 +2239,6 @@ out: VOP_LEASE(fromnd.ni_dvp, p, p->p_ucred, LEASE_WRITE); if (tvp) { VOP_LEASE(tvp, p, p->p_ucred, LEASE_WRITE); - (void) vnode_pager_uncache(tvp, p); } error = VOP_RENAME(fromnd.ni_dvp, fromnd.ni_vp, &fromnd.ni_cnd, tond.ni_dvp, tond.ni_vp, &tond.ni_cnd); diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c index 58dfad550cc3..460bdd7b67c4 100644 --- a/sys/kern/vfs_vnops.c +++ b/sys/kern/vfs_vnops.c @@ -36,7 +36,7 @@ * SUCH DAMAGE. 
* * @(#)vfs_vnops.c 8.2 (Berkeley) 1/21/94 - * $Id: vfs_vnops.c,v 1.42 1997/11/29 01:33:10 dyson Exp $ + * $Id: vfs_vnops.c,v 1.43 1997/12/06 04:11:11 sef Exp $ */ #include <sys/param.h> @@ -510,7 +510,9 @@ vn_lock(vp, flags, p) if (vp->v_flag & VXLOCK) { vp->v_flag |= VXWANT; simple_unlock(&vp->v_interlock); - tsleep((caddr_t)vp, PINOD, "vn_lock", 0); + if (tsleep((caddr_t)vp, PINOD, "vn_lock", 100*5)) { + vprint("vn_lock:", vp); + } error = ENOENT; } else { error = VOP_LOCK(vp, flags | LK_NOPAUSE | LK_INTERLOCK, p); diff --git a/sys/miscfs/specfs/spec_vnops.c b/sys/miscfs/specfs/spec_vnops.c index b771aa8a4073..6da09a60ec2c 100644 --- a/sys/miscfs/specfs/spec_vnops.c +++ b/sys/miscfs/specfs/spec_vnops.c @@ -31,7 +31,7 @@ * SUCH DAMAGE. * * @(#)spec_vnops.c 8.14 (Berkeley) 5/21/95 - * $Id: spec_vnops.c,v 1.50 1997/10/26 20:55:24 phk Exp $ + * $Id: spec_vnops.c,v 1.51 1997/10/27 13:33:42 bde Exp $ */ #include <sys/param.h> @@ -640,16 +640,9 @@ spec_close(ap) * sum of the reference counts on all the aliased * vnodes descends to one, we are on last close. */ - if ((vcount(vp) > (vp->v_object?2:1)) && - (vp->v_flag & VXLOCK) == 0) + if ((vcount(vp) > 1) && (vp->v_flag & VXLOCK) == 0) return (0); - if (vp->v_object) { - vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); - vnode_pager_uncache(vp, p); - VOP_UNLOCK(vp, 0, p); - } - devclose = bdevsw[major(dev)]->d_close; mode = S_IFBLK; break; @@ -796,7 +789,7 @@ spec_getpages(ap) /* We definitely need to be at splbio here. */ while ((bp->b_flags & B_DONE) == 0) - tsleep(bp, PVM, "vnread", 0); + tsleep(bp, PVM, "spread", 0); splx(s); diff --git a/sys/nfs/nfs_serv.c b/sys/nfs/nfs_serv.c index e5cc74535a24..029897b2a42f 100644 --- a/sys/nfs/nfs_serv.c +++ b/sys/nfs/nfs_serv.c @@ -34,7 +34,7 @@ * SUCH DAMAGE. 
* * @(#)nfs_serv.c 8.3 (Berkeley) 1/12/94 - * $Id: nfs_serv.c,v 1.52 1997/10/28 15:59:05 bde Exp $ + * $Id: nfs_serv.c,v 1.53 1997/12/27 02:56:34 bde Exp $ */ /* @@ -1792,7 +1792,6 @@ nfsrv_remove(nfsd, slp, procp, mrq) } out: if (!error) { - vnode_pager_uncache(vp, procp); nqsrv_getl(nd.ni_dvp, ND_WRITE); nqsrv_getl(vp, ND_WRITE); @@ -1969,7 +1968,6 @@ out: nqsrv_getl(tdvp, ND_WRITE); if (tvp) { nqsrv_getl(tvp, ND_WRITE); - (void) vnode_pager_uncache(tvp, procp); } error = VOP_RENAME(fromnd.ni_dvp, fromnd.ni_vp, &fromnd.ni_cnd, tond.ni_dvp, tond.ni_vp, &tond.ni_cnd); diff --git a/sys/nfs/nfs_vnops.c b/sys/nfs/nfs_vnops.c index 6aa27cafc288..cc20b0e659c4 100644 --- a/sys/nfs/nfs_vnops.c +++ b/sys/nfs/nfs_vnops.c @@ -34,7 +34,7 @@ * SUCH DAMAGE. * * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95 - * $Id: nfs_vnops.c,v 1.72 1997/11/07 09:20:48 phk Exp $ + * $Id: nfs_vnops.c,v 1.73 1997/12/27 02:56:36 bde Exp $ */ @@ -408,7 +408,6 @@ nfs_open(ap) if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 1)) == EINTR) return (error); - (void) vnode_pager_uncache(vp, ap->a_p); np->n_brev = np->n_lrev; } } diff --git a/sys/nfsclient/nfs_vnops.c b/sys/nfsclient/nfs_vnops.c index 6aa27cafc288..cc20b0e659c4 100644 --- a/sys/nfsclient/nfs_vnops.c +++ b/sys/nfsclient/nfs_vnops.c @@ -34,7 +34,7 @@ * SUCH DAMAGE. * * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95 - * $Id: nfs_vnops.c,v 1.72 1997/11/07 09:20:48 phk Exp $ + * $Id: nfs_vnops.c,v 1.73 1997/12/27 02:56:36 bde Exp $ */ @@ -408,7 +408,6 @@ nfs_open(ap) if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 1)) == EINTR) return (error); - (void) vnode_pager_uncache(vp, ap->a_p); np->n_brev = np->n_lrev; } } diff --git a/sys/nfsserver/nfs_serv.c b/sys/nfsserver/nfs_serv.c index e5cc74535a24..029897b2a42f 100644 --- a/sys/nfsserver/nfs_serv.c +++ b/sys/nfsserver/nfs_serv.c @@ -34,7 +34,7 @@ * SUCH DAMAGE. 
* * @(#)nfs_serv.c 8.3 (Berkeley) 1/12/94 - * $Id: nfs_serv.c,v 1.52 1997/10/28 15:59:05 bde Exp $ + * $Id: nfs_serv.c,v 1.53 1997/12/27 02:56:34 bde Exp $ */ /* @@ -1792,7 +1792,6 @@ nfsrv_remove(nfsd, slp, procp, mrq) } out: if (!error) { - vnode_pager_uncache(vp, procp); nqsrv_getl(nd.ni_dvp, ND_WRITE); nqsrv_getl(vp, ND_WRITE); @@ -1969,7 +1968,6 @@ out: nqsrv_getl(tdvp, ND_WRITE); if (tvp) { nqsrv_getl(tvp, ND_WRITE); - (void) vnode_pager_uncache(tvp, procp); } error = VOP_RENAME(fromnd.ni_dvp, fromnd.ni_vp, &fromnd.ni_cnd, tond.ni_dvp, tond.ni_vp, &tond.ni_cnd); diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h index fec331aec28a..e6a356a58095 100644 --- a/sys/sys/vnode.h +++ b/sys/sys/vnode.h @@ -31,7 +31,7 @@ * SUCH DAMAGE. * * @(#)vnode.h 8.7 (Berkeley) 2/4/94 - * $Id: vnode.h,v 1.58 1997/12/05 19:55:49 bde Exp $ + * $Id: vnode.h,v 1.59 1997/12/15 03:09:51 wollman Exp $ */ #ifndef _SYS_VNODE_H_ @@ -153,6 +153,7 @@ struct vnode { #define VOWANT 0x20000 /* a process is waiting for VOLOCK */ #define VDOOMED 0x40000 /* This vnode is being recycled */ #define VFREE 0x80000 /* This vnode is on the freelist */ +#define VOBJREF 0x100000 /* This vnode is referenced by it's object */ /* * Vnode attributes. 
A field value of VNOVAL represents a field whose value @@ -238,6 +239,12 @@ extern int vttoif_tab[]; #define V_SAVEMETA 0x0002 /* vinvalbuf: leave indirect blocks */ #define REVOKEALL 0x0001 /* vop_revoke: revoke all aliases */ +static __inline void +vref(struct vnode *vp) +{ + vp->v_usecount++; +} + #define VREF(vp) vref(vp) #ifdef DIAGNOSTIC @@ -520,8 +527,8 @@ int vop_null __P((struct vop_generic_args *ap)); struct vnode * checkalias __P((struct vnode *vp, dev_t nvp_rdev, struct mount *mp)); void vput __P((struct vnode *vp)); -void vref __P((struct vnode *vp)); void vrele __P((struct vnode *vp)); +void vrefobj __P((struct vnode *vp)); extern vop_t **default_vnodeop_p; #endif /* KERNEL */ diff --git a/sys/ufs/ffs/ffs_vfsops.c b/sys/ufs/ffs/ffs_vfsops.c index 4c8f5bb6cbc7..16f77c609c94 100644 --- a/sys/ufs/ffs/ffs_vfsops.c +++ b/sys/ufs/ffs/ffs_vfsops.c @@ -31,7 +31,7 @@ * SUCH DAMAGE. * * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95 - * $Id: ffs_vfsops.c,v 1.61 1997/10/16 20:32:35 phk Exp $ + * $Id: ffs_vfsops.c,v 1.62 1997/11/12 05:42:25 julian Exp $ */ #include "opt_quota.h" @@ -727,10 +727,6 @@ ffs_unmount(mp, mntflags, p) } ump->um_devvp->v_specflags &= ~SI_MOUNTEDON; - vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, p); - vnode_pager_uncache(ump->um_devvp, p); - VOP_UNLOCK(ump->um_devvp, 0, p); - error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE, NOCRED, p); diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c index 1fe8cc351641..b20b433b4a2c 100644 --- a/sys/vm/vm_map.c +++ b/sys/vm/vm_map.c @@ -61,7 +61,7 @@ * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. * - * $Id: vm_map.c,v 1.100 1997/12/19 15:31:13 dyson Exp $ + * $Id: vm_map.c,v 1.101 1997/12/25 20:55:15 dyson Exp $ */ /* @@ -1725,15 +1725,19 @@ vm_map_clean(map, start, end, syncio, invalidate) * idea. 
*/ if (current->protection & VM_PROT_WRITE) { - vm_object_page_clean(object, + if (object->type == OBJT_VNODE) + vn_lock(object->handle, LK_EXCLUSIVE, curproc); + vm_object_page_clean(object, OFF_TO_IDX(offset), - OFF_TO_IDX(offset + size), - (syncio||invalidate)?1:0, TRUE); + OFF_TO_IDX(offset + size + PAGE_MASK), + (syncio||invalidate)?1:0); if (invalidate) vm_object_page_remove(object, OFF_TO_IDX(offset), - OFF_TO_IDX(offset + size), + OFF_TO_IDX(offset + size + PAGE_MASK), FALSE); + if (object->type == OBJT_VNODE) + VOP_UNLOCK(object->handle, 0, curproc); } } start += size; diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c index 30f26c9c46a2..221d7fd59799 100644 --- a/sys/vm/vm_object.c +++ b/sys/vm/vm_object.c @@ -61,7 +61,7 @@ * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. * - * $Id: vm_object.c,v 1.101 1997/11/18 11:02:19 bde Exp $ + * $Id: vm_object.c,v 1.102 1997/12/19 09:03:14 dyson Exp $ */ /* @@ -95,7 +95,6 @@ static void vm_object_qcollapse __P((vm_object_t object)); static void vm_object_deactivate_pages __P((vm_object_t)); #endif static void vm_object_terminate __P((vm_object_t)); -static void vm_object_cache_trim __P((void)); /* * Virtual memory objects maintain the actual data @@ -123,9 +122,6 @@ static void vm_object_cache_trim __P((void)); * */ -int vm_object_cache_max; -struct object_q vm_object_cached_list; -static int vm_object_cached; /* size of cached list */ struct object_q vm_object_list; struct simplelock vm_object_list_lock; static long vm_object_count; /* count of all objects */ @@ -187,15 +183,10 @@ _vm_object_allocate(type, size, object) void vm_object_init() { - TAILQ_INIT(&vm_object_cached_list); TAILQ_INIT(&vm_object_list); simple_lock_init(&vm_object_list_lock); vm_object_count = 0; - vm_object_cache_max = 84; - if (cnt.v_page_count > 1000) - vm_object_cache_max += (cnt.v_page_count - 1000) / 4; - kernel_object = &kernel_object_store; 
_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS), kernel_object); @@ -245,14 +236,19 @@ vm_object_reference(object) { if (object == NULL) return; - if (object->ref_count == 0) { - if ((object->flags & OBJ_CANPERSIST) == 0) - panic("vm_object_reference: non-persistent object with 0 ref_count"); - TAILQ_REMOVE(&vm_object_cached_list, object, cached_list); - vm_object_cached--; + panic("vm_object_reference: attempting to reference deallocated obj"); } object->ref_count++; + if ((object->type == OBJT_VNODE) && (object->flags & OBJ_VFS_REF)) { + struct vnode *vp; + vp = (struct vnode *)object->handle; + simple_lock(&vp->v_interlock); + if (vp->v_flag & VOBJREF) + vp->v_flag |= VOBJREF; + ++vp->v_usecount; + simple_unlock(&vp->v_interlock); + } } /* @@ -271,19 +267,51 @@ vm_object_deallocate(object) vm_object_t object; { vm_object_t temp; + struct vnode *vp; while (object != NULL) { - if (object->ref_count == 0) + if (object->ref_count == 0) { panic("vm_object_deallocate: object deallocated too many times"); + } else if (object->ref_count > 2) { + object->ref_count--; + return; + } + + /* + * Here on ref_count of one or two, which are special cases for + * objects. + */ + vp = NULL; + if (object->type == OBJT_VNODE) { + vp = (struct vnode *)object->handle; + if (vp->v_flag & VOBJREF) { + if (object->ref_count < 2) { + panic("vm_object_deallocate: " + "not enough references for OBJT_VNODE: %d", + object->ref_count); + } else { + + /* + * Freeze optimized copies. + */ + vm_freeze_copyopts(object, 0, object->size); + + /* + * Loose our reference to the vnode. 
+ */ + vp->v_flag &= ~VOBJREF; + vrele(vp); + } + } + } /* * Lose the reference */ - object->ref_count--; - if (object->ref_count != 0) { - if ((object->ref_count == 1) && - (object->handle == NULL) && + if (object->ref_count == 2) { + object->ref_count--; + if ((object->handle == NULL) && (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) { vm_object_t robject; @@ -328,45 +356,15 @@ vm_object_deallocate(object) return; } - if (object->type == OBJT_VNODE) { - struct vnode *vp = object->handle; - - vp->v_flag &= ~VTEXT; - } - - /* - * See if this object can persist and has some resident - * pages. If so, enter it in the cache. - */ - if (object->flags & OBJ_CANPERSIST) { - if (object->resident_page_count != 0) { -#if 0 - vm_object_page_clean(object, 0, 0 ,TRUE, TRUE); -#endif - TAILQ_INSERT_TAIL(&vm_object_cached_list, object, - cached_list); - vm_object_cached++; - - vm_object_cache_trim(); - return; - } else { - object->flags &= ~OBJ_CANPERSIST; - } - } - /* * Make sure no one uses us. 
*/ object->flags |= OBJ_DEAD; - if (object->type == OBJT_VNODE) { - struct vnode *vp = object->handle; - if (vp->v_flag & VVMIO) { - object->ref_count++; - vm_freeze_copyopts(object, 0, object->size); - object->ref_count--; - } - } + if (vp) + vp->v_flag &= ~VTEXT; + + object->ref_count--; temp = object->backing_object; if (temp) { @@ -414,16 +412,8 @@ vm_object_terminate(object) */ if (object->type == OBJT_VNODE) { struct vnode *vp = object->handle; - struct proc *cp = curproc; /* XXX */ - int waslocked; - - waslocked = VOP_ISLOCKED(vp); - if (!waslocked) - vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, cp); - vm_object_page_clean(object, 0, 0, TRUE, FALSE); + vm_object_page_clean(object, 0, 0, TRUE); vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0); - if (!waslocked) - VOP_UNLOCK(vp, 0, cp); } /* @@ -468,12 +458,11 @@ vm_object_terminate(object) */ void -vm_object_page_clean(object, start, end, syncio, lockflag) +vm_object_page_clean(object, start, end, syncio) vm_object_t object; vm_pindex_t start; vm_pindex_t end; boolean_t syncio; - boolean_t lockflag; { register vm_page_t p, np, tp; register vm_offset_t tstart, tend; @@ -496,8 +485,6 @@ vm_object_page_clean(object, start, end, syncio, lockflag) vp = object->handle; - if (lockflag) - vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, pproc); object->flags |= OBJ_CLEANING; tstart = start; @@ -614,8 +601,6 @@ rescan: VOP_FSYNC(vp, NULL, syncio, curproc); - if (lockflag) - VOP_UNLOCK(vp, 0, pproc); object->flags &= ~OBJ_CLEANING; return; } @@ -644,23 +629,6 @@ vm_object_deactivate_pages(object) #endif /* - * Trim the object cache to size. 
- */ -static void -vm_object_cache_trim() -{ - register vm_object_t object; - - while (vm_object_cached > vm_object_cache_max) { - object = TAILQ_FIRST(&vm_object_cached_list); - - vm_object_reference(object); - pager_cache(object, FALSE); - } -} - - -/* * vm_object_pmap_copy: * * Makes all physical pages in the specified @@ -1554,8 +1522,6 @@ DB_SHOW_COMMAND(object, vm_object_print_static) db_printf("offset=0x%x, backing_object=(0x%x)+0x%x\n", (int) object->paging_offset, (int) object->backing_object, (int) object->backing_object_offset); - db_printf("cache: next=%p, prev=%p\n", - TAILQ_NEXT(object, cached_list), TAILQ_PREV(object, cached_list)); if (!full) return; diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h index 305043b8229b..a13a5bf12d5d 100644 --- a/sys/vm/vm_object.h +++ b/sys/vm/vm_object.h @@ -61,7 +61,7 @@ * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. * - * $Id: vm_object.h,v 1.38 1997/09/21 04:24:24 dyson Exp $ + * $Id: vm_object.h,v 1.39 1997/12/19 09:03:16 dyson Exp $ */ /* @@ -84,7 +84,6 @@ typedef enum obj_type objtype_t; struct vm_object { TAILQ_ENTRY(vm_object) object_list; /* list of all objects */ - TAILQ_ENTRY(vm_object) cached_list; /* list of cached (persistent) objects */ TAILQ_HEAD(, vm_object) shadow_head; /* objects that this is a shadow for */ TAILQ_ENTRY(vm_object) shadow_list; /* chain of shadow objects */ TAILQ_HEAD(, vm_page) memq; /* list of resident pages */ @@ -142,12 +141,9 @@ struct vm_object { #define OFF_TO_IDX(off) ((vm_pindex_t)(((vm_ooffset_t)(off)) >> PAGE_SHIFT)) #ifdef KERNEL -extern int vm_object_cache_max; TAILQ_HEAD(object_q, vm_object); -extern struct object_q vm_object_cached_list; /* list of objects persisting */ - extern struct object_q vm_object_list; /* list of allocated objects */ /* lock for object list and count */ @@ -170,13 +166,12 @@ vm_object_pip_wakeup(vm_object_t object) vm_object_t vm_object_allocate __P((objtype_t, vm_size_t)); 
void _vm_object_allocate __P((objtype_t, vm_size_t, vm_object_t)); -void vm_object_cache_clear __P((void)); boolean_t vm_object_coalesce __P((vm_object_t, vm_pindex_t, vm_size_t, vm_size_t)); void vm_object_collapse __P((vm_object_t)); void vm_object_copy __P((vm_object_t, vm_pindex_t, vm_object_t *, vm_pindex_t *, boolean_t *)); void vm_object_deallocate __P((vm_object_t)); void vm_object_init __P((void)); -void vm_object_page_clean __P((vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t, boolean_t)); +void vm_object_page_clean __P((vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t)); void vm_object_page_remove __P((vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t)); void vm_object_pmap_copy __P((vm_object_t, vm_pindex_t, vm_pindex_t)); void vm_object_pmap_copy_1 __P((vm_object_t, vm_pindex_t, vm_pindex_t)); diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c index 5571b7083038..3b365264ac1e 100644 --- a/sys/vm/vm_page.c +++ b/sys/vm/vm_page.c @@ -34,7 +34,7 @@ * SUCH DAMAGE. * * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91 - * $Id: vm_page.c,v 1.82 1997/10/10 18:18:47 phk Exp $ + * $Id: vm_page.c,v 1.83 1997/11/06 08:35:50 dyson Exp $ */ /* @@ -73,6 +73,7 @@ #include <sys/malloc.h> #include <sys/proc.h> #include <sys/vmmeter.h> +#include <sys/vnode.h> #include <vm/vm.h> #include <vm/vm_param.h> @@ -1357,7 +1358,9 @@ again1: vm_page_test_dirty(m); if (m->dirty) { if (m->object->type == OBJT_VNODE) { - vm_object_page_clean(m->object, 0, 0, TRUE, TRUE); + vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc); + vm_object_page_clean(m->object, 0, 0, TRUE); + VOP_UNLOCK(m->object->handle, 0, curproc); goto again1; } else if (m->object->type == OBJT_SWAP || m->object->type == OBJT_DEFAULT) { @@ -1389,7 +1392,9 @@ again1: vm_page_test_dirty(m); if (m->dirty) { if (m->object->type == OBJT_VNODE) { - vm_object_page_clean(m->object, 0, 0, TRUE, TRUE); + vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc); + vm_object_page_clean(m->object, 0, 0, TRUE); + 
VOP_UNLOCK(m->object->handle, 0, curproc); goto again1; } else if (m->object->type == OBJT_SWAP || m->object->type == OBJT_DEFAULT) { diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c index 51b04ac138a8..99ee5a424ae6 100644 --- a/sys/vm/vm_pageout.c +++ b/sys/vm/vm_pageout.c @@ -65,7 +65,7 @@ * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. * - * $Id: vm_pageout.c,v 1.103 1997/12/06 02:23:33 dyson Exp $ + * $Id: vm_pageout.c,v 1.104 1997/12/24 15:05:25 dyson Exp $ */ /* @@ -1323,23 +1323,6 @@ vm_daemon() (vm_pindex_t)(limit >> PAGE_SHIFT) ); } } - - /* - * we remove cached objects that have no RSS... - */ -restart: - object = TAILQ_FIRST(&vm_object_cached_list); - while (object) { - /* - * if there are no resident pages -- get rid of the object - */ - if (object->resident_page_count == 0) { - vm_object_reference(object); - pager_cache(object, FALSE); - goto restart; - } - object = TAILQ_NEXT(object, cached_list); - } } } #endif diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c index bfc2cc13bccd..44a3bc0fd802 100644 --- a/sys/vm/vm_pager.c +++ b/sys/vm/vm_pager.c @@ -61,7 +61,7 @@ * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. * - * $Id: vm_pager.c,v 1.29 1997/09/01 03:17:28 bde Exp $ + * $Id: vm_pager.c,v 1.30 1997/10/12 20:26:31 phk Exp $ */ /* @@ -252,28 +252,6 @@ vm_pager_object_lookup(pg_list, handle) } /* - * This routine loses a reference to the object - - * thus a reference must be gained before calling. 
- */ -int -pager_cache(object, should_cache) - vm_object_t object; - boolean_t should_cache; -{ - if (object == NULL) - return (KERN_INVALID_ARGUMENT); - - if (should_cache) - object->flags |= OBJ_CANPERSIST; - else - object->flags &= ~OBJ_CANPERSIST; - - vm_object_deallocate(object); - - return (KERN_SUCCESS); -} - -/* * initialize a physical buffer */ diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c index 8ea272888bc4..360188a5a867 100644 --- a/sys/vm/vnode_pager.c +++ b/sys/vm/vnode_pager.c @@ -38,7 +38,7 @@ * SUCH DAMAGE. * * from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91 - * $Id: vnode_pager.c,v 1.76 1997/12/02 21:07:20 phk Exp $ + * $Id: vnode_pager.c,v 1.77 1997/12/19 09:03:17 dyson Exp $ */ /* @@ -132,6 +132,9 @@ vnode_pager_alloc(void *handle, vm_size_t size, vm_prot_t prot, tsleep(object, PVM, "vadead", 0); } + if (vp->v_usecount == 0) + panic("vnode_pager_alloc: no vnode reference"); + if (object == NULL) { /* * And an object of the appropriate size @@ -142,12 +145,6 @@ vnode_pager_alloc(void *handle, vm_size_t size, vm_prot_t prot, else object->flags = 0; - if (vp->v_usecount == 0) - panic("vnode_pager_alloc: no vnode reference"); - /* - * Hold a reference to the vnode and initialize object data. - */ - vp->v_usecount++; object->un_pager.vnp.vnp_size = (vm_ooffset_t) size * PAGE_SIZE; object->handle = handle; @@ -193,7 +190,6 @@ vnode_pager_dealloc(object) vp->v_object = NULL; vp->v_flag &= ~(VTEXT | VVMIO); - vrele(vp); } static boolean_t @@ -321,75 +317,6 @@ vnode_pager_setsize(vp, nsize) } void -vnode_pager_umount(mp) - register struct mount *mp; -{ - struct proc *p = curproc; /* XXX */ - struct vnode *vp, *nvp; - -loop: - for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) { - /* - * Vnode can be reclaimed by getnewvnode() while we - * traverse the list. 
- */ - if (vp->v_mount != mp) - goto loop; - - /* - * Save the next pointer now since uncaching may terminate the - * object and render vnode invalid - */ - nvp = vp->v_mntvnodes.le_next; - - if (vp->v_object != NULL) { - vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); - vnode_pager_uncache(vp, p); - VOP_UNLOCK(vp, 0, p); - } - } -} - -/* - * Remove vnode associated object from the object cache. - * This routine must be called with the vnode locked. - * - * XXX unlock the vnode. - * We must do this since uncaching the object may result in its - * destruction which may initiate paging activity which may necessitate - * re-locking the vnode. - */ -void -vnode_pager_uncache(vp, p) - struct vnode *vp; - struct proc *p; -{ - vm_object_t object; - - /* - * Not a mapped vnode - */ - object = vp->v_object; - if (object == NULL) - return; - - vm_object_reference(object); - vm_freeze_copyopts(object, 0, object->size); - - /* - * XXX We really should handle locking on - * VBLK devices... - */ - if (vp->v_type != VBLK) - VOP_UNLOCK(vp, 0, p); - pager_cache(object, FALSE); - if (vp->v_type != VBLK) - vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); - return; -} - - -void vnode_pager_freepage(m) vm_page_t m; { |