Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_synch.c      4
-rw-r--r--  sys/kern/vfs_export.c    173
-rw-r--r--  sys/kern/vfs_extattr.c    10
-rw-r--r--  sys/kern/vfs_lookup.c      8
-rw-r--r--  sys/kern/vfs_subr.c      173
-rw-r--r--  sys/kern/vfs_syscalls.c   10
-rw-r--r--  sys/kern/vfs_vnops.c       6
7 files changed, 166 insertions(+), 218 deletions(-)
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index c09efb1bedf1..6ecab6fdd23d 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)kern_synch.c 8.9 (Berkeley) 5/19/95
- * $Id: kern_synch.c,v 1.41 1997/11/22 08:35:38 bde Exp $
+ * $Id: kern_synch.c,v 1.42 1997/11/25 07:07:44 julian Exp $
*/
#include "opt_ktrace.h"
@@ -66,6 +66,8 @@ int lbolt; /* once a second sleep address */
static void endtsleep __P((void *));
static void updatepri __P((struct proc *p));
+static void roundrobin __P((void *arg));
+static void schedcpu __P((void *arg));
#define MAXIMUM_SCHEDULE_QUANTUM (1000000) /* arbitrary limit */
#ifndef DEFAULT_SCHEDULE_QUANTUM
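
The kern_synch.c hunk only adds forward declarations so that roundrobin() and schedcpu() can become static. The declarations use the __P() macro from <sys/cdefs.h>, which keeps the file compilable under both ANSI and K&R compilers. The following is an illustrative sketch, not part of the commit; the macro definition shown is a simplification of the real <sys/cdefs.h>.

/*
 * Roughly how __P() behaves: a full prototype under ANSI C, an empty
 * parameter list under K&R C.
 */
#if defined(__STDC__) || defined(__cplusplus)
#define	__P(protos)	protos		/* ANSI: keep the prototype */
#else
#define	__P(protos)	()		/* K&R: drop the parameter list */
#endif

static void roundrobin __P((void *arg));	/* ANSI: static void roundrobin(void *arg); */
static void schedcpu __P((void *arg));
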
diff --git a/sys/kern/vfs_export.c b/sys/kern/vfs_export.c
index c2e536934d9a..f39c17d712dd 100644
--- a/sys/kern/vfs_export.c
+++ b/sys/kern/vfs_export.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
- * $Id: vfs_subr.c,v 1.115 1997/12/15 03:09:32 wollman Exp $
+ * $Id: vfs_subr.c,v 1.116 1997/12/19 09:03:28 dyson Exp $
*/
/*
@@ -57,6 +57,7 @@
#include <sys/poll.h>
#include <sys/domain.h>
#include <sys/dirent.h>
+#include <sys/vmmeter.h>
#include <machine/limits.h>
@@ -130,7 +131,7 @@ void
vntblinit()
{
- desiredvnodes = maxproc + vm_object_cache_max;
+ desiredvnodes = maxproc + cnt.v_page_count / 4;
simple_lock_init(&mntvnode_slock);
simple_lock_init(&mntid_slock);
simple_lock_init(&spechash_slock);
@@ -394,9 +395,9 @@ getnewvnode(tag, mp, vops, vpp)
simple_unlock(&vnode_free_list_slock);
cache_purge(vp);
vp->v_lease = NULL;
- if (vp->v_type != VBAD)
+ if (vp->v_type != VBAD) {
vgonel(vp, p);
- else {
+ } else {
simple_unlock(&vp->v_interlock);
}
@@ -588,8 +589,10 @@ bgetvp(vp, bp)
{
int s;
+#if defined(DIAGNOSTIC)
if (bp->b_vp)
panic("bgetvp: not free");
+#endif
vhold(vp);
bp->b_vp = vp;
if (vp->v_type == VBLK || vp->v_type == VCHR)
@@ -614,8 +617,11 @@ brelvp(bp)
struct vnode *vp;
int s;
+#if defined(DIAGNOSTIC)
if (bp->b_vp == (struct vnode *) 0)
panic("brelvp: NULL");
+#endif
+
/*
* Delete from old vnode list, if on one.
*/
@@ -846,19 +852,18 @@ vget(vp, flags, p)
tsleep((caddr_t)vp, PINOD, "vget", 0);
return (ENOENT);
}
+
vp->v_usecount++;
+
if (VSHOULDBUSY(vp))
vbusy(vp);
/*
* Create the VM object, if needed
*/
- if ((vp->v_type == VREG) &&
+ if (((vp->v_type == VREG) || (vp->v_type == VBLK)) &&
((vp->v_object == NULL) ||
(vp->v_object->flags & OBJ_VFS_REF) == 0 ||
(vp->v_object->flags & OBJ_DEAD))) {
- /*
- * XXX vfs_object_create probably needs the interlock.
- */
simple_unlock(&vp->v_interlock);
vfs_object_create(vp, curproc, curproc->p_ucred, 0);
simple_lock(&vp->v_interlock);
@@ -871,119 +876,88 @@ vget(vp, flags, p)
simple_unlock(&vp->v_interlock);
return (0);
}
-/* #ifdef DIAGNOSTIC */
+
/*
- * Vnode reference, just increment the count
+ * Vnode put/release.
+ * If count drops to zero, call inactive routine and return to freelist.
*/
void
-vref(vp)
+vrele(vp)
struct vnode *vp;
{
+ struct proc *p = curproc; /* XXX */
+
+#ifdef DIAGNOSTIC
+ if (vp == NULL)
+ panic("vrele: null vp");
+#endif
simple_lock(&vp->v_interlock);
- if (vp->v_usecount <= 0)
- panic("vref used where vget required");
- vp->v_usecount++;
+ if (vp->v_usecount > 1) {
- if ((vp->v_type == VREG) &&
- ((vp->v_object == NULL) ||
- ((vp->v_object->flags & OBJ_VFS_REF) == 0) ||
- (vp->v_object->flags & OBJ_DEAD))) {
- /*
- * We need to lock to VP during the time that
- * the object is created. This is necessary to
- * keep the system from re-entrantly doing it
- * multiple times.
- * XXX vfs_object_create probably needs the interlock?
- */
+ vp->v_usecount--;
simple_unlock(&vp->v_interlock);
- vfs_object_create(vp, curproc, curproc->p_ucred, 0);
- return;
+
+ } else if (vp->v_usecount == 1) {
+
+ vp->v_usecount--;
+
+ if (VSHOULDFREE(vp))
+ vfree(vp);
+ /*
+ * If we are doing a vput, the node is already locked, and we must
+ * call VOP_INACTIVE with the node locked. So, in the case of
+ * vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
+ */
+ if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) {
+ VOP_INACTIVE(vp, p);
+ }
+
+ } else {
+#ifdef DIAGNOSTIC
+ vprint("vrele: negative ref count", vp);
+#endif
+ panic("vrele: negative ref cnt");
}
- simple_unlock(&vp->v_interlock);
}
-/*
- * Vnode put/release.
- * If count drops to zero, call inactive routine and return to freelist.
- */
-static void
-vputrele(vp, put)
+void
+vput(vp)
struct vnode *vp;
- int put;
{
struct proc *p = curproc; /* XXX */
#ifdef DIAGNOSTIC
if (vp == NULL)
- panic("vputrele: null vp");
+ panic("vput: null vp");
#endif
simple_lock(&vp->v_interlock);
- if ((vp->v_usecount == 2) &&
- vp->v_object &&
- (vp->v_object->flags & OBJ_VFS_REF)) {
+ if (vp->v_usecount > 1) {
- vm_freeze_copyopts(vp->v_object, 0, vp->v_object->size);
vp->v_usecount--;
- vp->v_object->flags &= ~OBJ_VFS_REF;
- if (put) {
- VOP_UNLOCK(vp, LK_INTERLOCK, p);
- } else {
- simple_unlock(&vp->v_interlock);
- }
- vm_object_deallocate(vp->v_object);
- return;
- }
+ VOP_UNLOCK(vp, LK_INTERLOCK, p);
- if (vp->v_usecount > 1) {
- vp->v_usecount--;
- if (put) {
- VOP_UNLOCK(vp, LK_INTERLOCK, p);
- } else {
- simple_unlock(&vp->v_interlock);
- }
- return;
- }
+ } else if (vp->v_usecount == 1) {
- if (vp->v_usecount < 1) {
-#ifdef DIAGNOSTIC
- vprint("vputrele: negative ref count", vp);
-#endif
- panic("vputrele: negative ref cnt");
- }
+ vp->v_usecount--;
- vp->v_usecount--;
- if (VSHOULDFREE(vp))
- vfree(vp);
+ if (VSHOULDFREE(vp))
+ vfree(vp);
/*
* If we are doing a vput, the node is already locked, and we must
* call VOP_INACTIVE with the node locked. So, in the case of
* vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
*/
- if (put) {
simple_unlock(&vp->v_interlock);
VOP_INACTIVE(vp, p);
- } else if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) {
- VOP_INACTIVE(vp, p);
- }
-}
-/*
- * vput(), just unlock and vrele()
- */
-void
-vput(vp)
- struct vnode *vp;
-{
- vputrele(vp, 1);
-}
-
-void
-vrele(vp)
- struct vnode *vp;
-{
- vputrele(vp, 0);
+ } else {
+#ifdef DIAGNOSTIC
+ vprint("vput: negative ref count", vp);
+#endif
+ panic("vput: negative ref cnt");
+ }
}
/*
@@ -1152,15 +1126,6 @@ vclean(vp, flags, p)
VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p);
object = vp->v_object;
- irefed = 0;
- if (object && ((object->flags & OBJ_DEAD) == 0)) {
- if (object->ref_count == 0) {
- vm_object_reference(object);
- irefed = 1;
- }
- ++object->ref_count;
- pager_cache(object, FALSE);
- }
/*
* Clean out any buffers associated with the vnode.
@@ -1168,7 +1133,8 @@ vclean(vp, flags, p)
if (flags & DOCLOSE)
vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
- if (irefed) {
+ if (vp->v_object && (vp->v_object->flags & OBJ_VFS_REF)) {
+ vp->v_object->flags &= ~OBJ_VFS_REF;
vm_object_deallocate(object);
}
@@ -1352,6 +1318,7 @@ vgonel(vp, p)
* Clean out the filesystem specific data.
*/
vclean(vp, DOCLOSE, p);
+
/*
* Delete from old mount point vnode list, if on one.
*/
@@ -2031,7 +1998,9 @@ loop:
continue;
if (vp->v_object &&
(vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
- vm_object_page_clean(vp->v_object, 0, 0, TRUE, TRUE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
+ vm_object_page_clean(vp->v_object, 0, 0, TRUE);
+ VOP_UNLOCK(vp, 0, curproc);
}
}
}
@@ -2053,6 +2022,9 @@ vfs_object_create(vp, p, cred, waslocked)
vm_object_t object;
int error = 0;
+ if ((vp->v_type != VREG) && (vp->v_type != VBLK))
+ return 0;
+
retry:
if ((object = vp->v_object) == NULL) {
if (vp->v_type == VREG) {
@@ -2060,6 +2032,7 @@ retry:
goto retn;
(void) vnode_pager_alloc(vp,
OFF_TO_IDX(round_page(vat.va_size)), 0, 0);
+ vp->v_object->flags |= OBJ_VFS_REF;
} else {
/*
* This simply allocates the biggest object possible
@@ -2067,8 +2040,8 @@ retry:
* cause any problems (yet).
*/
(void) vnode_pager_alloc(vp, INT_MAX, 0, 0);
+ vp->v_object->flags |= OBJ_VFS_REF;
}
- vp->v_object->flags |= OBJ_VFS_REF;
} else {
if (object->flags & OBJ_DEAD) {
if (waslocked)
@@ -2079,8 +2052,8 @@ retry:
goto retry;
}
if ((object->flags & OBJ_VFS_REF) == 0) {
- object->flags |= OBJ_VFS_REF;
vm_object_reference(object);
+ object->flags |= OBJ_VFS_REF;
}
}
if (vp->v_object)
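
The bulk of the vfs_export.c change retires the shared vputrele(vp, put) helper: vrele() and vput() now carry their own copies of the reference-count logic, and the OBJ_VFS_REF juggling that used to happen on the release path moves into vclean() and vfs_object_create(). The sketch below condenses the new vrele() flow; it is a simplified reading of the hunks above (the _sketch suffix marks it as illustration, not the committed code). vput() differs only in that the vnode arrives already locked, so it calls VOP_UNLOCK()/VOP_INACTIVE() directly instead of taking the lock itself.

/* Simplified sketch of the post-change vrele() logic. */
void
vrele_sketch(struct vnode *vp)
{
	struct proc *p = curproc;

	simple_lock(&vp->v_interlock);
	if (vp->v_usecount > 1) {
		/* Other holders remain: just drop this reference. */
		vp->v_usecount--;
		simple_unlock(&vp->v_interlock);
	} else if (vp->v_usecount == 1) {
		/* Last reference: queue for the free list and deactivate. */
		vp->v_usecount--;
		if (VSHOULDFREE(vp))
			vfree(vp);
		/*
		 * vrele() must lock the vnode itself before VOP_INACTIVE();
		 * vput() is entered with the vnode already locked.
		 */
		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0)
			VOP_INACTIVE(vp, p);
	} else {
		panic("vrele: negative ref cnt");
	}
}
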
diff --git a/sys/kern/vfs_extattr.c b/sys/kern/vfs_extattr.c
index 6d71a6c447a7..69751c4aff51 100644
--- a/sys/kern/vfs_extattr.c
+++ b/sys/kern/vfs_extattr.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94
- * $Id: vfs_syscalls.c,v 1.86 1997/12/16 17:40:31 eivind Exp $
+ * $Id: vfs_syscalls.c,v 1.87 1997/12/27 02:56:23 bde Exp $
*/
/* For 4.3 integer FS ID compatibility */
@@ -430,7 +430,6 @@ dounmount(mp, flags, p)
mp->mnt_flag &=~ MNT_ASYNC;
vfs_msync(mp, MNT_NOWAIT);
- vnode_pager_umount(mp); /* release cached vnodes */
cache_purgevfs(mp); /* remove cache entries for this file sys */
if (((mp->mnt_flag & MNT_RDONLY) ||
(error = VFS_SYNC(mp, MNT_WAIT, p->p_ucred, p)) == 0) ||
@@ -1263,8 +1262,6 @@ unlink(p, uap)
*/
if (vp->v_flag & VROOT)
error = EBUSY;
- else
- (void) vnode_pager_uncache(vp, p);
}
if (!error) {
@@ -2160,9 +2157,9 @@ fsync(p, uap)
if (error = getvnode(p->p_fd, SCARG(uap, fd), &fp))
return (error);
vp = (struct vnode *)fp->f_data;
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(vp, LK_EXCLUSIVE, p);
if (vp->v_object) {
- vm_object_page_clean(vp->v_object, 0, 0 ,0, FALSE);
+ vm_object_page_clean(vp->v_object, 0, 0 ,0);
}
error = VOP_FSYNC(vp, fp->f_cred,
(vp->v_mount && (vp->v_mount->mnt_flag & MNT_ASYNC)) ?
@@ -2242,7 +2239,6 @@ out:
VOP_LEASE(fromnd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
if (tvp) {
VOP_LEASE(tvp, p, p->p_ucred, LEASE_WRITE);
- (void) vnode_pager_uncache(tvp, p);
}
error = VOP_RENAME(fromnd.ni_dvp, fromnd.ni_vp, &fromnd.ni_cnd,
tond.ni_dvp, tond.ni_vp, &tond.ni_cnd);
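
Besides dropping the vnode_pager_umount()/vnode_pager_uncache() calls (the pager object now lives and dies with the vnode's OBJ_VFS_REF reference), fsync() changes how it flushes dirty pages: the vnode is locked without LK_RETRY and vm_object_page_clean() is called in its new four-argument form before VOP_FSYNC(). A condensed sketch of the resulting sequence follows; the trailing VOP_UNLOCK() comes from the unchanged code around the hunk and is shown only for context.

	/* Sketch of the fsync() core after this change (error checks trimmed). */
	vp = (struct vnode *)fp->f_data;
	vn_lock(vp, LK_EXCLUSIVE, p);			/* no LK_RETRY any more */
	if (vp->v_object)
		vm_object_page_clean(vp->v_object, 0, 0, 0);
	error = VOP_FSYNC(vp, fp->f_cred,
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_ASYNC)) ?
	    MNT_NOWAIT : MNT_WAIT, p);
	VOP_UNLOCK(vp, 0, p);
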
diff --git a/sys/kern/vfs_lookup.c b/sys/kern/vfs_lookup.c
index fcfd1da21e39..8ceedd4de7e1 100644
--- a/sys/kern/vfs_lookup.c
+++ b/sys/kern/vfs_lookup.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_lookup.c 8.4 (Berkeley) 2/16/94
- * $Id: vfs_lookup.c,v 1.20 1997/09/21 04:23:01 dyson Exp $
+ * $Id: vfs_lookup.c,v 1.21 1997/12/27 02:56:22 bde Exp $
*/
#include "opt_ktrace.h"
@@ -537,6 +537,9 @@ nextname:
}
if (!wantparent)
vrele(ndp->ni_dvp);
+
+ vfs_object_create(dp, ndp->ni_cnd.cn_proc, ndp->ni_cnd.cn_cred, 1);
+
if ((cnp->cn_flags & LOCKLEAF) == 0)
VOP_UNLOCK(dp, 0, p);
return (0);
@@ -683,6 +686,9 @@ relookup(dvp, vpp, cnp)
if (!wantparent)
vrele(dvp);
+
+ vfs_object_create(dp, cnp->cn_proc, cnp->cn_cred, 1);
+
if ((cnp->cn_flags & LOCKLEAF) == 0)
VOP_UNLOCK(dp, 0, p);
return (0);
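
lookup() and relookup() now attach a VM object to the vnode they resolve by calling vfs_object_create() unconditionally; that is safe because the companion vfs_subr.c change adds an early return for vnode types that never get a pager-backed object. A minimal sketch of that guard, with a _sketch suffix on the name to mark it as illustration rather than the committed function:

int
vfs_object_create_sketch(struct vnode *vp, struct proc *p, struct ucred *cred, int waslocked)
{
	/* Only regular files and block devices carry a vnode_pager object. */
	if ((vp->v_type != VREG) && (vp->v_type != VBLK))
		return (0);
	/*
	 * ... otherwise allocate or re-reference the object and set
	 * OBJ_VFS_REF, as in the vfs_subr.c hunks above ...
	 */
	return (0);
}
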
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index c2e536934d9a..f39c17d712dd 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
- * $Id: vfs_subr.c,v 1.115 1997/12/15 03:09:32 wollman Exp $
+ * $Id: vfs_subr.c,v 1.116 1997/12/19 09:03:28 dyson Exp $
*/
/*
@@ -57,6 +57,7 @@
#include <sys/poll.h>
#include <sys/domain.h>
#include <sys/dirent.h>
+#include <sys/vmmeter.h>
#include <machine/limits.h>
@@ -130,7 +131,7 @@ void
vntblinit()
{
- desiredvnodes = maxproc + vm_object_cache_max;
+ desiredvnodes = maxproc + cnt.v_page_count / 4;
simple_lock_init(&mntvnode_slock);
simple_lock_init(&mntid_slock);
simple_lock_init(&spechash_slock);
@@ -394,9 +395,9 @@ getnewvnode(tag, mp, vops, vpp)
simple_unlock(&vnode_free_list_slock);
cache_purge(vp);
vp->v_lease = NULL;
- if (vp->v_type != VBAD)
+ if (vp->v_type != VBAD) {
vgonel(vp, p);
- else {
+ } else {
simple_unlock(&vp->v_interlock);
}
@@ -588,8 +589,10 @@ bgetvp(vp, bp)
{
int s;
+#if defined(DIAGNOSTIC)
if (bp->b_vp)
panic("bgetvp: not free");
+#endif
vhold(vp);
bp->b_vp = vp;
if (vp->v_type == VBLK || vp->v_type == VCHR)
@@ -614,8 +617,11 @@ brelvp(bp)
struct vnode *vp;
int s;
+#if defined(DIAGNOSTIC)
if (bp->b_vp == (struct vnode *) 0)
panic("brelvp: NULL");
+#endif
+
/*
* Delete from old vnode list, if on one.
*/
@@ -846,19 +852,18 @@ vget(vp, flags, p)
tsleep((caddr_t)vp, PINOD, "vget", 0);
return (ENOENT);
}
+
vp->v_usecount++;
+
if (VSHOULDBUSY(vp))
vbusy(vp);
/*
* Create the VM object, if needed
*/
- if ((vp->v_type == VREG) &&
+ if (((vp->v_type == VREG) || (vp->v_type == VBLK)) &&
((vp->v_object == NULL) ||
(vp->v_object->flags & OBJ_VFS_REF) == 0 ||
(vp->v_object->flags & OBJ_DEAD))) {
- /*
- * XXX vfs_object_create probably needs the interlock.
- */
simple_unlock(&vp->v_interlock);
vfs_object_create(vp, curproc, curproc->p_ucred, 0);
simple_lock(&vp->v_interlock);
@@ -871,119 +876,88 @@ vget(vp, flags, p)
simple_unlock(&vp->v_interlock);
return (0);
}
-/* #ifdef DIAGNOSTIC */
+
/*
- * Vnode reference, just increment the count
+ * Vnode put/release.
+ * If count drops to zero, call inactive routine and return to freelist.
*/
void
-vref(vp)
+vrele(vp)
struct vnode *vp;
{
+ struct proc *p = curproc; /* XXX */
+
+#ifdef DIAGNOSTIC
+ if (vp == NULL)
+ panic("vrele: null vp");
+#endif
simple_lock(&vp->v_interlock);
- if (vp->v_usecount <= 0)
- panic("vref used where vget required");
- vp->v_usecount++;
+ if (vp->v_usecount > 1) {
- if ((vp->v_type == VREG) &&
- ((vp->v_object == NULL) ||
- ((vp->v_object->flags & OBJ_VFS_REF) == 0) ||
- (vp->v_object->flags & OBJ_DEAD))) {
- /*
- * We need to lock to VP during the time that
- * the object is created. This is necessary to
- * keep the system from re-entrantly doing it
- * multiple times.
- * XXX vfs_object_create probably needs the interlock?
- */
+ vp->v_usecount--;
simple_unlock(&vp->v_interlock);
- vfs_object_create(vp, curproc, curproc->p_ucred, 0);
- return;
+
+ } else if (vp->v_usecount == 1) {
+
+ vp->v_usecount--;
+
+ if (VSHOULDFREE(vp))
+ vfree(vp);
+ /*
+ * If we are doing a vput, the node is already locked, and we must
+ * call VOP_INACTIVE with the node locked. So, in the case of
+ * vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
+ */
+ if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) {
+ VOP_INACTIVE(vp, p);
+ }
+
+ } else {
+#ifdef DIAGNOSTIC
+ vprint("vrele: negative ref count", vp);
+#endif
+ panic("vrele: negative ref cnt");
}
- simple_unlock(&vp->v_interlock);
}
-/*
- * Vnode put/release.
- * If count drops to zero, call inactive routine and return to freelist.
- */
-static void
-vputrele(vp, put)
+void
+vput(vp)
struct vnode *vp;
- int put;
{
struct proc *p = curproc; /* XXX */
#ifdef DIAGNOSTIC
if (vp == NULL)
- panic("vputrele: null vp");
+ panic("vput: null vp");
#endif
simple_lock(&vp->v_interlock);
- if ((vp->v_usecount == 2) &&
- vp->v_object &&
- (vp->v_object->flags & OBJ_VFS_REF)) {
+ if (vp->v_usecount > 1) {
- vm_freeze_copyopts(vp->v_object, 0, vp->v_object->size);
vp->v_usecount--;
- vp->v_object->flags &= ~OBJ_VFS_REF;
- if (put) {
- VOP_UNLOCK(vp, LK_INTERLOCK, p);
- } else {
- simple_unlock(&vp->v_interlock);
- }
- vm_object_deallocate(vp->v_object);
- return;
- }
+ VOP_UNLOCK(vp, LK_INTERLOCK, p);
- if (vp->v_usecount > 1) {
- vp->v_usecount--;
- if (put) {
- VOP_UNLOCK(vp, LK_INTERLOCK, p);
- } else {
- simple_unlock(&vp->v_interlock);
- }
- return;
- }
+ } else if (vp->v_usecount == 1) {
- if (vp->v_usecount < 1) {
-#ifdef DIAGNOSTIC
- vprint("vputrele: negative ref count", vp);
-#endif
- panic("vputrele: negative ref cnt");
- }
+ vp->v_usecount--;
- vp->v_usecount--;
- if (VSHOULDFREE(vp))
- vfree(vp);
+ if (VSHOULDFREE(vp))
+ vfree(vp);
/*
* If we are doing a vput, the node is already locked, and we must
* call VOP_INACTIVE with the node locked. So, in the case of
* vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
*/
- if (put) {
simple_unlock(&vp->v_interlock);
VOP_INACTIVE(vp, p);
- } else if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) {
- VOP_INACTIVE(vp, p);
- }
-}
-/*
- * vput(), just unlock and vrele()
- */
-void
-vput(vp)
- struct vnode *vp;
-{
- vputrele(vp, 1);
-}
-
-void
-vrele(vp)
- struct vnode *vp;
-{
- vputrele(vp, 0);
+ } else {
+#ifdef DIAGNOSTIC
+ vprint("vput: negative ref count", vp);
+#endif
+ panic("vput: negative ref cnt");
+ }
}
/*
@@ -1152,15 +1126,6 @@ vclean(vp, flags, p)
VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p);
object = vp->v_object;
- irefed = 0;
- if (object && ((object->flags & OBJ_DEAD) == 0)) {
- if (object->ref_count == 0) {
- vm_object_reference(object);
- irefed = 1;
- }
- ++object->ref_count;
- pager_cache(object, FALSE);
- }
/*
* Clean out any buffers associated with the vnode.
@@ -1168,7 +1133,8 @@ vclean(vp, flags, p)
if (flags & DOCLOSE)
vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
- if (irefed) {
+ if (vp->v_object && (vp->v_object->flags & OBJ_VFS_REF)) {
+ vp->v_object->flags &= ~OBJ_VFS_REF;
vm_object_deallocate(object);
}
@@ -1352,6 +1318,7 @@ vgonel(vp, p)
* Clean out the filesystem specific data.
*/
vclean(vp, DOCLOSE, p);
+
/*
* Delete from old mount point vnode list, if on one.
*/
@@ -2031,7 +1998,9 @@ loop:
continue;
if (vp->v_object &&
(vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
- vm_object_page_clean(vp->v_object, 0, 0, TRUE, TRUE);
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
+ vm_object_page_clean(vp->v_object, 0, 0, TRUE);
+ VOP_UNLOCK(vp, 0, curproc);
}
}
}
@@ -2053,6 +2022,9 @@ vfs_object_create(vp, p, cred, waslocked)
vm_object_t object;
int error = 0;
+ if ((vp->v_type != VREG) && (vp->v_type != VBLK))
+ return 0;
+
retry:
if ((object = vp->v_object) == NULL) {
if (vp->v_type == VREG) {
@@ -2060,6 +2032,7 @@ retry:
goto retn;
(void) vnode_pager_alloc(vp,
OFF_TO_IDX(round_page(vat.va_size)), 0, 0);
+ vp->v_object->flags |= OBJ_VFS_REF;
} else {
/*
* This simply allocates the biggest object possible
@@ -2067,8 +2040,8 @@ retry:
* cause any problems (yet).
*/
(void) vnode_pager_alloc(vp, INT_MAX, 0, 0);
+ vp->v_object->flags |= OBJ_VFS_REF;
}
- vp->v_object->flags |= OBJ_VFS_REF;
} else {
if (object->flags & OBJ_DEAD) {
if (waslocked)
@@ -2079,8 +2052,8 @@ retry:
goto retry;
}
if ((object->flags & OBJ_VFS_REF) == 0) {
- object->flags |= OBJ_VFS_REF;
vm_object_reference(object);
+ object->flags |= OBJ_VFS_REF;
}
}
if (vp->v_object)
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 6d71a6c447a7..69751c4aff51 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94
- * $Id: vfs_syscalls.c,v 1.86 1997/12/16 17:40:31 eivind Exp $
+ * $Id: vfs_syscalls.c,v 1.87 1997/12/27 02:56:23 bde Exp $
*/
/* For 4.3 integer FS ID compatibility */
@@ -430,7 +430,6 @@ dounmount(mp, flags, p)
mp->mnt_flag &=~ MNT_ASYNC;
vfs_msync(mp, MNT_NOWAIT);
- vnode_pager_umount(mp); /* release cached vnodes */
cache_purgevfs(mp); /* remove cache entries for this file sys */
if (((mp->mnt_flag & MNT_RDONLY) ||
(error = VFS_SYNC(mp, MNT_WAIT, p->p_ucred, p)) == 0) ||
@@ -1263,8 +1262,6 @@ unlink(p, uap)
*/
if (vp->v_flag & VROOT)
error = EBUSY;
- else
- (void) vnode_pager_uncache(vp, p);
}
if (!error) {
@@ -2160,9 +2157,9 @@ fsync(p, uap)
if (error = getvnode(p->p_fd, SCARG(uap, fd), &fp))
return (error);
vp = (struct vnode *)fp->f_data;
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(vp, LK_EXCLUSIVE, p);
if (vp->v_object) {
- vm_object_page_clean(vp->v_object, 0, 0 ,0, FALSE);
+ vm_object_page_clean(vp->v_object, 0, 0 ,0);
}
error = VOP_FSYNC(vp, fp->f_cred,
(vp->v_mount && (vp->v_mount->mnt_flag & MNT_ASYNC)) ?
@@ -2242,7 +2239,6 @@ out:
VOP_LEASE(fromnd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
if (tvp) {
VOP_LEASE(tvp, p, p->p_ucred, LEASE_WRITE);
- (void) vnode_pager_uncache(tvp, p);
}
error = VOP_RENAME(fromnd.ni_dvp, fromnd.ni_vp, &fromnd.ni_cnd,
tond.ni_dvp, tond.ni_vp, &tond.ni_cnd);
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index 58dfad550cc3..460bdd7b67c4 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_vnops.c 8.2 (Berkeley) 1/21/94
- * $Id: vfs_vnops.c,v 1.42 1997/11/29 01:33:10 dyson Exp $
+ * $Id: vfs_vnops.c,v 1.43 1997/12/06 04:11:11 sef Exp $
*/
#include <sys/param.h>
@@ -510,7 +510,9 @@ vn_lock(vp, flags, p)
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
simple_unlock(&vp->v_interlock);
- tsleep((caddr_t)vp, PINOD, "vn_lock", 0);
+ if (tsleep((caddr_t)vp, PINOD, "vn_lock", 100*5)) {
+ vprint("vn_lock:", vp);
+ }
error = ENOENT;
} else {
error = VOP_LOCK(vp, flags | LK_NOPAUSE | LK_INTERLOCK, p);
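
Finally, vn_lock() no longer sleeps forever on a vnode that is being cleaned (VXLOCK): the tsleep() gets a 100*5 tick timeout (five seconds under the customary hz=100, an assumption rather than something stated in the diff) and prints the vnode when the timeout fires, so a wedged vnode lock shows up on the console instead of hanging the caller silently. Sketch of the changed wait, following the hunk above:

	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		simple_unlock(&vp->v_interlock);
		/* Bounded sleep: complain if the cleaner never wakes us. */
		if (tsleep((caddr_t)vp, PINOD, "vn_lock", 100 * 5))
			vprint("vn_lock:", vp);
		error = ENOENT;
	} else {
		error = VOP_LOCK(vp, flags | LK_NOPAUSE | LK_INTERLOCK, p);
	}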