about summary refs log tree commit diff
path: root/sys/kern/vfs_subr.c
diff options
context:
space:
mode:
Diffstat (limited to 'sys/kern/vfs_subr.c')
-rw-r--r--  sys/kern/vfs_subr.c  367
1 file changed, 209 insertions, 158 deletions
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index b749976c9249..59f6232bfea5 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.13 (Berkeley) 4/18/94
- * $Id: vfs_subr.c,v 1.12 1994/10/06 21:06:37 davidg Exp $
+ * $Id: vfs_subr.c,v 1.13 1994/12/23 04:52:55 davidg Exp $
*/
/*
@@ -63,13 +63,13 @@
#include <miscfs/specfs/specdev.h>
-void insmntque __P((struct vnode *, struct mount *));
+void insmntque __P((struct vnode *, struct mount *));
enum vtype iftovt_tab[16] = {
VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
-int vttoif_tab[9] = {
+int vttoif_tab[9] = {
0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
S_IFSOCK, S_IFIFO, S_IFMT,
};
@@ -84,7 +84,9 @@ int vttoif_tab[9] = {
}
TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */
-struct mntlist mountlist; /* mounted filesystem list */
+struct mntlist mountlist; /* mounted filesystem list */
+
+int desiredvnodes;
/*
* Initialize the vnode management data structures.
@@ -92,6 +94,9 @@ struct mntlist mountlist; /* mounted filesystem list */
void
vntblinit()
{
+ extern int vm_object_cache_max;
+
+ desiredvnodes = maxproc + vm_object_cache_max;
TAILQ_INIT(&vnode_free_list);
TAILQ_INIT(&mountlist);
@@ -106,9 +111,9 @@ vfs_lock(mp)
register struct mount *mp;
{
- while(mp->mnt_flag & MNT_MLOCK) {
+ while (mp->mnt_flag & MNT_MLOCK) {
mp->mnt_flag |= MNT_MWAIT;
- (void) tsleep((caddr_t)mp, PVFS, "vfslck", 0);
+ (void) tsleep((caddr_t) mp, PVFS, "vfslck", 0);
}
mp->mnt_flag |= MNT_MLOCK;
return (0);
@@ -128,7 +133,7 @@ vfs_unlock(mp)
mp->mnt_flag &= ~MNT_MLOCK;
if (mp->mnt_flag & MNT_MWAIT) {
mp->mnt_flag &= ~MNT_MWAIT;
- wakeup((caddr_t)mp);
+ wakeup((caddr_t) mp);
}
}
@@ -141,9 +146,9 @@ vfs_busy(mp)
register struct mount *mp;
{
- while(mp->mnt_flag & MNT_MPBUSY) {
+ while (mp->mnt_flag & MNT_MPBUSY) {
mp->mnt_flag |= MNT_MPWANT;
- (void) tsleep((caddr_t)&mp->mnt_flag, PVFS, "vfsbsy", 0);
+ (void) tsleep((caddr_t) & mp->mnt_flag, PVFS, "vfsbsy", 0);
}
if (mp->mnt_flag & MNT_UNMOUNT)
return (1);
@@ -165,7 +170,7 @@ vfs_unbusy(mp)
mp->mnt_flag &= ~MNT_MPBUSY;
if (mp->mnt_flag & MNT_MPWANT) {
mp->mnt_flag &= ~MNT_MPWANT;
- wakeup((caddr_t)&mp->mnt_flag);
+ wakeup((caddr_t) & mp->mnt_flag);
}
}
@@ -173,20 +178,18 @@ void
vfs_unmountroot(rootfs)
struct mount *rootfs;
{
- struct mount *mp = rootfs;
- int error;
+ struct mount *mp = rootfs;
+ int error;
if (vfs_busy(mp)) {
printf("failed to unmount root\n");
return;
}
-
mp->mnt_flag |= MNT_UNMOUNT;
if ((error = vfs_lock(mp))) {
printf("lock of root filesystem failed (%d)\n", error);
return;
}
-
vnode_pager_umount(mp); /* release cached vnodes */
cache_purgevfs(mp); /* remove cache entries for this file sys */
@@ -200,7 +203,6 @@ vfs_unmountroot(rootfs)
else
printf("%d)\n", error);
}
-
mp->mnt_flag &= ~MNT_UNMOUNT;
vfs_unbusy(mp);
}
@@ -222,7 +224,6 @@ vfs_unmountall()
rootfs = mp;
continue;
}
-
error = dounmount(mp, MNT_FORCE, initproc);
if (error) {
printf("unmount of %s failed (", mp->mnt_stat.f_mntonname);
@@ -255,7 +256,7 @@ getvfs(fsid)
mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
return (mp);
}
- return ((struct mount *)0);
+ return ((struct mount *) 0);
}
/*
@@ -266,7 +267,7 @@ getnewfsid(mp, mtype)
struct mount *mp;
int mtype;
{
-static u_short xxxfs_mntid;
+ static u_short xxxfs_mntid;
fsid_t tfsid;
@@ -297,19 +298,19 @@ vattr_null(vap)
vap->va_size = VNOVAL;
vap->va_bytes = VNOVAL;
vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
- vap->va_fsid = vap->va_fileid =
- vap->va_blocksize = vap->va_rdev =
- vap->va_atime.ts_sec = vap->va_atime.ts_nsec =
- vap->va_mtime.ts_sec = vap->va_mtime.ts_nsec =
- vap->va_ctime.ts_sec = vap->va_ctime.ts_nsec =
- vap->va_flags = vap->va_gen = VNOVAL;
+ vap->va_fsid = vap->va_fileid =
+ vap->va_blocksize = vap->va_rdev =
+ vap->va_atime.ts_sec = vap->va_atime.ts_nsec =
+ vap->va_mtime.ts_sec = vap->va_mtime.ts_nsec =
+ vap->va_ctime.ts_sec = vap->va_ctime.ts_nsec =
+ vap->va_flags = vap->va_gen = VNOVAL;
vap->va_vaflags = 0;
}
/*
* Routines having to do with the management of the vnode table.
*/
-extern int (**dead_vnodeop_p)();
+extern int (**dead_vnodeop_p) ();
extern void vclean();
long numvnodes;
@@ -320,17 +321,16 @@ int
getnewvnode(tag, mp, vops, vpp)
enum vtagtype tag;
struct mount *mp;
- int (**vops)();
+ int (**vops) ();
struct vnode **vpp;
{
register struct vnode *vp;
- if ((vnode_free_list.tqh_first == NULL &&
- numvnodes < 2 * desiredvnodes) ||
+ if (vnode_free_list.tqh_first == NULL ||
numvnodes < desiredvnodes) {
- vp = (struct vnode *)malloc((u_long)sizeof *vp,
+ vp = (struct vnode *) malloc((u_long) sizeof *vp,
M_VNODE, M_WAITOK);
- bzero((char *)vp, sizeof *vp);
+ bzero((char *) vp, sizeof *vp);
numvnodes++;
} else {
if ((vp = vnode_free_list.tqh_first) == NULL) {
@@ -340,21 +340,23 @@ getnewvnode(tag, mp, vops, vpp)
}
if (vp->v_usecount)
panic("free vnode isn't");
+
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
/* see comment on why 0xdeadb is set at end of vgone (below) */
- vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
+ vp->v_freelist.tqe_prev = (struct vnode **) 0xdeadb;
vp->v_lease = NULL;
if (vp->v_type != VBAD)
vgone(vp);
#ifdef DIAGNOSTIC
{
- int s;
- if (vp->v_data)
- panic("cleaned vnode isn't");
- s = splbio();
- if (vp->v_numoutput)
- panic("Clean vnode has pending I/O's");
- splx(s);
+ int s;
+
+ if (vp->v_data)
+ panic("cleaned vnode isn't");
+ s = splbio();
+ if (vp->v_numoutput)
+ panic("Clean vnode has pending I/O's");
+ splx(s);
}
#endif
vp->v_flag = 0;
@@ -366,7 +368,7 @@ getnewvnode(tag, mp, vops, vpp)
vp->v_cstart = 0;
vp->v_clen = 0;
vp->v_socket = 0;
- vp->v_writecount = 0; /* XXX */
+ vp->v_writecount = 0; /* XXX */
}
vp->v_type = VNON;
cache_purge(vp);
@@ -415,11 +417,9 @@ vwakeup(bp)
vp->v_numoutput--;
if (vp->v_numoutput < 0)
panic("vwakeup: neg numoutput");
- if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
- if (vp->v_numoutput < 0)
- panic("vwakeup: neg numoutput");
+ if (vp->v_flag & VBWAIT) {
vp->v_flag &= ~VBWAIT;
- wakeup((caddr_t)&vp->v_numoutput);
+ wakeup((caddr_t) & vp->v_numoutput);
}
}
}
@@ -452,7 +452,7 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
if ((blist = vp->v_cleanblkhd.lh_first) && (flags & V_SAVEMETA))
while (blist && blist->b_lblkno < 0)
blist = blist->b_vnbufs.le_next;
- if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
+ if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
(flags & V_SAVEMETA))
while (blist && blist->b_lblkno < 0)
blist = blist->b_vnbufs.le_next;
@@ -466,9 +466,9 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
s = splbio();
if (bp->b_flags & B_BUSY) {
bp->b_flags |= B_WANTED;
- error = tsleep((caddr_t)bp,
- slpflag | (PRIBIO + 1), "vinvalbuf",
- slptimeo);
+ error = tsleep((caddr_t) bp,
+ slpflag | (PRIBIO + 1), "vinvalbuf",
+ slptimeo);
splx(s);
if (error)
return (error);
@@ -478,9 +478,10 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
bp->b_flags |= B_BUSY;
splx(s);
/*
- * XXX Since there are no node locks for NFS, I believe
- * there is a slight chance that a delayed write will
- * occur while sleeping just above, so check for it.
+ * XXX Since there are no node locks for NFS, I
+ * believe there is a slight chance that a delayed
+ * write will occur while sleeping just above, so
+ * check for it.
*/
if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
(void) VOP_BWRITE(bp);
@@ -491,9 +492,17 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
}
}
+
+ s = splbio();
+ while (vp->v_numoutput > 0) {
+ vp->v_flag |= VBWAIT;
+ tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
+ }
+ splx(s);
+
pager = NULL;
- object = (vm_object_t)vp->v_vmdata;
- if( object != NULL)
+ object = (vm_object_t) vp->v_vmdata;
+ if (object != NULL)
pager = object->pager;
if (pager != NULL) {
object = vm_object_lookup(pager);
@@ -506,7 +515,6 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
vm_object_deallocate(object);
}
}
-
if (!(flags & V_SAVEMETA) &&
(vp->v_dirtyblkhd.lh_first || vp->v_cleanblkhd.lh_first))
panic("vinvalbuf: flush failed");
@@ -565,6 +573,41 @@ brelvp(bp)
}
/*
+ * Associate a p-buffer with a vnode.
+ */
+void
+pbgetvp(vp, bp)
+ register struct vnode *vp;
+ register struct buf *bp;
+{
+ if (bp->b_vp)
+ panic("pbgetvp: not free");
+ VHOLD(vp);
+ bp->b_vp = vp;
+ if (vp->v_type == VBLK || vp->v_type == VCHR)
+ bp->b_dev = vp->v_rdev;
+ else
+ bp->b_dev = NODEV;
+}
+
+/*
+ * Disassociate a p-buffer from a vnode.
+ */
+void
+pbrelvp(bp)
+ register struct buf *bp;
+{
+ struct vnode *vp;
+
+ if (bp->b_vp == (struct vnode *) 0)
+ panic("brelvp: NULL");
+
+ vp = bp->b_vp;
+ bp->b_vp = (struct vnode *) 0;
+ HOLDRELE(vp);
+}
+
+/*
* Reassign a buffer from one vnode to another.
* Used to assign file specific control information
* (indirect blocks) to the vnode to which they belong.
@@ -586,14 +629,25 @@ reassignbuf(bp, newvp)
if (bp->b_vnbufs.le_next != NOLIST)
bufremvn(bp);
/*
- * If dirty, put on list of dirty buffers;
- * otherwise insert onto list of clean buffers.
+ * If dirty, put on list of dirty buffers; otherwise insert onto list
+ * of clean buffers.
*/
- if (bp->b_flags & B_DELWRI)
- listheadp = &newvp->v_dirtyblkhd;
- else
+ if (bp->b_flags & B_DELWRI) {
+ struct buf *tbp;
+
+ tbp = newvp->v_dirtyblkhd.lh_first;
+ if (!tbp || (tbp->b_lblkno > bp->b_lblkno)) {
+ bufinsvn(bp, &newvp->v_dirtyblkhd);
+ } else {
+ while (tbp->b_vnbufs.le_next && (tbp->b_vnbufs.le_next->b_lblkno < bp->b_lblkno)) {
+ tbp = tbp->b_vnbufs.le_next;
+ }
+ LIST_INSERT_AFTER(tbp, bp, b_vnbufs);
+ }
+ } else {
listheadp = &newvp->v_cleanblkhd;
- bufinsvn(bp, listheadp);
+ bufinsvn(bp, listheadp);
+ }
}
/*
@@ -612,14 +666,14 @@ bdevvp(dev, vpp)
if (dev == NODEV)
return (0);
- error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp);
+ error = getnewvnode(VT_NON, (struct mount *) 0, spec_vnodeop_p, &nvp);
if (error) {
*vpp = 0;
return (error);
}
vp = nvp;
vp->v_type = VBLK;
- if ((nvp = checkalias(vp, dev, (struct mount *)0))) {
+ if ((nvp = checkalias(vp, dev, (struct mount *) 0))) {
vput(vp);
vp = nvp;
}
@@ -665,7 +719,7 @@ loop:
}
if (vp == NULL || vp->v_tag != VT_NON) {
MALLOC(nvp->v_specinfo, struct specinfo *,
- sizeof(struct specinfo), M_VNODE, M_WAITOK);
+ sizeof(struct specinfo), M_VNODE, M_WAITOK);
nvp->v_rdev = nvp_rdev;
nvp->v_hashchain = vpp;
nvp->v_specnext = *vpp;
@@ -702,20 +756,19 @@ vget(vp, lockflag)
{
/*
- * If the vnode is in the process of being cleaned out for
- * another use, we wait for the cleaning to finish and then
- * return failure. Cleaning is determined either by checking
- * that the VXLOCK flag is set, or that the use count is
- * zero with the back pointer set to show that it has been
- * removed from the free list by getnewvnode. The VXLOCK
- * flag may not have been set yet because vclean is blocked in
- * the VOP_LOCK call waiting for the VOP_INACTIVE to complete.
+ * If the vnode is in the process of being cleaned out for another
+ * use, we wait for the cleaning to finish and then return failure.
+ * Cleaning is determined either by checking that the VXLOCK flag is
+ * set, or that the use count is zero with the back pointer set to
+ * show that it has been removed from the free list by getnewvnode.
+ * The VXLOCK flag may not have been set yet because vclean is blocked
+ * in the VOP_LOCK call waiting for the VOP_INACTIVE to complete.
*/
if ((vp->v_flag & VXLOCK) ||
(vp->v_usecount == 0 &&
- vp->v_freelist.tqe_prev == (struct vnode **)0xdeadb)) {
+ vp->v_freelist.tqe_prev == (struct vnode **) 0xdeadb)) {
vp->v_flag |= VXWANT;
- (void) tsleep((caddr_t)vp, PINOD, "vget", 0);
+ (void) tsleep((caddr_t) vp, PINOD, "vget", 0);
return (1);
}
if (vp->v_usecount == 0)
@@ -768,7 +821,7 @@ vrele(vp)
if (vp->v_usecount > 0)
return;
#ifdef DIAGNOSTIC
- if (vp->v_usecount != 0 /* || vp->v_writecount != 0 */) {
+ if (vp->v_usecount != 0 /* || vp->v_writecount != 0 */ ) {
vprint("vrele: bad ref count", vp);
panic("vrele: ref cnt");
}
@@ -813,8 +866,9 @@ holdrele(vp)
* that are found.
*/
#ifdef DIAGNOSTIC
-int busyprt = 0; /* print out busy vnodes */
-struct ctldebug debug1 = { "busyprt", &busyprt };
+int busyprt = 0; /* print out busy vnodes */
+struct ctldebug debug1 = {"busyprt", &busyprt};
+
#endif
int
@@ -844,24 +898,24 @@ loop:
if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
continue;
/*
- * If WRITECLOSE is set, only flush out regular file
- * vnodes open for writing.
+ * If WRITECLOSE is set, only flush out regular file vnodes
+ * open for writing.
*/
if ((flags & WRITECLOSE) &&
(vp->v_writecount == 0 || vp->v_type != VREG))
continue;
/*
- * With v_usecount == 0, all we need to do is clear
- * out the vnode data structures and we are done.
+ * With v_usecount == 0, all we need to do is clear out the
+ * vnode data structures and we are done.
*/
if (vp->v_usecount == 0) {
vgone(vp);
continue;
}
/*
- * If FORCECLOSE is set, forcibly close the vnode.
- * For block or character devices, revert to an
- * anonymous device. For all other files, just kill them.
+ * If FORCECLOSE is set, forcibly close the vnode. For block
+ * or character devices, revert to an anonymous device. For
+ * all other files, just kill them.
*/
if (flags & FORCECLOSE) {
if (vp->v_type != VBLK && vp->v_type != VCHR) {
@@ -869,7 +923,7 @@ loop:
} else {
vclean(vp, 0);
vp->v_op = spec_vnodeop_p;
- insmntque(vp, (struct mount *)0);
+ insmntque(vp, (struct mount *) 0);
}
continue;
}
@@ -895,24 +949,23 @@ vclean(vp, flags)
int active;
/*
- * Check to see if the vnode is in use.
- * If so we have to reference it before we clean it out
- * so that its count cannot fall to zero and generate a
- * race against ourselves to recycle it.
+ * Check to see if the vnode is in use. If so we have to reference it
+ * before we clean it out so that its count cannot fall to zero and
+ * generate a race against ourselves to recycle it.
*/
if ((active = vp->v_usecount))
VREF(vp);
/*
- * Even if the count is zero, the VOP_INACTIVE routine may still
- * have the object locked while it cleans it out. The VOP_LOCK
- * ensures that the VOP_INACTIVE routine is done with its work.
- * For active vnodes, it ensures that no other activity can
- * occur while the underlying object is being cleaned out.
+ * Even if the count is zero, the VOP_INACTIVE routine may still have
+ * the object locked while it cleans it out. The VOP_LOCK ensures that
+ * the VOP_INACTIVE routine is done with its work. For active vnodes,
+ * it ensures that no other activity can occur while the underlying
+ * object is being cleaned out.
*/
VOP_LOCK(vp);
/*
- * Prevent the vnode from being recycled or
- * brought into use while we clean it out.
+ * Prevent the vnode from being recycled or brought into use while we
+ * clean it out.
*/
if (vp->v_flag & VXLOCK)
panic("vclean: deadlock");
@@ -923,13 +976,13 @@ vclean(vp, flags)
if (flags & DOCLOSE)
vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
/*
- * Any other processes trying to obtain this lock must first
- * wait for VXLOCK to clear, then call the new lock operation.
+ * Any other processes trying to obtain this lock must first wait for
+ * VXLOCK to clear, then call the new lock operation.
*/
VOP_UNLOCK(vp);
/*
- * If purging an active vnode, it must be closed and
- * deactivated before being reclaimed.
+ * If purging an active vnode, it must be closed and deactivated
+ * before being reclaimed.
*/
if (active) {
if (flags & DOCLOSE)
@@ -952,7 +1005,7 @@ vclean(vp, flags)
vp->v_flag &= ~VXLOCK;
if (vp->v_flag & VXWANT) {
vp->v_flag &= ~VXWANT;
- wakeup((caddr_t)vp);
+ wakeup((caddr_t) vp);
}
}
@@ -968,17 +1021,17 @@ vgoneall(vp)
if (vp->v_flag & VALIASED) {
/*
- * If a vgone (or vclean) is already in progress,
- * wait until it is done and return.
+ * If a vgone (or vclean) is already in progress, wait until
+ * it is done and return.
*/
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
- (void) tsleep((caddr_t)vp, PINOD, "vgall", 0);
+ (void) tsleep((caddr_t) vp, PINOD, "vgall", 0);
return;
}
/*
- * Ensure that vp will not be vgone'd while we
- * are eliminating its aliases.
+ * Ensure that vp will not be vgone'd while we are eliminating
+ * its aliases.
*/
vp->v_flag |= VXLOCK;
while (vp->v_flag & VALIASED) {
@@ -991,9 +1044,8 @@ vgoneall(vp)
}
}
/*
- * Remove the lock so that vgone below will
- * really eliminate the vnode after which time
- * vgone will awaken any sleepers.
+ * Remove the lock so that vgone below will really eliminate
+ * the vnode after which time vgone will awaken any sleepers.
*/
vp->v_flag &= ~VXLOCK;
}
@@ -1012,12 +1064,12 @@ vgone(vp)
struct vnode *vx;
/*
- * If a vgone (or vclean) is already in progress,
- * wait until it is done and return.
+ * If a vgone (or vclean) is already in progress, wait until it is
+ * done and return.
*/
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
- (void) tsleep((caddr_t)vp, PINOD, "vgone", 0);
+ (void) tsleep((caddr_t) vp, PINOD, "vgone", 0);
return;
}
/*
@@ -1067,20 +1119,18 @@ vgone(vp)
vp->v_specinfo = NULL;
}
/*
- * If it is on the freelist and not already at the head,
- * move it to the head of the list. The test of the back
- * pointer and the reference count of zero is because
- * it will be removed from the free list by getnewvnode,
- * but will not have its reference count incremented until
- * after calling vgone. If the reference count were
- * incremented first, vgone would (incorrectly) try to
- * close the previous instance of the underlying object.
- * So, the back pointer is explicitly set to `0xdeadb' in
- * getnewvnode after removing it from the freelist to ensure
- * that we do not try to move it here.
+ * If it is on the freelist and not already at the head, move it to
+ * the head of the list. The test of the back pointer and the
+ * reference count of zero is because it will be removed from the free
+ * list by getnewvnode, but will not have its reference count
+ * incremented until after calling vgone. If the reference count were
+ * incremented first, vgone would (incorrectly) try to close the
+ * previous instance of the underlying object. So, the back pointer is
+ * explicitly set to `0xdeadb' in getnewvnode after removing it from
+ * the freelist to ensure that we do not try to move it here.
*/
if (vp->v_usecount == 0 &&
- vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb &&
+ vp->v_freelist.tqe_prev != (struct vnode **) 0xdeadb &&
vnode_free_list.tqh_first != vp) {
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
@@ -1141,7 +1191,7 @@ loop:
* Print out a description of a vnode.
*/
static char *typename[] =
- { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
+{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
void
vprint(label, vp)
@@ -1153,8 +1203,8 @@ vprint(label, vp)
if (label != NULL)
printf("%s: ", label);
printf("type %s, usecount %d, writecount %d, refcount %ld,",
- typename[vp->v_type], vp->v_usecount, vp->v_writecount,
- vp->v_holdcnt);
+ typename[vp->v_type], vp->v_usecount, vp->v_writecount,
+ vp->v_holdcnt);
buf[0] = '\0';
if (vp->v_flag & VROOT)
strcat(buf, "|VROOT");
@@ -1194,16 +1244,17 @@ printlockedvnodes()
printf("Locked vnodes\n");
for (mp = mountlist.tqh_first; mp != NULL; mp = mp->mnt_list.tqe_next) {
for (vp = mp->mnt_vnodelist.lh_first;
- vp != NULL;
- vp = vp->v_mntvnodes.le_next)
+ vp != NULL;
+ vp = vp->v_mntvnodes.le_next)
if (VOP_ISLOCKED(vp))
- vprint((char *)0, vp);
+ vprint((char *) 0, vp);
}
}
#endif
int kinfo_vdebug = 1;
int kinfo_vgetfailed;
+
#define KINFO_VNODESLOP 10
/*
* Dump vnode list (via sysctl).
@@ -1228,7 +1279,7 @@ sysctl_vnode(where, sizep)
return (0);
}
ewhere = where + *sizep;
-
+
for (mp = mountlist.tqh_first; mp != NULL; mp = nmp) {
nmp = mp->mnt_list.tqe_next;
if (vfs_busy(mp))
@@ -1236,12 +1287,12 @@ sysctl_vnode(where, sizep)
savebp = bp;
again:
for (vp = mp->mnt_vnodelist.lh_first;
- vp != NULL;
- vp = vp->v_mntvnodes.le_next) {
+ vp != NULL;
+ vp = vp->v_mntvnodes.le_next) {
/*
- * Check that the vp is still associated with
- * this filesystem. RACE: could have been
- * recycled onto the same filesystem.
+ * Check that the vp is still associated with this
+ * filesystem. RACE: could have been recycled onto
+ * the same filesystem.
*/
if (vp->v_mount != mp) {
if (kinfo_vdebug)
@@ -1253,8 +1304,8 @@ again:
*sizep = bp - where;
return (ENOMEM);
}
- if ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) ||
- (error = copyout((caddr_t)vp, bp + VPTRSZ, VNODESZ)))
+ if ((error = copyout((caddr_t) & vp, bp, VPTRSZ)) ||
+ (error = copyout((caddr_t) vp, bp + VPTRSZ, VNODESZ)))
return (error);
bp += VPTRSZ + VNODESZ;
}
@@ -1317,16 +1368,16 @@ vfs_hang_addrlist(mp, nep, argp)
return (0);
}
i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
- np = (struct netcred *)malloc(i, M_NETADDR, M_WAITOK);
- bzero((caddr_t)np, i);
- saddr = (struct sockaddr *)(np + 1);
- if ((error = copyin(argp->ex_addr, (caddr_t)saddr, argp->ex_addrlen)))
+ np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK);
+ bzero((caddr_t) np, i);
+ saddr = (struct sockaddr *) (np + 1);
+ if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
goto out;
if (saddr->sa_len > argp->ex_addrlen)
saddr->sa_len = argp->ex_addrlen;
if (argp->ex_masklen) {
- smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
- error = copyin(argp->ex_addr, (caddr_t)smask, argp->ex_masklen);
+ smask = (struct sockaddr *) ((caddr_t) saddr + argp->ex_addrlen);
+ error = copyin(argp->ex_addr, (caddr_t) smask, argp->ex_masklen);
if (error)
goto out;
if (smask->sa_len > argp->ex_masklen)
@@ -1335,13 +1386,13 @@ vfs_hang_addrlist(mp, nep, argp)
i = saddr->sa_family;
if ((rnh = nep->ne_rtable[i]) == 0) {
/*
- * Seems silly to initialize every AF when most are not
- * used, do so on demand here
+ * Seems silly to initialize every AF when most are not used,
+ * do so on demand here
*/
for (dom = domains; dom; dom = dom->dom_next)
if (dom->dom_family == i && dom->dom_rtattach) {
- dom->dom_rtattach((void **)&nep->ne_rtable[i],
- dom->dom_rtoffset);
+ dom->dom_rtattach((void **) &nep->ne_rtable[i],
+ dom->dom_rtoffset);
break;
}
if ((rnh = nep->ne_rtable[i]) == 0) {
@@ -1349,9 +1400,9 @@ vfs_hang_addrlist(mp, nep, argp)
goto out;
}
}
- rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh,
- np->netc_rnodes);
- if (rn == 0 || np != (struct netcred *)rn) { /* already exists */
+ rn = (*rnh->rnh_addaddr) ((caddr_t) saddr, (caddr_t) smask, rnh,
+ np->netc_rnodes);
+ if (rn == 0 || np != (struct netcred *) rn) { /* already exists */
error = EPERM;
goto out;
}
@@ -1370,13 +1421,13 @@ vfs_free_netcred(rn, w)
struct radix_node *rn;
caddr_t w;
{
- register struct radix_node_head *rnh = (struct radix_node_head *)w;
+ register struct radix_node_head *rnh = (struct radix_node_head *) w;
- (*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
- free((caddr_t)rn, M_NETADDR);
+ (*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
+ free((caddr_t) rn, M_NETADDR);
return (0);
}
-
+
/*
* Free the net address hash lists that are hanging off the mount points.
*/
@@ -1389,9 +1440,9 @@ vfs_free_addrlist(nep)
for (i = 0; i <= AF_MAX; i++)
if ((rnh = nep->ne_rtable[i])) {
- (*rnh->rnh_walktree)(rnh, vfs_free_netcred,
- (caddr_t)rnh);
- free((caddr_t)rnh, M_RTABLE);
+ (*rnh->rnh_walktree) (rnh, vfs_free_netcred,
+ (caddr_t) rnh);
+ free((caddr_t) rnh, M_RTABLE);
nep->ne_rtable[i] = 0;
}
}
@@ -1436,8 +1487,8 @@ vfs_export_lookup(mp, nep, nam)
rnh = nep->ne_rtable[saddr->sa_family];
if (rnh != NULL) {
np = (struct netcred *)
- (*rnh->rnh_matchaddr)((caddr_t)saddr,
- rnh);
+ (*rnh->rnh_matchaddr) ((caddr_t) saddr,
+ rnh);
if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
np = NULL;
}