diff options
author | John Dyson <dyson@FreeBSD.org> | 1998-03-07 21:37:31 +0000 |
---|---|---|
committer | John Dyson <dyson@FreeBSD.org> | 1998-03-07 21:37:31 +0000 |
commit | 8f9110f6a1b1bd7517643f1bde7fe2db25727efd (patch) | |
tree | 84886456070014690c43b27edb8a5dea62236aac /sys/vm/vm_page.c | |
parent | 051b1b1a7498201648f15c19a9880a463051e52c (diff) | |
download | src-8f9110f6a1b1bd7517643f1bde7fe2db25727efd.tar.gz src-8f9110f6a1b1bd7517643f1bde7fe2db25727efd.zip |
This mega-commit is meant to fix numerous interrelated problems. There
has been some bitrot and incorrect assumptions in the vfs_bio code. These
problems have manifested themselves worse on NFS type filesystems, but can
still affect local filesystems under certain circumstances. Most of
the problems have involved mmap consistency, and as a side-effect broke
the vfs.ioopt code. This code might have been committed separately, but
almost everything is interrelated.
1) Allow (pmap_object_init_pt) prefaulting of buffer-busy pages that
are fully valid.
2) Rather than deactivating erroneously read initial (header) pages in
kern_exec, we now free them.
3) Fix the rundown of non-VMIO buffers that are in an inconsistent
(missing vp) state.
4) Fix the disassociation of pages from buffers in brelse. The previous
code had rotted and was faulty in a couple of important circumstances.
5) Remove a gratuitous buffer wakeup in vfs_vmio_release.
6) Remove a crufty and currently unused cluster mechanism for VBLK
files in vfs_bio_awrite. When the code is functional, I'll add back
a cleaner version.
7) The page busy count wakeups associated with the buffer cache usage were
incorrectly cleaned up in a previous commit by me. Revert to the
original, correct version, but with a cleaner implementation.
8) The cluster read code now tries to keep data associated with buffers
more aggressively (without breaking the heuristics) when it is presumed
that the read data (buffers) will be soon needed.
9) Change to filesystem lockmgr locks so that they use LK_NOPAUSE. The
delay loop waiting is not useful for filesystem locks, due to the
length of the time intervals.
10) Correct and clean-up spec_getpages.
11) Implement a fully functional nfs_getpages, nfs_putpages.
12) Fix nfs_write so that modifications are coherent with the NFS data on
the server disk (at least as well as NFS seems to allow.)
13) Properly support MS_INVALIDATE on NFS.
14) Properly pass down MS_INVALIDATE to lower levels of the VM code from
vm_map_clean.
15) Better support the notion of pages being busy but valid, so that
fewer in-transit waits occur. (use p->busy more for pageouts instead
of PG_BUSY.) Since the page is fully valid, it is still usable for
reads.
16) It is possible (in error) for cached pages to be busy. Make the
page allocation code handle that case correctly. (It should probably
be a printf or panic, but I want the system to handle coding errors
robustly. I'll probably add a printf.)
17) Correct the design and usage of vm_page_sleep. It didn't handle
consistency problems very well, so make the design a little less
lofty. After vm_page_sleep, if it ever blocked, it is still important
to relookup the page (if the object generation count changed), and
verify its status (always.)
18) In vm_pageout.c, vm_pageout_clean had rotted, so clean that up.
19) Push the page busy for writes and VM_PROT_READ into vm_pageout_flush.
20) Fix vm_pager_put_pages and its descendants to support an int flag
instead of a boolean, so that we can pass down the invalidate bit.
Notes
Notes:
svn path=/head/; revision=34206
Diffstat (limited to 'sys/vm/vm_page.c')
-rw-r--r-- | sys/vm/vm_page.c | 55 |
1 files changed, 41 insertions, 14 deletions
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c index 439fc3d7c57f..f89666646048 100644 --- a/sys/vm/vm_page.c +++ b/sys/vm/vm_page.c @@ -34,7 +34,7 @@ * SUCH DAMAGE. * * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91 - * $Id: vm_page.c,v 1.93 1998/02/09 06:11:32 eivind Exp $ + * $Id: vm_page.c,v 1.94 1998/03/01 04:18:24 dyson Exp $ */ /* @@ -88,6 +88,7 @@ static void vm_page_queue_init __P((void)); static vm_page_t vm_page_select_free __P((vm_object_t object, vm_pindex_t pindex, int prefqueue)); +static vm_page_t vm_page_select_cache __P((vm_object_t, vm_pindex_t)); /* * Associated with page of user-allocatable memory is a @@ -685,6 +686,36 @@ vm_page_select(object, pindex, basequeue) } /* + * Find a page on the cache queue with color optimization. As pages + * might be found, but not applicable, they are deactivated. This + * keeps us from using potentially busy cached pages. + */ +vm_page_t +vm_page_select_cache(object, pindex) + vm_object_t object; + vm_pindex_t pindex; +{ + vm_page_t m; + + while (TRUE) { +#if PQ_L2_SIZE > 1 + int index; + index = (pindex + object->pg_color) & PQ_L2_MASK; + m = vm_page_list_find(PQ_CACHE, index); + +#else + m = TAILQ_FIRST(vm_page_queues[PQ_CACHE].pl); +#endif + if (m && ((m->flags & PG_BUSY) || m->busy || + m->hold_count || m->wire_count)) { + vm_page_deactivate(m); + continue; + } + return m; + } +} + +/* * Find a free or zero page, with specified preference. 
*/ static vm_page_t @@ -825,7 +856,7 @@ vm_page_alloc(object, pindex, page_req) panic("vm_page_alloc(NORMAL): missing page on free queue\n"); #endif } else { - m = vm_page_select(object, pindex, PQ_CACHE); + m = vm_page_select_cache(object, pindex); if (m == NULL) { splx(s); #if defined(DIAGNOSTIC) @@ -847,7 +878,7 @@ vm_page_alloc(object, pindex, page_req) panic("vm_page_alloc(ZERO): missing page on free queue\n"); #endif } else { - m = vm_page_select(object, pindex, PQ_CACHE); + m = vm_page_select_cache(object, pindex); if (m == NULL) { splx(s); #if defined(DIAGNOSTIC) @@ -871,7 +902,7 @@ vm_page_alloc(object, pindex, page_req) panic("vm_page_alloc(SYSTEM): missing page on free queue\n"); #endif } else { - m = vm_page_select(object, pindex, PQ_CACHE); + m = vm_page_select_cache(object, pindex); if (m == NULL) { splx(s); #if defined(DIAGNOSTIC) @@ -986,18 +1017,18 @@ vm_wait() int vm_page_sleep(vm_page_t m, char *msg, char *busy) { vm_object_t object = m->object; - int generation = object->generation; + int slept = 0; if ((busy && *busy) || (m->flags & PG_BUSY)) { int s; s = splvm(); if ((busy && *busy) || (m->flags & PG_BUSY)) { m->flags |= PG_WANTED; - tsleep(m, PVM, msg, 800); + tsleep(m, PVM, msg, 0); + slept = 1; } splx(s); } - return ((generation != object->generation) || (busy && *busy) || - (m->flags & PG_BUSY)); + return slept; } /* @@ -1540,13 +1571,11 @@ again1: if (m->dirty) { if (m->object->type == OBJT_VNODE) { vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc); - vm_object_page_clean(m->object, 0, 0, TRUE); + vm_object_page_clean(m->object, 0, 0, OBJPC_SYNC); VOP_UNLOCK(m->object->handle, 0, curproc); goto again1; } else if (m->object->type == OBJT_SWAP || m->object->type == OBJT_DEFAULT) { - m->flags |= PG_BUSY; - vm_page_protect(m, VM_PROT_NONE); vm_pageout_flush(&m, 1, 0); goto again1; } @@ -1570,13 +1599,11 @@ again1: if (m->dirty) { if (m->object->type == OBJT_VNODE) { vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc); - 
vm_object_page_clean(m->object, 0, 0, TRUE); + vm_object_page_clean(m->object, 0, 0, OBJPC_SYNC); VOP_UNLOCK(m->object->handle, 0, curproc); goto again1; } else if (m->object->type == OBJT_SWAP || m->object->type == OBJT_DEFAULT) { - m->flags |= PG_BUSY; - vm_page_protect(m, VM_PROT_NONE); vm_pageout_flush(&m, 1, 0); goto again1; } |