author     Peter Wemm <peter@FreeBSD.org>   2002-07-05 01:27:35 +0000
committer  Peter Wemm <peter@FreeBSD.org>   2002-07-05 01:27:35 +0000
commit     7b719ec324ccd27df44793b5c46954aa6f90c143 (patch)
tree       ffdbf53ea7c45bc339e9803fd8c166f555117acc /sys
parent     361323eac609ecdab12bc6dfbd1d08e40679c348 (diff)
Back out proc part of last commit.  UMA manages the thread cache only,
and we just have to deal with the kstack when told to.  We do not have
a UMA-managed cache for the proc struct and its associated upage yet,
so go back to the old lazy mechanism.  Note that if UMA destroys pages
that used to contain proc structures, we'll lose the corresponding
upage forever.  (Zones never did this: once a page was allocated, it
stayed attached to the proc zone forever.)
Notes:
    svn path=/head/; revision=99422
Diffstat (limited to 'sys')
-rw-r--r--  sys/ia64/ia64/pmap.c        30 ++++++++++++++++++++++--------
-rw-r--r--  sys/sparc64/sparc64/pmap.c  30 ++++++++++++++++++++++--------
2 files changed, 44 insertions(+), 16 deletions(-)
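Condensed, the lifecycle this commit restores looks as follows.  This is
a sketch only, assuming the kernel environment of the diff below; the
helper names upage_hold() and upage_release() are illustrative and do
not appear in the source, while the types and calls are exactly the
ones the diff itself uses.

    /*
     * Sketch only -- condensed from the diff below, not compilable
     * stand-alone.
     */
    static void
    upage_hold(struct proc *p)          /* cf. pmap_new_proc() */
    {
            vm_offset_t up;

            /* Reuse the VM object cached from a previous incarnation. */
            if (p->p_upages_obj == NULL)
                    p->p_upages_obj = vm_object_allocate(OBJT_DEFAULT,
                        UAREA_PAGES);

            /* Likewise reuse the kernel VA; allocate only on first use. */
            up = (vm_offset_t)p->p_uarea;
            if (up == 0) {
                    up = kmem_alloc_nofault(kernel_map,
                        UAREA_PAGES * PAGE_SIZE);
                    if (up == 0)
                            panic("upage allocation failed");
                    p->p_uarea = (struct user *)up;
            }
    }

    static void
    upage_release(struct proc *p)       /* cf. pmap_dispose_proc() */
    {
            vm_object_t upobj = p->p_upages_obj;

            /*
             * Keep the object (and the KVA) cached for reuse, unless
             * the U area was pushed to swap, in which case drop the
             * object to reclaim the swap space promptly.
             */
            if (upobj->type == OBJT_SWAP) {
                    p->p_upages_obj = NULL;
                    vm_object_deallocate(upobj);
            }
    }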
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index 3c824d67f5f6..5bf1aebb839b 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -722,16 +722,22 @@ pmap_new_proc(struct proc *p)
/*
* Allocate object for the upage.
*/
- upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
- p->p_upages_obj = upobj;
+ upobj = p->p_upages_obj;
+ if (upobj == NULL) {
+ upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
+ p->p_upages_obj = upobj;
+ }
/*
* Get a kernel virtual address for the U area for this process.
*/
- up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
- if (up == 0)
- panic("pmap_new_proc: upage allocation failed");
- p->p_uarea = (struct user *)up;
+ up = (vm_offset_t)p->p_uarea;
+ if (up == 0) {
+ up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
+ if (up == 0)
+ panic("pmap_new_proc: upage allocation failed");
+ p->p_uarea = (struct user *)up;
+ }
for (i = 0; i < UAREA_PAGES; i++) {
/*
@@ -781,8 +787,16 @@ pmap_dispose_proc(struct proc *p)
vm_page_free(m);
}
pmap_qremove(up, UAREA_PAGES);
- kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
- vm_object_deallocate(upobj);
+
+ /*
+ * If the process got swapped out some of its UPAGES might have gotten
+ * swapped. Just get rid of the object to clean up the swap use
+ * proactively. NOTE! might block waiting for paging I/O to complete.
+ */
+ if (upobj->type == OBJT_SWAP) {
+ p->p_upages_obj = NULL;
+ vm_object_deallocate(upobj);
+ }
}
/*
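(Before the same change repeats for sparc64, a note on the OBJT_SWAP
test above: the U-area object is created as OBJT_DEFAULT, and the VM
system converts it to a swap-backed OBJT_SWAP object once its pages are
pushed to swap.  The type check therefore distinguishes a never-swapped
object, which is safe and cheap to keep cached, from one holding swap
space, which is torn down to release that space promptly.)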
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index 79a1ce9f771b..b3c40ea415d4 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -827,16 +827,22 @@ pmap_new_proc(struct proc *p)
/*
* Allocate object for the upage.
*/
- upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
- p->p_upages_obj = upobj;
+ upobj = p->p_upages_obj;
+ if (upobj == NULL) {
+ upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
+ p->p_upages_obj = upobj;
+ }
/*
* Get a kernel virtual address for the U area for this process.
*/
- up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
- if (up == 0)
- panic("pmap_new_proc: upage allocation failed");
- p->p_uarea = (struct user *)up;
+ up = (vm_offset_t)p->p_uarea;
+ if (up == 0) {
+ up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
+ if (up == 0)
+ panic("pmap_new_proc: upage allocation failed");
+ p->p_uarea = (struct user *)up;
+ }
for (i = 0; i < UAREA_PAGES; i++) {
/*
@@ -886,8 +892,16 @@ pmap_dispose_proc(struct proc *p)
vm_page_free(m);
}
pmap_qremove(up, UAREA_PAGES);
- kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
- vm_object_deallocate(upobj);
+
+ /*
+ * If the process got swapped out some of its UPAGES might have gotten
+ * swapped. Just get rid of the object to clean up the swap use
+ * proactively. NOTE! might block waiting for paging I/O to complete.
+ */
+ if (upobj->type == OBJT_SWAP) {
+ p->p_upages_obj = NULL;
+ vm_object_deallocate(upobj);
+ }
}
/*