path: root/sys/vm
author    Mark Johnston <markj@FreeBSD.org>    2018-09-20 15:45:12 +0000
committer Mark Johnston <markj@FreeBSD.org>    2018-09-20 15:45:12 +0000
commit    25ed23cfbb9200f1dd96a70e74a8e3e282635557 (patch)
tree      94f017298f4e7f629f1af47e18dbce170f1b860f /sys/vm
parent    51e13c93b6ef3be997aa3b0f5247dbaddbbd45f5 (diff)
download  src-25ed23cfbb9200f1dd96a70e74a8e3e282635557.tar.gz
          src-25ed23cfbb9200f1dd96a70e74a8e3e282635557.zip
Change the domain selection policy in kmem_back().
Ensure that pages backing the same virtual large page come from the same
physical domain, as kmem_malloc_domain() does.

PR:		231038
Reviewed by:	alc, kib
Approved by:	re (gjb)
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D17248
Notes:
    svn path=/head/; revision=338830
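The core of the change is that the backing domain is now derived deterministically from the virtual address, so every page inside a KVA_QUANTUM-sized chunk lands in the same domain. The standalone sketch below illustrates that mapping in isolation; the page size, reservation order, and domain count are assumed example values, not taken from the patch.

/*
 * Illustrative userland sketch, not kernel code: maps addresses to
 * domains the same way the patch does.  PAGE_SHIFT, VM_LEVEL_0_ORDER and
 * vm_ndomains below are assumed example values.
 */
#include <stdio.h>

#define PAGE_SHIFT		12	/* assumed 4 KB pages */
#define VM_LEVEL_0_ORDER	9	/* assumed 2 MB reservations */
#define KVA_QUANTUM_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)
#define KVA_QUANTUM		(1UL << KVA_QUANTUM_SHIFT)

int
main(void)
{
	unsigned long addr;
	int domain, vm_ndomains = 2;	/* assumed two-domain machine */

	/* Walk half-quantum steps to show both halves share a domain. */
	for (addr = 0; addr < 4 * KVA_QUANTUM; addr += KVA_QUANTUM / 2) {
		domain = (addr >> KVA_QUANTUM_SHIFT) % vm_ndomains;
		printf("addr %#9lx -> domain %d\n", addr, domain);
	}
	return (0);
}

Consecutive half-quantum addresses within the same chunk print the same domain, which is the property kmem_back() needs so that a promoted large page is never backed by pages from two domains.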
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/vm_kern.c | 45 +++++++++++++++++++++++++++++++--------------
1 file changed, 31 insertions(+), 14 deletions(-)
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 31b85fd81854..5f7c4033c218 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -122,11 +122,12 @@ SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
"Max kernel address");
#if VM_NRESERVLEVEL > 0
-#define KVA_QUANTUM (1 << (VM_LEVEL_0_ORDER + PAGE_SHIFT))
+#define KVA_QUANTUM_SHIFT (VM_LEVEL_0_ORDER + PAGE_SHIFT)
#else
/* On non-superpage architectures want large import sizes. */
-#define KVA_QUANTUM (PAGE_SIZE * 1024)
+#define KVA_QUANTUM_SHIFT (10 + PAGE_SHIFT)
#endif
+#define KVA_QUANTUM (1 << KVA_QUANTUM_SHIFT)
/*
* kva_alloc:
@@ -416,9 +417,10 @@ kmem_malloc(vm_size_t size, int flags)
}
/*
- * kmem_back:
+ * kmem_back_domain:
*
- * Allocate physical pages for the specified virtual address range.
+ * Allocate physical pages from the specified domain for the specified
+ * virtual address range.
*/
int
kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
@@ -479,24 +481,39 @@ retry:
return (KERN_SUCCESS);
}
+/*
+ * kmem_back:
+ *
+ * Allocate physical pages for the specified virtual address range.
+ */
int
kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
{
- struct vm_domainset_iter di;
- int domain;
- int ret;
+ vm_offset_t end, next, start;
+ int domain, rv;
KASSERT(object == kernel_object,
("kmem_back: only supports kernel object."));
- vm_domainset_iter_malloc_init(&di, kernel_object, &domain, &flags);
- do {
- ret = kmem_back_domain(domain, object, addr, size, flags);
- if (ret == KERN_SUCCESS)
+ for (start = addr, end = addr + size; addr < end; addr = next) {
+ /*
+ * We must ensure that pages backing a given large virtual page
+ * all come from the same physical domain.
+ */
+ if (vm_ndomains > 1) {
+ domain = (addr >> KVA_QUANTUM_SHIFT) % vm_ndomains;
+ next = roundup2(addr + 1, KVA_QUANTUM);
+ if (next > end || next < start)
+ next = end;
+ } else
+ next = end;
+ rv = kmem_back_domain(domain, object, addr, next - addr, flags);
+ if (rv != KERN_SUCCESS) {
+ kmem_unback(object, start, addr - start);
break;
- } while (vm_domainset_iter_malloc(&di, &domain, &flags) == 0);
-
- return (ret);
+ }
+ }
+ return (rv);
}
/*
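Read on its own, the new loop can be hard to follow because next does triple duty (quantum boundary, overflow clamp, end clamp). The userland sketch below mirrors the control flow with hypothetical stubs fake_back_domain() and fake_unback() in place of kmem_back_domain() and kmem_unback(); the constants are the same assumed example values as above, not values from the patch.

/*
 * Illustrative userland sketch, not kernel code: the splitting and
 * rollback logic of the new kmem_back(), with allocation stubbed out.
 */
#include <stdio.h>

#define KVA_QUANTUM_SHIFT	21	/* assumed 2 MB quantum */
#define KVA_QUANTUM		(1UL << KVA_QUANTUM_SHIFT)
#define KERN_SUCCESS		0

static int vm_ndomains = 2;		/* assumed two-domain machine */

/* Round x up to the next multiple of the power-of-two 'align'. */
static unsigned long
roundup2(unsigned long x, unsigned long align)
{
	return ((x + align - 1) & ~(align - 1));
}

/* Stub standing in for kmem_back_domain(): always succeeds. */
static int
fake_back_domain(int domain, unsigned long addr, unsigned long size)
{
	printf("back   %#9lx + %#8lx from domain %d\n", addr, size, domain);
	return (KERN_SUCCESS);
}

/* Stub standing in for kmem_unback(). */
static void
fake_unback(unsigned long addr, unsigned long size)
{
	printf("unback %#9lx + %#8lx\n", addr, size);
}

static int
fake_kmem_back(unsigned long addr, unsigned long size)
{
	unsigned long end, next, start;
	int domain, rv;

	rv = KERN_SUCCESS;
	for (start = addr, end = addr + size; addr < end; addr = next) {
		if (vm_ndomains > 1) {
			/* Pick the domain from the address, as in the patch. */
			domain = (addr >> KVA_QUANTUM_SHIFT) % vm_ndomains;
			/* Stop this chunk at the next quantum boundary... */
			next = roundup2(addr + 1, KVA_QUANTUM);
			/* ...unless that overshoots the range or wraps. */
			if (next > end || next < start)
				next = end;
		} else {
			domain = 0;	/* single-domain path in this sketch */
			next = end;
		}
		rv = fake_back_domain(domain, addr, next - addr);
		if (rv != KERN_SUCCESS) {
			/* Undo everything backed so far, then give up. */
			fake_unback(start, addr - start);
			break;
		}
	}
	return (rv);
}

int
main(void)
{
	/* An unaligned 5 MB range that touches four 2 MB quanta. */
	return (fake_kmem_back(0x180000, 5UL << 20) == KERN_SUCCESS ? 0 : 1);
}

Running it shows the 5 MB range split into four calls whose boundaries fall on KVA_QUANTUM multiples, with the domain alternating per quantum; if any call failed, everything from start up to the failing chunk would be unwound in a single kmem_unback()-style call.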