about summary refs log tree commit diff
diff options
context:
space:
mode:
authorMark Johnston <markj@FreeBSD.org>2020-08-26 14:31:48 +0000
committerMark Johnston <markj@FreeBSD.org>2020-08-26 14:31:48 +0000
commitaea9103e068e2fa8df4928f6b3d336523f616011 (patch)
tree16d97ba1464ab0a3b66b4d050dad56a274c3f69a
parent41c6838786c193bfeb2b2ff0c3d36c119217aeab (diff)
downloadsrc-aea9103e068e2fa8df4928f6b3d336523f616011.tar.gz
src-aea9103e068e2fa8df4928f6b3d336523f616011.zip
Use a large kmem arena import size on NUMA systems.
This helps minimize internal fragmentation that occurs when 2MB imports are interleaved across NUMA domains. Virtually all KVA allocations on direct map platforms consume more than one page, so the fragmentation manifests as runs of 511 4KB page mappings in the kernel.

Reviewed by: alc, kib
Tested by: pho
Sponsored by: The FreeBSD Foundation
Differential Revision: https://reviews.freebsd.org/D26050
Notes
Notes: svn path=/head/; revision=364820
-rw-r--r--sys/vm/vm_kern.c18
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 2ddad0a9f235..f9414fea6638 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -128,6 +128,7 @@ SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
#define KVA_QUANTUM_SHIFT (8 + PAGE_SHIFT)
#endif
#define KVA_QUANTUM (1 << KVA_QUANTUM_SHIFT)
+#define KVA_NUMA_IMPORT_QUANTUM (KVA_QUANTUM * 128)
extern void uma_startup2(void);
@@ -745,6 +746,7 @@ kva_import_domain(void *arena, vmem_size_t size, int flags, vmem_addr_t *addrp)
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
+ vm_size_t quantum;
int domain;
vm_map_init(kernel_map, kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
@@ -774,10 +776,20 @@ kmem_init(vm_offset_t start, vm_offset_t end)
vm_map_unlock(kernel_map);
/*
+ * Use a large import quantum on NUMA systems. This helps minimize
+ * interleaving of superpages, reducing internal fragmentation within
+ * the per-domain arenas.
+ */
+ if (vm_ndomains > 1 && PMAP_HAS_DMAP)
+ quantum = KVA_NUMA_IMPORT_QUANTUM;
+ else
+ quantum = KVA_QUANTUM;
+
+ /*
* Initialize the kernel_arena. This can grow on demand.
*/
vmem_init(kernel_arena, "kernel arena", 0, 0, PAGE_SIZE, 0, 0);
- vmem_set_import(kernel_arena, kva_import, NULL, NULL, KVA_QUANTUM);
+ vmem_set_import(kernel_arena, kva_import, NULL, NULL, quantum);
for (domain = 0; domain < vm_ndomains; domain++) {
/*
@@ -789,13 +801,15 @@ kmem_init(vm_offset_t start, vm_offset_t end)
vm_dom[domain].vmd_kernel_arena = vmem_create(
"kernel arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK);
vmem_set_import(vm_dom[domain].vmd_kernel_arena,
- kva_import_domain, NULL, kernel_arena, KVA_QUANTUM);
+ kva_import_domain, NULL, kernel_arena, quantum);
/*
* In architectures with superpages, maintain separate arenas
* for allocations with permissions that differ from the
* "standard" read/write permissions used for kernel memory,
* so as not to inhibit superpage promotion.
+ *
+ * Use the base import quantum since this arena is rarely used.
*/
#if VM_NRESERVLEVEL > 0
vm_dom[domain].vmd_kernel_rwx_arena = vmem_create(