author	Mark Johnston <markj@FreeBSD.org>	2021-04-13 21:40:01 +0000
committer	Mark Johnston <markj@FreeBSD.org>	2021-11-01 14:03:11 +0000
commit	ed66f9c61b0f91194164702bf0919c12c0354344 (patch)
tree	f9f7a2f66d96326a1d6abcb51f5120bb8d20076a
parent	9d95539ffed299a8457c7c718a1fe039d6f595bd (diff)
kmem: Add KASAN state transitions
Memory allocated with kmem_* is unmapped upon free, so KASAN doesn't
provide a lot of benefit, but since allocations are always a multiple
of the page size we can create a redzone when the allocation request
size is not a multiple of the page size.

Sponsored by:	The FreeBSD Foundation

(cherry picked from commit 2b914b85ddf4c25d112b2639bbbb7618641872b4)
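The redzone lives in the slack between the requested size and the
page-rounded allocation size. A minimal sketch of the pattern the diff
below applies, assuming the kasan_mark() interface it uses; the
concrete byte counts are hypothetical examples:

	/*
	 * Sketch: with 4 KiB pages, a 5000-byte request is backed by
	 * asize = round_page(5000) = 8192 bytes of mapped memory.
	 */
	vm_size_t asize = round_page(size);
	/* ... allocate and back asize bytes at addr ... */
	kasan_mark((void *)addr, size, asize, KASAN_KMEM_REDZONE);
	/*
	 * [addr, addr + size) is now marked valid; the slack
	 * [addr + size, addr + asize) is poisoned as a redzone, so
	 * any access to it raises a KASAN report.
	 */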
-rw-r--r--	sys/vm/vm_kern.c	36
1 file changed, 22 insertions(+), 14 deletions(-)
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 160821ba3a19..f25784857440 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -71,12 +71,13 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
-#include <sys/kernel.h> /* for ticks and hz */
+#include <sys/asan.h>
#include <sys/domainset.h>
#include <sys/eventhandler.h>
+#include <sys/kernel.h>
#include <sys/lock.h>
-#include <sys/proc.h>
#include <sys/malloc.h>
+#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>
@@ -215,25 +216,26 @@ kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
vm_object_t object;
vm_offset_t addr, i, offset;
vm_page_t m;
+ vm_size_t asize;
int pflags;
vm_prot_t prot;
object = kernel_object;
- size = round_page(size);
+ asize = round_page(size);
vmem = vm_dom[domain].vmd_kernel_arena;
- if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
+ if (vmem_alloc(vmem, asize, M_BESTFIT | flags, &addr))
return (0);
offset = addr - VM_MIN_KERNEL_ADDRESS;
pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
prot = (flags & M_EXEC) != 0 ? VM_PROT_ALL : VM_PROT_RW;
VM_OBJECT_WLOCK(object);
- for (i = 0; i < size; i += PAGE_SIZE) {
+ for (i = 0; i < asize; i += PAGE_SIZE) {
m = kmem_alloc_contig_pages(object, atop(offset + i),
domain, pflags, 1, low, high, PAGE_SIZE, 0, memattr);
if (m == NULL) {
VM_OBJECT_WUNLOCK(object);
kmem_unback(object, addr, i);
- vmem_free(vmem, addr, size);
+ vmem_free(vmem, addr, asize);
return (0);
}
KASSERT(vm_page_domain(m) == domain,
@@ -246,6 +248,7 @@ kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
prot | PMAP_ENTER_WIRED, 0);
}
VM_OBJECT_WUNLOCK(object);
+ kasan_mark((void *)addr, size, asize, KASAN_KMEM_REDZONE);
return (addr);
}
@@ -294,23 +297,24 @@ kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
vm_object_t object;
vm_offset_t addr, offset, tmp;
vm_page_t end_m, m;
+ vm_size_t asize;
u_long npages;
int pflags;
object = kernel_object;
- size = round_page(size);
+ asize = round_page(size);
vmem = vm_dom[domain].vmd_kernel_arena;
- if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
+ if (vmem_alloc(vmem, asize, flags | M_BESTFIT, &addr))
return (0);
offset = addr - VM_MIN_KERNEL_ADDRESS;
pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
- npages = atop(size);
+ npages = atop(asize);
VM_OBJECT_WLOCK(object);
m = kmem_alloc_contig_pages(object, atop(offset), domain,
pflags, npages, low, high, alignment, boundary, memattr);
if (m == NULL) {
VM_OBJECT_WUNLOCK(object);
- vmem_free(vmem, addr, size);
+ vmem_free(vmem, addr, asize);
return (0);
}
KASSERT(vm_page_domain(m) == domain,
@@ -327,6 +331,7 @@ kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
tmp += PAGE_SIZE;
}
VM_OBJECT_WUNLOCK(object);
+ kasan_mark((void *)addr, size, asize, KASAN_KMEM_REDZONE);
return (addr);
}
@@ -402,21 +407,23 @@ kmem_malloc_domain(int domain, vm_size_t size, int flags)
{
vmem_t *arena;
vm_offset_t addr;
+ vm_size_t asize;
int rv;
if (__predict_true((flags & M_EXEC) == 0))
arena = vm_dom[domain].vmd_kernel_arena;
else
arena = vm_dom[domain].vmd_kernel_rwx_arena;
- size = round_page(size);
- if (vmem_alloc(arena, size, flags | M_BESTFIT, &addr))
+ asize = round_page(size);
+ if (vmem_alloc(arena, asize, flags | M_BESTFIT, &addr))
return (0);
- rv = kmem_back_domain(domain, kernel_object, addr, size, flags);
+ rv = kmem_back_domain(domain, kernel_object, addr, asize, flags);
if (rv != KERN_SUCCESS) {
- vmem_free(arena, addr, size);
+ vmem_free(arena, addr, asize);
return (0);
}
+ kasan_mark((void *)addr, size, asize, KASAN_KMEM_REDZONE);
return (addr);
}
@@ -608,6 +615,7 @@ kmem_free(vm_offset_t addr, vm_size_t size)
struct vmem *arena;
size = round_page(size);
+ kasan_mark((void *)addr, size, size, 0);
arena = _kmem_unback(kernel_object, addr, size);
if (arena != NULL)
vmem_free(arena, addr, size);
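
On the free side, kmem_free() re-marks the whole rounded range as
valid (code 0) before unbacking it, so the redzone's poison does not
linger on a VA range the arena may hand out again. A hedged usage
illustration of what the redzone catches; kmem_malloc() is the
non-domain wrapper around kmem_malloc_domain() shown above, and the
byte offsets are hypothetical:

	char *p = (char *)kmem_malloc(5000, M_WAITOK);
	p[4999] = 1;	/* fine: within the requested 5000 bytes */
	p[5000] = 1;	/* KASAN report: write into the KMEM redzone */
	kmem_free((vm_offset_t)p, 5000);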