aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKonstantin Belousov <kib@FreeBSD.org>2021-01-08 22:40:04 +0000
committerKonstantin Belousov <kib@FreeBSD.org>2021-01-11 23:15:43 +0000
commit2e1c94aa1fd582fb8ae0522f0827be719ff5fb67 (patch)
tree5f6306807478227e917e769fea9e6ba4ac4b0962
parent2c52512caf6ec10f49038e6884b9c2aea905cc4e (diff)
downloadsrc-2e1c94aa1fd5.tar.gz
src-2e1c94aa1fd5.zip
Implement enforcing write XOR execute mapping policy.
It is checked in vm_map_insert() and vm_map_protect() that PROT_WRITE | PROT_EXEC are never specified together if the vm_map has the MAP_WXORX flag set. A FreeBSD feature-control flag allows a specific binary to request a W^X exemption, and there are per-ABI boolean sysctls kern.elf{32,64}.allow_wx to enable/disable the policy globally.

Reviewed by:	emaste, jhb
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D28050
-rw-r--r--sys/kern/imgact_elf.c8
-rw-r--r--sys/kern/kern_exec.c6
-rw-r--r--sys/vm/vm_map.c11
-rw-r--r--sys/vm/vm_map.h1
4 files changed, 23 insertions, 3 deletions
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index 4f5d5a9a0736..9ab95a63a67b 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -190,6 +190,11 @@ SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, sigfastblock,
CTLFLAG_RWTUN, &__elfN(sigfastblock), 0,
"enable sigfastblock for new processes");
+static bool __elfN(allow_wx) = true;
+SYSCTL_BOOL(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, allow_wx,
+ CTLFLAG_RWTUN, &__elfN(allow_wx), 0,
+ "Allow pages to be mapped simultaneously writable and executable");
+
static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
#define aligned(a, t) (rounddown2((u_long)(a), sizeof(t)) == (u_long)(a))
@@ -1237,6 +1242,9 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
imgp->map_flags |= MAP_ASLR_IGNSTART;
}
+ if (!__elfN(allow_wx) && (fctl0 & NT_FREEBSD_FCTL_WXNEEDED) == 0)
+ imgp->map_flags |= MAP_WXORX;
+
error = exec_new_vmspace(imgp, sv);
vmspace = imgp->proc->p_vmspace;
map = &vmspace->vm_map;
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 6f7c01470804..13753177127f 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -1074,12 +1074,12 @@ exec_new_vmspace(struct image_params *imgp, struct sysentvec *sv)
pmap_remove_pages(vmspace_pmap(vmspace));
vm_map_remove(map, vm_map_min(map), vm_map_max(map));
/*
- * An exec terminates mlockall(MCL_FUTURE), ASLR state
- * must be re-evaluated.
+ * An exec terminates mlockall(MCL_FUTURE).
+ * ASLR and W^X states must be re-evaluated.
*/
vm_map_lock(map);
vm_map_modflags(map, 0, MAP_WIREFUTURE | MAP_ASLR |
- MAP_ASLR_IGNSTART);
+ MAP_ASLR_IGNSTART | MAP_WXORX);
vm_map_unlock(map);
} else {
error = vmspace_exec(p, sv_minuser, sv->sv_maxuser);
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 5925ae8e96ad..bea776480391 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -1671,6 +1671,10 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
if (start == end || !vm_map_range_valid(map, start, end))
return (KERN_INVALID_ADDRESS);
+ if ((map->flags & MAP_WXORX) != 0 && (prot & (VM_PROT_WRITE |
+ VM_PROT_EXECUTE)) == (VM_PROT_WRITE | VM_PROT_EXECUTE))
+ return (KERN_PROTECTION_FAILURE);
+
/*
* Find the entry prior to the proposed starting address; if it's part
* of an existing entry, this range is bogus.
@@ -2751,6 +2755,13 @@ again:
in_tran = NULL;
vm_map_lock(map);
+ if ((map->flags & MAP_WXORX) != 0 && (new_prot &
+ (VM_PROT_WRITE | VM_PROT_EXECUTE)) == (VM_PROT_WRITE |
+ VM_PROT_EXECUTE)) {
+ vm_map_unlock(map);
+ return (KERN_PROTECTION_FAILURE);
+ }
+
/*
* Ensure that we are not concurrently wiring pages. vm_map_wire() may
* need to fault pages into the map and will drop the map lock while
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 349bb4815762..44f99a31f3d9 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -228,6 +228,7 @@ struct vm_map {
#define MAP_ASLR 0x08 /* enabled ASLR */
#define MAP_ASLR_IGNSTART 0x10
#define MAP_REPLENISH 0x20
+#define MAP_WXORX 0x40 /* enforce W^X */
#ifdef _KERNEL
#if defined(KLD_MODULE) && !defined(KLD_TIED)