aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKonstantin Belousov <kib@FreeBSD.org>2021-01-12 12:43:39 +0000
committerKonstantin Belousov <kib@FreeBSD.org>2021-01-12 23:35:22 +0000
commit0659df6faddfb27ba54a2cae2a12552cf4f823a0 (patch)
treec4f516ac92240eb7ed5957b6a4ecd5ecb461e4ed
parentd89e1db5a3319f4e3bc9403ed883c64638b67f71 (diff)
downloadsrc-0659df6faddfb27ba54a2cae2a12552cf4f823a0.tar.gz
src-0659df6faddfb27ba54a2cae2a12552cf4f823a0.zip
vm_map_protect: allow setting prot and max_prot in one go.
This prevents a situation where another thread modifies map entries' permissions between setting max_prot, then relocking, then setting prot, confusing the operation's outcome. E.g. you can get an error that is not possible if the operation is performed atomically. Also enable setting rwx for max_prot even if the map does not allow setting effective rwx protection. Reviewed by: brooks, markj (previous version) Sponsored by: The FreeBSD Foundation Differential Revision: https://reviews.freebsd.org/D28117
-rw-r--r--sys/i386/linux/imgact_linux.c6
-rw-r--r--sys/kern/imgact_elf.c2
-rw-r--r--sys/kern/kern_resource.c3
-rw-r--r--sys/kern/link_elf.c2
-rw-r--r--sys/kern/link_elf_obj.c3
-rw-r--r--sys/vm/vm_map.c45
-rw-r--r--sys/vm/vm_map.h7
-rw-r--r--sys/vm/vm_mmap.c18
8 files changed, 53 insertions, 33 deletions
diff --git a/sys/i386/linux/imgact_linux.c b/sys/i386/linux/imgact_linux.c
index dad18b37aa40..661620b6ceaf 100644
--- a/sys/i386/linux/imgact_linux.c
+++ b/sys/i386/linux/imgact_linux.c
@@ -158,7 +158,8 @@ exec_linux_imgact(struct image_params *imgp)
* remove write enable on the 'text' part
*/
error = vm_map_protect(&vmspace->vm_map, vmaddr,
- vmaddr + a_out->a_text, VM_PROT_EXECUTE|VM_PROT_READ, TRUE);
+ vmaddr + a_out->a_text, 0, VM_PROT_EXECUTE | VM_PROT_READ,
+ VM_MAP_PROTECT_SET_MAXPROT);
if (error)
goto fail;
} else {
@@ -185,7 +186,8 @@ exec_linux_imgact(struct image_params *imgp)
* allow read/write of data
*/
error = vm_map_protect(&vmspace->vm_map, vmaddr + a_out->a_text,
- vmaddr + a_out->a_text + a_out->a_data, VM_PROT_ALL, FALSE);
+ vmaddr + a_out->a_text + a_out->a_data, VM_PROT_ALL, 0,
+ VM_MAP_PROTECT_SET_PROT);
if (error)
goto fail;
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index 9ab95a63a67b..dae11ab92a6c 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -692,7 +692,7 @@ __elfN(load_section)(struct image_params *imgp, vm_ooffset_t offset,
*/
if ((prot & VM_PROT_WRITE) == 0)
vm_map_protect(map, trunc_page(map_addr), round_page(map_addr +
- map_len), prot, FALSE);
+ map_len), prot, 0, VM_MAP_PROTECT_SET_PROT);
return (0);
}
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index 036cb0ccb945..e14be34aa6e0 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -770,7 +770,8 @@ kern_proc_setrlimit(struct thread *td, struct proc *p, u_int which,
addr = trunc_page(addr);
size = round_page(size);
(void)vm_map_protect(&p->p_vmspace->vm_map,
- addr, addr + size, prot, FALSE);
+ addr, addr + size, prot, 0,
+ VM_MAP_PROTECT_SET_PROT);
}
}
diff --git a/sys/kern/link_elf.c b/sys/kern/link_elf.c
index d9b5f9437b57..ea21bf447a55 100644
--- a/sys/kern/link_elf.c
+++ b/sys/kern/link_elf.c
@@ -1224,7 +1224,7 @@ link_elf_load_file(linker_class_t cls, const char* filename,
error = vm_map_protect(kernel_map,
(vm_offset_t)segbase,
(vm_offset_t)segbase + round_page(segs[i]->p_memsz),
- prot, FALSE);
+ prot, 0, VM_MAP_PROTECT_SET_PROT);
if (error != KERN_SUCCESS) {
error = ENOMEM;
goto out;
diff --git a/sys/kern/link_elf_obj.c b/sys/kern/link_elf_obj.c
index 63ed9bf61fc3..6b5a6df0a56f 100644
--- a/sys/kern/link_elf_obj.c
+++ b/sys/kern/link_elf_obj.c
@@ -219,7 +219,8 @@ link_elf_protect_range(elf_file_t ef, vm_offset_t start, vm_offset_t end,
#endif
return;
}
- error = vm_map_protect(kernel_map, start, end, prot, FALSE);
+ error = vm_map_protect(kernel_map, start, end, prot, 0,
+ VM_MAP_PROTECT_SET_PROT);
KASSERT(error == KERN_SUCCESS,
("link_elf_protect_range: vm_map_protect() returned %d", error));
}
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 9746d07713d3..4bd0b245a881 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -2733,14 +2733,12 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
/*
* vm_map_protect:
*
- * Sets the protection of the specified address
- * region in the target map. If "set_max" is
- * specified, the maximum protection is to be set;
- * otherwise, only the current protection is affected.
+ * Sets the protection and/or the maximum protection of the
+ * specified address region in the target map.
*/
int
vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
- vm_prot_t new_prot, boolean_t set_max)
+ vm_prot_t new_prot, vm_prot_t new_maxprot, int flags)
{
vm_map_entry_t entry, first_entry, in_tran, prev_entry;
vm_object_t obj;
@@ -2751,12 +2749,18 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
if (start == end)
return (KERN_SUCCESS);
+ if ((flags & (VM_MAP_PROTECT_SET_PROT | VM_MAP_PROTECT_SET_MAXPROT)) ==
+ (VM_MAP_PROTECT_SET_PROT | VM_MAP_PROTECT_SET_MAXPROT) &&
+ (new_prot & new_maxprot) != new_prot)
+ return (KERN_OUT_OF_BOUNDS);
+
again:
in_tran = NULL;
vm_map_lock(map);
- if ((map->flags & MAP_WXORX) != 0 && (new_prot &
- (VM_PROT_WRITE | VM_PROT_EXECUTE)) == (VM_PROT_WRITE |
+ if ((map->flags & MAP_WXORX) != 0 &&
+ (flags & VM_MAP_PROTECT_SET_PROT) != 0 &&
+ (new_prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) == (VM_PROT_WRITE |
VM_PROT_EXECUTE)) {
vm_map_unlock(map);
return (KERN_PROTECTION_FAILURE);
@@ -2786,7 +2790,12 @@ again:
vm_map_unlock(map);
return (KERN_INVALID_ARGUMENT);
}
- if ((new_prot & entry->max_protection) != new_prot) {
+ if ((flags & VM_MAP_PROTECT_SET_PROT) == 0)
+ new_prot = entry->protection;
+ if ((flags & VM_MAP_PROTECT_SET_MAXPROT) == 0)
+ new_maxprot = entry->max_protection;
+ if ((new_prot & entry->max_protection) != new_prot ||
+ (new_maxprot & entry->max_protection) != new_maxprot) {
vm_map_unlock(map);
return (KERN_PROTECTION_FAILURE);
}
@@ -2827,12 +2836,16 @@ again:
return (rv);
}
- if (set_max ||
+ if ((flags & VM_MAP_PROTECT_SET_PROT) == 0)
+ new_prot = entry->protection;
+ if ((flags & VM_MAP_PROTECT_SET_MAXPROT) == 0)
+ new_maxprot = entry->max_protection;
+
+ if ((flags & VM_MAP_PROTECT_SET_PROT) == 0 ||
((new_prot & ~entry->protection) & VM_PROT_WRITE) == 0 ||
ENTRY_CHARGED(entry) ||
- (entry->eflags & MAP_ENTRY_GUARD) != 0) {
+ (entry->eflags & MAP_ENTRY_GUARD) != 0)
continue;
- }
cred = curthread->td_ucred;
obj = entry->object.vm_object;
@@ -2893,11 +2906,11 @@ again:
old_prot = entry->protection;
- if (set_max)
- entry->protection =
- (entry->max_protection = new_prot) &
- old_prot;
- else
+ if ((flags & VM_MAP_PROTECT_SET_MAXPROT) != 0) {
+ entry->max_protection = new_maxprot;
+ entry->protection = new_maxprot & old_prot;
+ }
+ if ((flags & VM_MAP_PROTECT_SET_PROT) != 0)
entry->protection = new_prot;
/*
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 44f99a31f3d9..ace205b21b42 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -510,7 +510,12 @@ vm_map_entry_succ(vm_map_entry_t entry)
for ((it) = vm_map_entry_first(map); \
(it) != &(map)->header; \
(it) = vm_map_entry_succ(it))
-int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);
+
+#define VM_MAP_PROTECT_SET_PROT 0x0001
+#define VM_MAP_PROTECT_SET_MAXPROT 0x0002
+
+int vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
+ vm_prot_t new_prot, vm_prot_t new_maxprot, int flags);
int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
void vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev,
vm_map_entry_t entry);
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 7888ff15e36c..30c485010ac8 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -653,6 +653,7 @@ kern_mprotect(struct thread *td, uintptr_t addr0, size_t size, int prot)
vm_offset_t addr;
vm_size_t pageoff;
int vm_error, max_prot;
+ int flags;
addr = addr0;
if ((prot & ~(_PROT_ALL | PROT_MAX(_PROT_ALL))) != 0)
@@ -672,16 +673,11 @@ kern_mprotect(struct thread *td, uintptr_t addr0, size_t size, int prot)
if (addr + size < addr)
return (EINVAL);
- vm_error = KERN_SUCCESS;
- if (max_prot != 0) {
- if ((max_prot & prot) != prot)
- return (ENOTSUP);
- vm_error = vm_map_protect(&td->td_proc->p_vmspace->vm_map,
- addr, addr + size, max_prot, TRUE);
- }
- if (vm_error == KERN_SUCCESS)
- vm_error = vm_map_protect(&td->td_proc->p_vmspace->vm_map,
- addr, addr + size, prot, FALSE);
+ flags = VM_MAP_PROTECT_SET_PROT;
+ if (max_prot != 0)
+ flags |= VM_MAP_PROTECT_SET_MAXPROT;
+ vm_error = vm_map_protect(&td->td_proc->p_vmspace->vm_map,
+ addr, addr + size, prot, max_prot, flags);
switch (vm_error) {
case KERN_SUCCESS:
@@ -690,6 +686,8 @@ kern_mprotect(struct thread *td, uintptr_t addr0, size_t size, int prot)
return (EACCES);
case KERN_RESOURCE_SHORTAGE:
return (ENOMEM);
+ case KERN_OUT_OF_BOUNDS:
+ return (ENOTSUP);
}
return (EINVAL);
}