author     Konstantin Belousov <kib@FreeBSD.org>  2023-07-19 11:05:32 +0000
committer  Konstantin Belousov <kib@FreeBSD.org>  2023-07-20 14:11:42 +0000
commit     21e45c30c35c9aa732073f725924caf581c93460 (patch)
tree       855618804d7e989fad94f1613c0a10bfd7946335
parent     a52f23f4c49e4766fb9eb0bf460cc83c5f63f17d (diff)
mmap(MAP_STACK): on stack grow, use original protection
If mprotect(2) changed the protection at the bottom of the currently
grown stack region, the changed protection would be used for the stack
grow on the next fault. This is arguably unexpected.

Store the original protection for the entry at mmap(2) time in the
offset member of the gap vm_map_entry, and use it for the protection of
the grown stack region.

PR:		272585
Reported by:	John F. Carr <jfc@mit.edu>
Reviewed by:	alc, markj
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
Differential revision:	https://reviews.freebsd.org/D41089
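To make the failure mode concrete, below is a minimal userspace sketch of
the reported scenario. It is not the regression test from the PR; the
region size and the use of the kern.sgrowsiz sysctl to locate the bottom
of the initially mapped part of a MAP_STACK region are illustrative
assumptions. On an unpatched kernel the final write is expected to fault,
because the grown pages inherit the mprotect(2)-changed protection; with
this change the grow uses the original PROT_READ | PROT_WRITE.

/*
 * Sketch of the PR 272585 scenario; illustrative only, not the
 * regression test committed for this change.  Assumes the initially
 * mapped portion of a fresh MAP_STACK region is kern.sgrowsiz bytes.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char *base, *bottom;
	u_long sgrowsiz;
	size_t len, pgsz, sz;

	pgsz = (size_t)getpagesize();
	sz = 1024 * pgsz;		/* whole MAP_STACK reservation */

	len = sizeof(sgrowsiz);
	if (sysctlbyname("kern.sgrowsiz", &sgrowsiz, &len, NULL, 0) != 0)
		err(1, "sysctl kern.sgrowsiz");

	base = mmap(NULL, sz, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_STACK, -1, 0);
	if (base == MAP_FAILED)
		err(1, "mmap");

	/* Lowest page of the initially mapped part of the stack. */
	bottom = base + sz - sgrowsiz;

	/* Change the protection at the bottom of the mapped region. */
	if (mprotect(bottom, pgsz, PROT_READ) != 0)
		err(1, "mprotect");

	/*
	 * Fault one page below the mapped region to grow the stack.
	 * Without the fix, the grow inherits PROT_READ from the entry
	 * adjacent to the gap and this write receives SIGSEGV; with
	 * it, the grow uses the original PROT_READ | PROT_WRITE.
	 */
	*(volatile char *)(bottom - pgsz) = 1;
	printf("grown stack page is writable\n");
	return (0);
}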
 sys/vm/vm_map.c | 24 ++++++++++++++++--------
 sys/vm/vm_map.h |  4 ++++
 2 files changed, 20 insertions(+), 8 deletions(-)
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index a02107b5e64d..997a49111a59 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -4493,7 +4493,7 @@ static int
 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
     vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow)
 {
-	vm_map_entry_t new_entry, prev_entry;
+	vm_map_entry_t gap_entry, new_entry, prev_entry;
 	vm_offset_t bot, gap_bot, gap_top, top;
 	vm_size_t init_ssize, sgp;
 	int orient, rv;
@@ -4575,11 +4575,14 @@ vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
 		 * read-ahead logic is never used for it. Re-use
 		 * next_read of the gap entry to store
 		 * stack_guard_page for vm_map_growstack().
+		 * Similarly, since a gap cannot have a backing object,
+		 * store the original stack protections in the
+		 * object offset.
 		 */
-		if (orient == MAP_STACK_GROWS_DOWN)
-			vm_map_entry_pred(new_entry)->next_read = sgp;
-		else
-			vm_map_entry_succ(new_entry)->next_read = sgp;
+		gap_entry = orient == MAP_STACK_GROWS_DOWN ?
+		    vm_map_entry_pred(new_entry) : vm_map_entry_succ(new_entry);
+		gap_entry->next_read = sgp;
+		gap_entry->offset = prot;
 	} else {
 		(void)vm_map_delete(map, bot, top);
 	}
@@ -4599,6 +4602,7 @@ vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry)
 	struct ucred *cred;
 	vm_offset_t gap_end, gap_start, grow_start;
 	vm_size_t grow_amount, guard, max_grow;
+	vm_prot_t prot;
 	rlim_t lmemlim, stacklim, vmemlim;
 	int rv, rv1 __diagused;
 	bool gap_deleted, grow_down, is_procstack;
@@ -4739,6 +4743,12 @@ retry:
 	}
 
 	if (grow_down) {
+		/*
+		 * The gap_entry "offset" field is overloaded. See
+		 * vm_map_stack_locked().
+		 */
+		prot = gap_entry->offset;
+
 		grow_start = gap_entry->end - grow_amount;
 		if (gap_entry->start + grow_amount == gap_entry->end) {
 			gap_start = gap_entry->start;
@@ -4751,9 +4761,7 @@ retry:
 			gap_deleted = false;
 		}
 		rv = vm_map_insert(map, NULL, 0, grow_start,
-		    grow_start + grow_amount,
-		    stack_entry->protection, stack_entry->max_protection,
-		    MAP_STACK_GROWS_DOWN);
+		    grow_start + grow_amount, prot, prot, MAP_STACK_GROWS_DOWN);
 		if (rv != KERN_SUCCESS) {
 			if (gap_deleted) {
 				rv1 = vm_map_insert(map, NULL, 0, gap_start,
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index fd8b606e8ddc..c4ed36ce57ba 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -97,6 +97,10 @@ union vm_map_object {
  *	a VM object (or sharing map) and offset into that object,
  *	and user-exported inheritance and protection information.
  *	Also included is control information for virtual copy operations.
+ *
+ *	For stack gap map entries (MAP_ENTRY_GUARD | MAP_ENTRY_GROWS_DOWN
+ *	or UP), the next_read member is reused as the stack_guard_page
+ *	storage, and offset is the stack protection.
  */
 struct vm_map_entry {
 	struct vm_map_entry *left;	/* left child or previous entry */
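As a reading aid, the overloading described in the comment above could be
spelled out with small accessors. These helpers are hypothetical, written
only for this note (the commit itself reads next_read and offset
directly); a minimal sketch, assuming the vm_map_entry definitions from
this header:

/*
 * Hypothetical accessors (not part of this commit) making the stack
 * gap field overloading explicit.
 */
static inline vm_size_t
stack_gap_guard_size(vm_map_entry_t gap_entry)
{
	/* A gap is never read ahead; next_read holds stack_guard_page. */
	return ((vm_size_t)gap_entry->next_read);
}

static inline vm_prot_t
stack_gap_original_prot(vm_map_entry_t gap_entry)
{
	/* A gap has no backing object; offset holds the mmap(2) prot. */
	return ((vm_prot_t)gap_entry->offset);
}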