Diffstat (limited to 'sys/vm/vm_map.c')
-rw-r--r--  sys/vm/vm_map.c | 107
1 file changed, 73 insertions(+), 34 deletions(-)
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 3111dda6e99d..3d82f0835c09 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -172,7 +172,7 @@ static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
start = end; \
}
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
/*
* Allocate a new slab for kernel map entries. The kernel map may be locked or
@@ -264,7 +264,7 @@ vm_map_startup(void)
kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
UMA_ZONE_VM | UMA_ZONE_NOBUCKET);
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
/* Reserve an extra map entry for use when replenishing the reserve. */
uma_zone_reserve(kmapentzone, KMAPENT_RESERVE + 1);
uma_prealloc(kmapentzone, KMAPENT_RESERVE + 1);
@@ -660,7 +660,7 @@ _vm_map_unlock(vm_map_t map, const char *file, int line)
VM_MAP_UNLOCK_CONSISTENT(map);
if (map->system_map) {
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
if (map == kernel_map && (map->flags & MAP_REPLENISH) != 0) {
uma_prealloc(kmapentzone, 1);
map->flags &= ~MAP_REPLENISH;
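On configurations without a direct map (the option now spelled UMA_USE_DMAP rather than UMA_MD_SMALL_ALLOC), kernel map entries come from a preallocated reserve, and replenishing that reserve can itself consume a map entry; hence the zone is primed with KMAPENT_RESERVE + 1 items, and the uma_prealloc() is deferred to unlock time via the MAP_REPLENISH flag, as seen above. Below is a minimal userspace model of the "+1" invariant; the names (struct pool, pool_get, the reserve size) are hypothetical stand-ins for the UMA machinery, not kernel APIs.

#include <assert.h>
#include <stdio.h>

#define KMAPENT_RESERVE 8	/* illustrative; not the kernel's value */

struct pool {
	int avail;
};

static int
pool_get(struct pool *p)
{
	assert(p->avail > 0);	/* the reserve guarantees progress */
	return (p->avail--);
}

static void
pool_replenish(struct pool *p)
{
	/* Allocating the replacement slab consumes one entry itself... */
	pool_get(p);
	/* ...then the pool is refilled, keeping one spare in hand. */
	p->avail = KMAPENT_RESERVE + 1;
}

int
main(void)
{
	struct pool pool = { .avail = KMAPENT_RESERVE + 1 };
	int i;

	for (i = 0; i < KMAPENT_RESERVE; i++)
		pool_get(&pool);
	pool_replenish(&pool);	/* the "+1" keeps this step from failing */
	printf("available after replenish: %d\n", pool.avail);
	return (0);
}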
@@ -937,7 +937,7 @@ vm_map_entry_create(vm_map_t map)
{
vm_map_entry_t new_entry;
-#ifndef UMA_MD_SMALL_ALLOC
+#ifndef UMA_USE_DMAP
if (map == kernel_map) {
VM_MAP_ASSERT_LOCKED(map);
@@ -1993,8 +1993,15 @@ out:
return (result);
}
+#if VM_NRESERVLEVEL <= 1
static const int aslr_pages_rnd_64[2] = {0x1000, 0x10};
static const int aslr_pages_rnd_32[2] = {0x100, 0x4};
+#elif VM_NRESERVLEVEL == 2
+static const int aslr_pages_rnd_64[3] = {0x1000, 0x1000, 0x10};
+static const int aslr_pages_rnd_32[3] = {0x100, 0x100, 0x4};
+#else
+#error "Unsupported VM_NRESERVLEVEL"
+#endif
static int cluster_anon = 1;
SYSCTL_INT(_vm, OID_AUTO, cluster_anon, CTLFLAG_RW,
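The tables give the gap, in pages, left around a randomized mapping: the row is chosen by the width of the address space, the column by the reservation (superpage) level used for alignment, and the VM_NRESERVLEVEL == 2 variant adds a middle level. A standalone check of the lookup expression used later in vm_map_find_locked(); the map bounds here are illustrative, and MAP_32BIT_MAX_ADDR mirrors the vm_map.h definition.

#include <stdio.h>

#define MAP_32BIT_MAX_ADDR	((unsigned long)1 << 31)

static const int aslr_pages_rnd_64[3] = {0x1000, 0x1000, 0x10};
static const int aslr_pages_rnd_32[3] = {0x100, 0x100, 0x4};

int
main(void)
{
	unsigned long map_max = 0x7fffffffffffUL; /* illustrative 64-bit map */
	unsigned long max_addr = 0;		  /* no caller-imposed cap */
	int pidx, gap;

	for (pidx = 0; pidx <= 2; pidx++) {
		/* Same selection expression as in vm_map_find_locked(). */
		gap = map_max > MAP_32BIT_MAX_ADDR &&
		    (max_addr == 0 || max_addr > MAP_32BIT_MAX_ADDR) ?
		    aslr_pages_rnd_64[pidx] : aslr_pages_rnd_32[pidx];
		printf("pidx %d: gap %#x pages\n", pidx, gap);
	}
	return (0);
}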
@@ -2110,9 +2117,24 @@ vm_map_find_aligned(vm_map_t map, vm_offset_t *addr, vm_size_t length,
*/
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
- vm_offset_t *addr, /* IN/OUT */
- vm_size_t length, vm_offset_t max_addr, int find_space,
- vm_prot_t prot, vm_prot_t max, int cow)
+ vm_offset_t *addr, /* IN/OUT */
+ vm_size_t length, vm_offset_t max_addr, int find_space,
+ vm_prot_t prot, vm_prot_t max, int cow)
+{
+ int rv;
+
+ vm_map_lock(map);
+ rv = vm_map_find_locked(map, object, offset, addr, length, max_addr,
+ find_space, prot, max, cow);
+ vm_map_unlock(map);
+ return (rv);
+}
+
+int
+vm_map_find_locked(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
+ vm_offset_t *addr, /* IN/OUT */
+ vm_size_t length, vm_offset_t max_addr, int find_space,
+ vm_prot_t prot, vm_prot_t max, int cow)
{
vm_offset_t alignment, curr_min_addr, min_addr;
int gap, pidx, rv, try;
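This is the usual locked-worker split: vm_map_find() becomes a thin wrapper that takes and drops the map lock around vm_map_find_locked(), so the worker can return errors directly (the goto done paths disappear in the hunks below) and callers that already hold the map lock can reach it. A generic sketch of the pattern, with hypothetical names:

#include <pthread.h>
#include <stdio.h>

struct obj {
	pthread_mutex_t	lock;
	int		value;
};

static int
obj_set_locked(struct obj *o, int v)
{
	/* Caller must hold o->lock; errors are returned directly. */
	if (v < 0)
		return (-1);
	o->value = v;
	return (0);
}

static int
obj_set(struct obj *o, int v)
{
	int rv;

	pthread_mutex_lock(&o->lock);
	rv = obj_set_locked(o, v);
	pthread_mutex_unlock(&o->lock);
	return (rv);
}

int
main(void)
{
	struct obj o = { .lock = PTHREAD_MUTEX_INITIALIZER };

	printf("%d\n", obj_set(&o, 42));
	return (0);
}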
@@ -2120,7 +2142,7 @@ vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
object == NULL,
- ("vm_map_find: non-NULL backing object for stack"));
+ ("non-NULL backing object for stack"));
MPASS((cow & MAP_REMAP) == 0 || (find_space == VMFS_NO_SPACE &&
(cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0));
if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
@@ -2143,7 +2165,6 @@ vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
(map->flags & MAP_ASLR_IGNSTART) != 0)
curr_min_addr = min_addr = vm_map_min(map);
try = 0;
- vm_map_lock(map);
if (cluster) {
curr_min_addr = map->anon_loc;
if (curr_min_addr == 0)
@@ -2190,9 +2211,23 @@ again:
* Find space for allocation, including
* gap needed for later randomization.
*/
- pidx = MAXPAGESIZES > 1 && pagesizes[1] != 0 &&
- (find_space == VMFS_SUPER_SPACE || find_space ==
- VMFS_OPTIMAL_SPACE) ? 1 : 0;
+ pidx = 0;
+#if VM_NRESERVLEVEL > 0
+ if ((find_space == VMFS_SUPER_SPACE ||
+ find_space == VMFS_OPTIMAL_SPACE) &&
+ pagesizes[VM_NRESERVLEVEL] != 0) {
+ /*
+ * Do not pointlessly increase the space that
+ * is requested from vm_map_findspace().
+ * pmap_align_superpage() will only change a
+ * mapping's alignment if that mapping is at
+ * least a superpage in size.
+ */
+ pidx = VM_NRESERVLEVEL;
+ while (pidx > 0 && length < pagesizes[pidx])
+ pidx--;
+ }
+#endif
gap = vm_map_max(map) > MAP_32BIT_MAX_ADDR &&
(max_addr == 0 || max_addr > MAP_32BIT_MAX_ADDR) ?
aslr_pages_rnd_64[pidx] : aslr_pages_rnd_32[pidx];
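The old expression could only ever consider pagesizes[1]; the new loop starts at the largest reservation level and steps down until the request is at least one superpage of that size, since, per the comment, pmap_align_superpage() will not align anything smaller. A standalone model of the selection, assuming illustrative 4 KB / 2 MB / 1 GB page sizes:

#include <stdio.h>

#define VM_NRESERVLEVEL 2

static const unsigned long pagesizes[VM_NRESERVLEVEL + 1] = {
	4096, 2UL * 1024 * 1024, 1024UL * 1024 * 1024
};

static int
pick_pidx(unsigned long length)
{
	int pidx;

	/* Step down until the mapping spans a whole superpage. */
	pidx = VM_NRESERVLEVEL;
	while (pidx > 0 && length < pagesizes[pidx])
		pidx--;
	return (pidx);
}

int
main(void)
{
	printf("64 KB -> pidx %d\n", pick_pidx(64UL * 1024));
	printf("4 MB  -> pidx %d\n", pick_pidx(4UL * 1024 * 1024));
	printf("2 GB  -> pidx %d\n", pick_pidx(2UL * 1024 * 1024 * 1024));
	return (0);
}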
@@ -2214,8 +2249,7 @@ again:
MPASS(try == 1);
goto again;
}
- rv = KERN_NO_SPACE;
- goto done;
+ return (KERN_NO_SPACE);
}
}
@@ -2229,16 +2263,14 @@ again:
try = 0;
goto again;
}
- goto done;
+ return (rv);
}
} else if ((cow & MAP_REMAP) != 0) {
- if (!vm_map_range_valid(map, *addr, *addr + length)) {
- rv = KERN_INVALID_ADDRESS;
- goto done;
- }
+ if (!vm_map_range_valid(map, *addr, *addr + length))
+ return (KERN_INVALID_ADDRESS);
rv = vm_map_delete(map, *addr, *addr + length);
if (rv != KERN_SUCCESS)
- goto done;
+ return (rv);
}
if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot,
@@ -2247,10 +2279,15 @@ again:
rv = vm_map_insert(map, object, offset, *addr, *addr + length,
prot, max, cow);
}
- if (rv == KERN_SUCCESS && update_anon)
- map->anon_loc = *addr + length;
-done:
- vm_map_unlock(map);
+
+ /*
+ * Update the starting address for clustered anonymous memory mappings
+ * if a starting address was not previously defined or an ASLR restart
+ * placed an anonymous memory mapping at a lower address.
+ */
+ if (update_anon && rv == KERN_SUCCESS && (map->anon_loc == 0 ||
+ *addr < map->anon_loc))
+ map->anon_loc = *addr;
return (rv);
}
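Where the hint previously advanced past every successful clustered mapping (and had to be pulled back in vm_map_delete()), anon_loc now only moves toward lower addresses, tracking the lowest anonymous mapping; this is why the final hunk drops the adjustment from vm_map_delete(). A tiny model of the new policy:

#include <stdio.h>

/* 0 means "no clustered anonymous mapping placed yet". */
static unsigned long anon_loc;

static void
anon_loc_update(unsigned long addr)
{
	if (anon_loc == 0 || addr < anon_loc)
		anon_loc = addr;
}

int
main(void)
{
	anon_loc_update(0x800000);	/* first mapping defines the hint */
	anon_loc_update(0xa00000);	/* higher mapping: hint unchanged */
	anon_loc_update(0x600000);	/* ASLR restart below: hint lowered */
	printf("anon_loc = %#lx\n", anon_loc);
	return (0);
}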
@@ -2649,6 +2686,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
vm_offset_t start;
vm_page_t p, p_start;
vm_pindex_t mask, psize, threshold, tmpidx;
+ int psind;
if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
return;
@@ -2703,13 +2741,17 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
p_start = p;
}
/* Jump ahead if a superpage mapping is possible. */
- if (p->psind > 0 && ((addr + ptoa(tmpidx)) &
- (pagesizes[p->psind] - 1)) == 0) {
- mask = atop(pagesizes[p->psind]) - 1;
- if (tmpidx + mask < psize &&
- vm_page_ps_test(p, PS_ALL_VALID, NULL)) {
- p += mask;
- threshold += mask;
+ for (psind = p->psind; psind > 0; psind--) {
+ if (((addr + ptoa(tmpidx)) &
+ (pagesizes[psind] - 1)) == 0) {
+ mask = atop(pagesizes[psind]) - 1;
+ if (tmpidx + mask < psize &&
+ vm_page_ps_test(p, psind,
+ PS_ALL_VALID, NULL)) {
+ p += mask;
+ threshold += mask;
+ break;
+ }
}
}
} else if (p_start != NULL) {
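vm_page_ps_test() takes an explicit psind argument here, and the single alignment test becomes a loop from the page's own psind downward, so a run that cannot be mapped with the largest superpage may still qualify for a smaller one. A simplified model of the jump-ahead, with vm_page_ps_test() replaced by a stub that accepts every run and illustrative 4 KB / 2 MB / 1 GB page sizes:

#include <stdio.h>

#define PAGE_SHIFT 12

static const unsigned long pagesizes[3] = {
	1UL << 12, 1UL << 21, 1UL << 30
};

/* Stand-in for vm_page_ps_test(): pretend every run is fully valid. */
static int
ps_test(int psind)
{
	(void)psind;
	return (1);
}

int
main(void)
{
	unsigned long addr = 1UL << 30;		/* 1 GB aligned start */
	unsigned long tmpidx = 0, psize = 1UL << 20, mask;
	int psind;

	for (psind = 2; psind > 0; psind--) {
		if (((addr + (tmpidx << PAGE_SHIFT)) &
		    (pagesizes[psind] - 1)) == 0) {
			mask = (pagesizes[psind] >> PAGE_SHIFT) - 1;
			/* Skip the whole run if it fits and is valid. */
			if (tmpidx + mask < psize && ps_test(psind)) {
				printf("skip %lu pages at psind %d\n",
				    mask, psind);
				break;
			}
		}
	}
	return (0);
}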
@@ -4041,9 +4083,6 @@ vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
entry->object.vm_object != NULL)
pmap_map_delete(map->pmap, entry->start, entry->end);
- if (entry->end == map->anon_loc)
- map->anon_loc = entry->start;
-
/*
* Delete the entry only after removing all pmap
* entries pointing to its pages. (Otherwise, its