aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKonstantin Belousov <kib@FreeBSD.org>2020-09-20 22:16:24 +0000
committerKonstantin Belousov <kib@FreeBSD.org>2020-09-20 22:16:24 +0000
commit7149d7209ea20969568edb115ba616866d1828c5 (patch)
treebe1397d45671ab14d1b81523b48dbdeb5476fdeb
parent1440f62266cd056b1520e799c02406020e526c65 (diff)
downloadsrc-7149d7209ea20969568edb115ba616866d1828c5.tar.gz
src-7149d7209ea20969568edb115ba616866d1828c5.zip
amd64 pmap: handle cases where pml4 page table page is not allocated.
Possible in LA57 pmap config.

Noted by: alc
Reviewed by: alc, markj
Sponsored by: The FreeBSD Foundation
Differential revision: https://reviews.freebsd.org/D26492
Notes
Notes: svn path=/head/; revision=365931
-rw-r--r--sys/amd64/amd64/pmap.c14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index f5a129f50d21..2f87b1bac050 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -6219,7 +6219,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
PMAP_LOCK(pmap);
for (; sva < eva; sva = va_next) {
pml4e = pmap_pml4e(pmap, sva);
- if ((*pml4e & PG_V) == 0) {
+ if (pml4e == NULL || (*pml4e & PG_V) == 0) {
va_next = (sva + NBPML4) & ~PML4MASK;
if (va_next < sva)
va_next = eva;
@@ -6502,7 +6502,7 @@ restart:
if (!pmap_pkru_same(pmap, va, va + NBPDP))
return (KERN_PROTECTION_FAILURE);
pml4e = pmap_pml4e(pmap, va);
- if ((*pml4e & PG_V) == 0) {
+ if (pml4e == NULL || (*pml4e & PG_V) == 0) {
mp = _pmap_allocpte(pmap, pmap_pml4e_pindex(va),
NULL, va);
if (mp == NULL) {
@@ -7363,7 +7363,7 @@ pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
PMAP_LOCK(pmap);
for (; sva < eva; sva = va_next) {
pml4e = pmap_pml4e(pmap, sva);
- if ((*pml4e & PG_V) == 0) {
+ if (pml4e == NULL || (*pml4e & PG_V) == 0) {
va_next = (sva + NBPML4) & ~PML4MASK;
if (va_next < sva)
va_next = eva;
@@ -7488,7 +7488,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
("pmap_copy: invalid to pmap_copy page tables"));
pml4e = pmap_pml4e(src_pmap, addr);
- if ((*pml4e & PG_V) == 0) {
+ if (pml4e == NULL || (*pml4e & PG_V) == 0) {
va_next = (addr + NBPML4) & ~PML4MASK;
if (va_next < addr)
va_next = end_addr;
@@ -8571,7 +8571,7 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
PMAP_LOCK(pmap);
for (; sva < eva; sva = va_next) {
pml4e = pmap_pml4e(pmap, sva);
- if ((*pml4e & PG_V) == 0) {
+ if (pml4e == NULL || (*pml4e & PG_V) == 0) {
va_next = (sva + NBPML4) & ~PML4MASK;
if (va_next < sva)
va_next = eva;
@@ -9795,6 +9795,8 @@ pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num)
PMAP_LOCK(pmap);
pml4 = pmap_pml4e(pmap, va);
+ if (pml4 == NULL)
+ goto done;
ptr[idx++] = *pml4;
if ((*pml4 & PG_V) == 0)
goto done;
@@ -10893,7 +10895,7 @@ pmap_pkru_update_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
for (changed = false, va = sva; va < eva; va = va_next) {
pml4e = pmap_pml4e(pmap, va);
- if ((*pml4e & X86_PG_V) == 0) {
+ if (pml4e == NULL || (*pml4e & X86_PG_V) == 0) {
va_next = (va + NBPML4) & ~PML4MASK;
if (va_next < va)
va_next = eva;