author:    Leandro Lupori <luporl@FreeBSD.org>  2019-06-07 17:58:59 +0000
committer: Leandro Lupori <luporl@FreeBSD.org>  2019-06-07 17:58:59 +0000
commit:    b934fc74683bd28c5c24175c2b3f4603b2d5fa1f (patch)
tree:      b3cbf3d659263ce01f812e942dd3a90231f97582
parent:    007d50155fb6b636b62051a36ec5915ca87a4139 (diff)
[PPC64] Support QEMU/KVM pseries without hugepages
This set of changes makes it possible to run FreeBSD for PowerPC64/pseries,
under QEMU/KVM, without requiring the host to make hugepages available to
the guest. While this was already possible in principle, by setting
hw_direct_map to 0, on PowerPC64 a couple of issues/wrong assumptions
prevented it from working before this changelist.

Reviewed by:		jhibbits
Differential Revision:	https://reviews.freebsd.org/D20522
Notes:
    svn path=/head/; revision=348783
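
For readers unfamiliar with the two mapping paths involved: with
hw_direct_map set, any physical address is reachable through the direct map
via PHYS_TO_DMAP(); without it, each page must be entered into the kernel
pmap explicitly, at a virtual address inside the DMAP range. A minimal
sketch of the pattern the diff below applies in several places follows.
phys_to_kva() is a hypothetical helper invented for illustration;
hw_direct_map, PHYS_TO_DMAP, DMAP_BASE_ADDRESS and pmap_kenter are the
real kernel symbols used in the patch:

	/* Sketch only, not part of the commit. */
	static void *
	phys_to_kva(vm_paddr_t pa)
	{
		void *va;

		if (hw_direct_map) {
			/* Hugepage-backed direct map: plain offset translation. */
			va = (void *)PHYS_TO_DMAP(pa);
		} else {
			/*
			 * No direct map: choose a VA in the DMAP range and
			 * create the mapping by hand.
			 */
			va = (void *)(pa | DMAP_BASE_ADDRESS);
			pmap_kenter((vm_offset_t)va, pa);
		}
		return (va);
	}
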
-rw-r--r--  sys/powerpc/aim/mmu_oea64.c     | 26
-rw-r--r--  sys/powerpc/aim/mmu_oea64.h     |  1
-rw-r--r--  sys/powerpc/aim/slb.c           |  8
-rw-r--r--  sys/powerpc/pseries/mmu_phyp.c  | 47
4 files changed, 64 insertions, 18 deletions
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index ae285b72e80e..90dedc7518cf 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -638,7 +638,7 @@ moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
{
struct pvo_entry *pvo;
register_t msr;
- vm_paddr_t pa;
+ vm_paddr_t pa, pkernelstart, pkernelend;
vm_offset_t size, off;
uint64_t pte_lo;
int i;
@@ -686,9 +686,11 @@ moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
*/
if (!hw_direct_map || kernelstart < DMAP_BASE_ADDRESS) {
- for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
+ pkernelstart = kernelstart & ~DMAP_BASE_ADDRESS;
+ pkernelend = kernelend & ~DMAP_BASE_ADDRESS;
+ for (pa = pkernelstart & ~PAGE_MASK; pa < pkernelend;
pa += PAGE_SIZE)
- moea64_kenter(mmup, pa, pa);
+ moea64_kenter(mmup, pa | DMAP_BASE_ADDRESS, pa);
}
if (!hw_direct_map) {
@@ -696,6 +698,10 @@ moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
off = (vm_offset_t)(moea64_bpvo_pool);
for (pa = off; pa < off + size; pa += PAGE_SIZE)
moea64_kenter(mmup, pa, pa);
+
+ /* Map exception vectors */
+ for (pa = EXC_RSVD; pa < EXC_LAST; pa += PAGE_SIZE)
+ moea64_kenter(mmup, pa | DMAP_BASE_ADDRESS, pa);
}
ENABLE_TRANS(msr);
@@ -875,7 +881,7 @@ moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
* Initialise the bootstrap pvo pool.
*/
moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
- moea64_bpvo_pool_size*sizeof(struct pvo_entry), 0);
+ moea64_bpvo_pool_size*sizeof(struct pvo_entry), PAGE_SIZE);
moea64_bpvo_pool_index = 0;
/* Place at address usable through the direct map */
@@ -1169,15 +1175,19 @@ moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
*/
static __inline
-void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_paddr_t pa) {
+void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_paddr_t pa)
+{
+ struct pvo_entry *pvo;
KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);
- moea64_scratchpage_pvo[which]->pvo_pte.pa =
+ pvo = moea64_scratchpage_pvo[which];
+ PMAP_LOCK(pvo->pvo_pmap);
+ pvo->pvo_pte.pa =
moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
- MOEA64_PTE_REPLACE(mmup, moea64_scratchpage_pvo[which],
- MOEA64_PTE_INVALIDATE);
+ MOEA64_PTE_REPLACE(mmup, pvo, MOEA64_PTE_INVALIDATE);
+ PMAP_UNLOCK(pvo->pvo_pmap);
isync();
}
diff --git a/sys/powerpc/aim/mmu_oea64.h b/sys/powerpc/aim/mmu_oea64.h
index 12ed3bd1e4b5..d791c0fbba0a 100644
--- a/sys/powerpc/aim/mmu_oea64.h
+++ b/sys/powerpc/aim/mmu_oea64.h
@@ -81,6 +81,7 @@ extern u_int moea64_pte_overflow;
extern int moea64_large_page_shift;
extern uint64_t moea64_large_page_size;
+extern uint64_t moea64_large_page_mask;
extern u_long moea64_pteg_count;
extern u_long moea64_pteg_mask;
extern int n_slbs;
diff --git a/sys/powerpc/aim/slb.c b/sys/powerpc/aim/slb.c
index 0f6e356a9e3d..8dcaf10cc611 100644
--- a/sys/powerpc/aim/slb.c
+++ b/sys/powerpc/aim/slb.c
@@ -500,10 +500,12 @@ slb_uma_real_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
if (m == NULL)
return (NULL);
- va = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
-
- if (!hw_direct_map)
+ if (hw_direct_map)
+ va = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+ else {
+ va = (void *)(VM_PAGE_TO_PHYS(m) | DMAP_BASE_ADDRESS);
pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));
+ }
if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
bzero(va, PAGE_SIZE);
diff --git a/sys/powerpc/pseries/mmu_phyp.c b/sys/powerpc/pseries/mmu_phyp.c
index cabefedf1fa7..ab78df15defc 100644
--- a/sys/powerpc/pseries/mmu_phyp.c
+++ b/sys/powerpc/pseries/mmu_phyp.c
@@ -59,6 +59,16 @@ __FBSDID("$FreeBSD$");
#include "phyp-hvcall.h"
+#define MMU_PHYP_DEBUG 0
+#define MMU_PHYP_ID "mmu_phyp: "
+#if MMU_PHYP_DEBUG
+#define dprintf(fmt, ...) printf(fmt, ## __VA_ARGS__)
+#define dprintf0(fmt, ...) dprintf(MMU_PHYP_ID fmt, ## __VA_ARGS__)
+#else
+#define dprintf(fmt, args...) do { ; } while(0)
+#define dprintf0(fmt, args...) do { ; } while(0)
+#endif
+
static struct rmlock mphyp_eviction_lock;
/*
@@ -149,6 +159,7 @@ mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
res = OF_getencprop(node, "ibm,slb-size", prop, sizeof(prop[0]));
if (res > 0)
n_slbs = prop[0];
+ dprintf0("slb-size=%i\n", n_slbs);
moea64_pteg_count = final_pteg_count / sizeof(struct lpteg);
@@ -185,11 +196,22 @@ mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
shift = arr[idx];
slb_encoding = arr[idx + 1];
nptlp = arr[idx + 2];
+
+ dprintf0("Segment Page Size: "
+ "%uKB, slb_enc=0x%X: {size, encoding}[%u] =",
+ shift > 10? 1 << (shift-10) : 0,
+ slb_encoding, nptlp);
+
idx += 3;
len -= 3;
while (len > 0 && nptlp) {
lp_size = arr[idx];
lp_encoding = arr[idx+1];
+
+ dprintf(" {%uKB, 0x%X}",
+ lp_size > 10? 1 << (lp_size-10) : 0,
+ lp_encoding);
+
if (slb_encoding == SLBV_L && lp_encoding == 0)
break;
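
(Aside, not part of the patch: the KB values in these debug prints are
derived from log2 sizes in bytes, so shift = 12 gives 1 << (12-10) = 4 KB
base pages, while shift = 24 gives 1 << (24-10) = 16384 KB, i.e. the 16 MB
hugepages commonly used for the PowerPC64 direct map.)
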
@@ -197,17 +219,28 @@ mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
len -= 2;
nptlp--;
}
+ dprintf("\n");
if (nptlp && slb_encoding == SLBV_L && lp_encoding == 0)
break;
}
- if (len == 0)
- panic("Standard large pages (SLB[L] = 1, PTE[LP] = 0) "
- "not supported by this system. Please enable huge "
- "page backing if running under PowerKVM.");
-
- moea64_large_page_shift = shift;
- moea64_large_page_size = 1ULL << lp_size;
+ if (len > 0) {
+ moea64_large_page_shift = shift;
+ moea64_large_page_size = 1ULL << lp_size;
+ moea64_large_page_mask = moea64_large_page_size - 1;
+ hw_direct_map = 1;
+ printf(MMU_PHYP_ID
+ "Support for hugepages of %uKB detected\n",
+ moea64_large_page_shift > 10?
+ 1 << (moea64_large_page_shift-10) : 0);
+ } else {
+ moea64_large_page_size = 0;
+ moea64_large_page_shift = 0;
+ moea64_large_page_mask = 0;
+ hw_direct_map = 0;
+ printf(MMU_PHYP_ID
+ "Support for hugepages not found\n");
+ }
}
moea64_mid_bootstrap(mmup, kernelstart, kernelend);
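
One note on the newly exported moea64_large_page_mask: it is the usual
power-of-two mask (size - 1), and a typical use, sketched here as a
hypothetical example rather than code from this changeset, is testing
large-page alignment:

	/*
	 * Hypothetical usage sketch, not from this diff: with
	 * moea64_large_page_mask == moea64_large_page_size - 1, a physical
	 * address on a large-page boundary has no bits set inside the mask.
	 * When no hugepage support was found the mask is 0 and the test is
	 * trivially true, so callers would check hw_direct_map first.
	 */
	if (hw_direct_map && (pa & moea64_large_page_mask) == 0)
		printf("pa %#lx is large-page aligned\n", (unsigned long)pa);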