author	Konstantin Belousov <kib@FreeBSD.org>	2013-03-14 20:18:12 +0000
committer	Konstantin Belousov <kib@FreeBSD.org>	2013-03-14 20:18:12 +0000
commit	e8a4a618cf321ba28407942ee8bbc2f7c9164022 (patch)
tree	7fc0d10a416efdb974f78de808274b7ebcd9475a /sys/powerpc
parent	d44ae92a6925c8124257b38064979fe238229a84 (diff)
Add the pmap function pmap_copy_pages(), which copies content between
pages, taking arrays of vm_page_t for both the source and the
destination. Starting offsets and the total transfer size are
specified.

The function implements an optimal copying algorithm using
platform-specific optimizations. For instance, on architectures where
the direct map is available, no transient mappings are created; on
i386, the per-CPU ephemeral page frame is used. The code was typically
borrowed from pmap_copy_page() for the same architecture.

Only the i386/amd64, powerpc aim, and arm/arm-v6 implementations were
tested at the time of commit. High-level code, not yet committed to
the tree, ensures that use of the function is only allowed after
explicit enablement.

For sparc64, the existing code has known issues, so a stub is added
instead to allow the kernel to link.

Sponsored by:	The FreeBSD Foundation
Tested by:	pho (i386, amd64), scottl (amd64), ian (arm and arm-v6)
MFC after:	2 weeks
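As a caller-side illustration (not part of this commit), copying a run
of bytes between two sets of pages reduces to a single call. The
wrapper copy_between_buffers() and its arguments below are
hypothetical; only pmap_copy_pages() itself comes from this change.

/*
 * Hypothetical caller sketch: both page arrays are assumed to be held
 * by the caller, and each offset is measured from the start of the
 * first page of its array (page index = offset >> PAGE_SHIFT).
 */
static void
copy_between_buffers(vm_page_t *src_pages, vm_offset_t src_off,
    vm_page_t *dst_pages, vm_offset_t dst_off, int len)
{

	/* One call replaces a per-page map/bcopy/unmap loop. */
	pmap_copy_pages(src_pages, src_off, dst_pages, dst_off, len);
}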
Notes:
    svn path=/head/; revision=248280
Diffstat (limited to 'sys/powerpc')
 -rw-r--r--  sys/powerpc/aim/mmu_oea.c            | 27
 -rw-r--r--  sys/powerpc/aim/mmu_oea64.c          | 69
 -rw-r--r--  sys/powerpc/booke/pmap.c             | 33
 -rw-r--r--  sys/powerpc/powerpc/mmu_if.m         |  8
 -rw-r--r--  sys/powerpc/powerpc/pmap_dispatch.c  | 10
 5 files changed, 147 insertions(+), 0 deletions(-)
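Every implementation in the diff below repeats the same chunking loop,
differing only in how a page is made addressable. Distilled as a
sketch, with map_source_page() and map_dest_page() as hypothetical
stand-ins for the platform step (direct-map arithmetic, a scratch
mapping, or a transient kenter):

	/*
	 * Sketch of the shared loop: advance in chunks bounded by the
	 * nearer of the source and destination page boundaries, so no
	 * bcopy ever crosses a page edge in either array.
	 */
	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		a_cp = map_source_page(ma[a_offset >> PAGE_SHIFT]) +
		    a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		b_cp = map_dest_page(mb[b_offset >> PAGE_SHIFT]) +
		    b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}

Taking the min against both page remainders guarantees each chunk is
valid in both arrays; the offsets then advance in lockstep until
xfersize is exhausted.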
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 6a339ca217a5..ddd22ae64a33 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -275,6 +275,8 @@ void moea_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea_clear_modify(mmu_t, vm_page_t);
void moea_clear_reference(mmu_t, vm_page_t);
void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
+void moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+ vm_page_t *mb, vm_offset_t b_offset, int xfersize);
void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
vm_prot_t);
@@ -320,6 +322,7 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_clear_modify, moea_clear_modify),
MMUMETHOD(mmu_clear_reference, moea_clear_reference),
MMUMETHOD(mmu_copy_page, moea_copy_page),
+ MMUMETHOD(mmu_copy_pages, moea_copy_pages),
MMUMETHOD(mmu_enter, moea_enter),
MMUMETHOD(mmu_enter_object, moea_enter_object),
MMUMETHOD(mmu_enter_quick, moea_enter_quick),
@@ -1043,6 +1046,30 @@ moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
bcopy((void *)src, (void *)dst, PAGE_SIZE);
}

+void
+moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+ vm_page_t *mb, vm_offset_t b_offset, int xfersize)
+{
+ void *a_cp, *b_cp;
+ vm_offset_t a_pg_offset, b_pg_offset;
+ int cnt;
+
+ while (xfersize > 0) {
+ a_pg_offset = a_offset & PAGE_MASK;
+ cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+ a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
+ a_pg_offset;
+ b_pg_offset = b_offset & PAGE_MASK;
+ cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+ b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
+ b_pg_offset;
+ bcopy(a_cp, b_cp, cnt);
+ a_offset += cnt;
+ b_offset += cnt;
+ xfersize -= cnt;
+ }
+}
+
/*
* Zero a page of physical memory by temporarily mapping it into the tlb.
*/
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 14b88f0935e6..90466e8a485e 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -290,6 +290,8 @@ void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_clear_reference(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
+void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+ vm_page_t *mb, vm_offset_t b_offset, int xfersize);
void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
vm_prot_t);
@@ -334,6 +336,7 @@ static mmu_method_t moea64_methods[] = {
MMUMETHOD(mmu_clear_modify, moea64_clear_modify),
MMUMETHOD(mmu_clear_reference, moea64_clear_reference),
MMUMETHOD(mmu_copy_page, moea64_copy_page),
+ MMUMETHOD(mmu_copy_pages, moea64_copy_pages),
MMUMETHOD(mmu_enter, moea64_enter),
MMUMETHOD(mmu_enter_object, moea64_enter_object),
MMUMETHOD(mmu_enter_quick, moea64_enter_quick),
@@ -1104,6 +1107,72 @@ moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
}
}

+static inline void
+moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+ vm_page_t *mb, vm_offset_t b_offset, int xfersize)
+{
+ void *a_cp, *b_cp;
+ vm_offset_t a_pg_offset, b_pg_offset;
+ int cnt;
+
+ while (xfersize > 0) {
+ a_pg_offset = a_offset & PAGE_MASK;
+ cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+ a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
+ a_pg_offset;
+ b_pg_offset = b_offset & PAGE_MASK;
+ cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+ b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
+ b_pg_offset;
+ bcopy(a_cp, b_cp, cnt);
+ a_offset += cnt;
+ b_offset += cnt;
+ xfersize -= cnt;
+ }
+}
+
+static inline void
+moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+ vm_page_t *mb, vm_offset_t b_offset, int xfersize)
+{
+ void *a_cp, *b_cp;
+ vm_offset_t a_pg_offset, b_pg_offset;
+ int cnt;
+
+ mtx_lock(&moea64_scratchpage_mtx);
+ while (xfersize > 0) {
+ a_pg_offset = a_offset & PAGE_MASK;
+ cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+ moea64_set_scratchpage_pa(mmu, 0,
+ VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
+ a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset;
+ b_pg_offset = b_offset & PAGE_MASK;
+ cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+ moea64_set_scratchpage_pa(mmu, 1,
+ VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
+ b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset;
+ bcopy(a_cp, b_cp, cnt);
+ a_offset += cnt;
+ b_offset += cnt;
+ xfersize -= cnt;
+ }
+ mtx_unlock(&moea64_scratchpage_mtx);
+}
+
+void
+moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+ vm_page_t *mb, vm_offset_t b_offset, int xfersize)
+{
+
+ if (hw_direct_map) {
+ moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset,
+ xfersize);
+ } else {
+ moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset,
+ xfersize);
+ }
+}
+
void
moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 343b04693763..b9f4838893a8 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -274,6 +274,8 @@ static void mmu_booke_clear_reference(mmu_t, vm_page_t);
static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
vm_size_t, vm_offset_t);
static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
+static void mmu_booke_copy_pages(mmu_t, vm_page_t *,
+ vm_offset_t, vm_page_t *, vm_offset_t, int);
static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
vm_prot_t, boolean_t);
static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
@@ -334,6 +336,7 @@ static mmu_method_t mmu_booke_methods[] = {
MMUMETHOD(mmu_clear_reference, mmu_booke_clear_reference),
MMUMETHOD(mmu_copy, mmu_booke_copy),
MMUMETHOD(mmu_copy_page, mmu_booke_copy_page),
+ MMUMETHOD(mmu_copy_pages, mmu_booke_copy_pages),
MMUMETHOD(mmu_enter, mmu_booke_enter),
MMUMETHOD(mmu_enter_object, mmu_booke_enter_object),
MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick),
@@ -2136,6 +2139,36 @@ mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
mtx_unlock(&copy_page_mutex);
}

+static inline void
+mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+ vm_page_t *mb, vm_offset_t b_offset, int xfersize)
+{
+ void *a_cp, *b_cp;
+ vm_offset_t a_pg_offset, b_pg_offset;
+ int cnt;
+
+ mtx_lock(&copy_page_mutex);
+ while (xfersize > 0) {
+ a_pg_offset = a_offset & PAGE_MASK;
+ cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+ mmu_booke_kenter(mmu, copy_page_src_va,
+ VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
+ a_cp = (char *)copy_page_src_va + a_pg_offset;
+ b_pg_offset = b_offset & PAGE_MASK;
+ cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+ mmu_booke_kenter(mmu, copy_page_dst_va,
+ VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
+ b_cp = (char *)copy_page_dst_va + b_pg_offset;
+ bcopy(a_cp, b_cp, cnt);
+ mmu_booke_kremove(mmu, copy_page_dst_va);
+ mmu_booke_kremove(mmu, copy_page_src_va);
+ a_offset += cnt;
+ b_offset += cnt;
+ xfersize -= cnt;
+ }
+ mtx_unlock(&copy_page_mutex);
+}
+
/*
* mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
* into virtual memory and using bzero to clear its contents. This is intended
diff --git a/sys/powerpc/powerpc/mmu_if.m b/sys/powerpc/powerpc/mmu_if.m
index 8cd6e521d3dd..0382bd8ba0ae 100644
--- a/sys/powerpc/powerpc/mmu_if.m
+++ b/sys/powerpc/powerpc/mmu_if.m
@@ -215,6 +215,14 @@ METHOD void copy_page {
vm_page_t _dst;
};

+METHOD void copy_pages {
+ mmu_t _mmu;
+ vm_page_t *_ma;
+ vm_offset_t _a_offset;
+ vm_page_t *_mb;
+ vm_offset_t _b_offset;
+ int _xfersize;
+};
+
/**
* @brief Create a mapping between a virtual/physical address pair in the
diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c
index c919196525e5..42f1a3975b5a 100644
--- a/sys/powerpc/powerpc/pmap_dispatch.c
+++ b/sys/powerpc/powerpc/pmap_dispatch.c
@@ -133,6 +133,16 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
}

void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+ vm_offset_t b_offset, int xfersize)
+{
+
+ CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma,
+ a_offset, mb, b_offset, xfersize);
+ MMU_COPY_PAGES(mmu_obj, ma, a_offset, mb, b_offset, xfersize);
+}
+
+void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t p,
vm_prot_t prot, boolean_t wired)
{