| field | value | date |
|---|---|---|
| author | kib <kib@FreeBSD.org> | 2013-03-14 20:18:12 +0000 |
| committer | kib <kib@FreeBSD.org> | 2013-03-14 20:18:12 +0000 |
| commit | 63efc821c3e4785928997ea88e1de93e62ce3acb (patch) | |
| tree | 7fc0d10a416efdb974f78de808274b7ebcd9475a /sys/arm | |
| parent | 4824f825377f483e33c726bad073beda774d9f4b (diff) | |
Add the pmap function pmap_copy_pages(), which copies data between pages, taking arrays of vm_page_t for both the source and the destination. Starting offsets and the total transfer size are specified.

The function implements an optimal copy algorithm using platform-specific optimizations. For instance, on architectures where a direct map is available, no transient mappings are created; on i386, the per-CPU ephemeral page frame is used. The code was typically borrowed from the pmap_copy_page() implementation for the same architecture.

Only the i386/amd64, powerpc aim, and arm/arm-v6 implementations were tested at the time of commit. High-level code, not yet committed to the tree, ensures that use of the function is only allowed after explicit enablement.

For sparc64, the existing code has known issues, so a stub is added instead to allow the kernel to link.
Sponsored by: The FreeBSD Foundation
Tested by: pho (i386, amd64), scottl (amd64), ian (arm and arm-v6)
MFC after: 2 weeks
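The new interface takes parallel vm_page_t arrays plus byte offsets into the runs they describe, and it splits the transfer at page boundaries internally, so a consumer only hands over the pages and lets the pmap layer pick the copy strategy. A minimal hypothetical caller might look like the sketch below (the wrapper name and its arguments are illustrative only and not part of this commit):

```c
/*
 * Hypothetical consumer of the new interface: copy "size" bytes between
 * two runs of held/busied pages, starting at the given byte offsets.
 * pmap_copy_pages() clamps each chunk to the current page on both sides.
 */
static void
copy_page_runs(vm_page_t src[], vm_offset_t src_off,
    vm_page_t dst[], vm_offset_t dst_off, int size)
{

	pmap_copy_pages(src, src_off, dst, dst_off, size);
}
```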
Diffstat (limited to 'sys/arm')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | sys/arm/arm/pmap-v6.c | 39 |
| -rw-r--r-- | sys/arm/arm/pmap.c | 93 |
| -rw-r--r-- | sys/arm/include/pmap.h | 2 |
3 files changed, 133 insertions, 1 deletion
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index 2affa3e..0083f29 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -3313,6 +3313,45 @@ pmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst)
 }

 void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+    vm_offset_t b_offset, int xfersize)
+{
+        vm_page_t a_pg, b_pg;
+        vm_offset_t a_pg_offset, b_pg_offset;
+        int cnt;
+
+        mtx_lock(&cmtx);
+        while (xfersize > 0) {
+                a_pg = ma[a_offset >> PAGE_SHIFT];
+                a_pg_offset = a_offset & PAGE_MASK;
+                cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+                b_pg = mb[b_offset >> PAGE_SHIFT];
+                b_pg_offset = b_offset & PAGE_MASK;
+                cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+                *csrc_pte = L2_S_PROTO | VM_PAGE_TO_PHYS(a_pg) |
+                    pte_l2_s_cache_mode;
+                pmap_set_prot(csrc_pte, VM_PROT_READ, 0);
+                PTE_SYNC(csrc_pte);
+                *cdst_pte = L2_S_PROTO | VM_PAGE_TO_PHYS(b_pg) |
+                    pte_l2_s_cache_mode;
+                pmap_set_prot(cdst_pte, VM_PROT_READ | VM_PROT_WRITE, 0);
+                PTE_SYNC(cdst_pte);
+                cpu_tlb_flushD_SE(csrcp);
+                cpu_tlb_flushD_SE(cdstp);
+                cpu_cpwait();
+                bcopy((char *)csrcp + a_pg_offset, (char *)cdstp + b_pg_offset,
+                    cnt);
+                cpu_idcache_wbinv_range(cdstp + b_pg_offset, cnt);
+                pmap_l2cache_wbinv_range(cdstp + b_pg_offset,
+                    VM_PAGE_TO_PHYS(b_pg) + b_pg_offset, cnt);
+                xfersize -= cnt;
+                a_offset += cnt;
+                b_offset += cnt;
+        }
+        mtx_unlock(&cmtx);
+}
+
+void
 pmap_copy_page(vm_page_t src, vm_page_t dst)
 {
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index 7070cb2..c18783b 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -258,6 +258,9 @@ pt_entry_t pte_l1_c_proto;
 pt_entry_t pte_l2_s_proto;

 void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
+void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys,
+    vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs,
+    int cnt);
 void (*pmap_zero_page_func)(vm_paddr_t, int, int);

 struct msgbuf *msgbufp = 0;
@@ -400,6 +403,13 @@ static vm_paddr_t pmap_kernel_l2ptp_phys;
 static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
 static struct rwlock pvh_global_lock;

+void pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs,
+    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
+#if ARM_MMU_XSCALE == 1
+void pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs,
+    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
+#endif
+
 /*
  * This list exists for the benefit of pmap_map_chunk().  It keeps track
  * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
@@ -484,6 +494,7 @@ pmap_pte_init_generic(void)
         pte_l2_s_proto = L2_S_PROTO_generic;

         pmap_copy_page_func = pmap_copy_page_generic;
+        pmap_copy_page_offs_func = pmap_copy_page_offs_generic;
         pmap_zero_page_func = pmap_zero_page_generic;
 }
@@ -660,6 +671,7 @@ pmap_pte_init_xscale(void)
 #ifdef CPU_XSCALE_CORE3
         pmap_copy_page_func = pmap_copy_page_generic;
+        pmap_copy_page_offs_func = pmap_copy_page_offs_generic;
         pmap_zero_page_func = pmap_zero_page_generic;
         xscale_use_minidata = 0;
         /* Make sure it is L2-cachable */
@@ -672,6 +684,7 @@ pmap_pte_init_xscale(void)
 #else
         pmap_copy_page_func = pmap_copy_page_xscale;
+        pmap_copy_page_offs_func = pmap_copy_page_offs_xscale;
         pmap_zero_page_func = pmap_zero_page_xscale;
 #endif
@@ -4300,6 +4313,29 @@ pmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst)
         cpu_l2cache_inv_range(csrcp, PAGE_SIZE);
         cpu_l2cache_wbinv_range(cdstp, PAGE_SIZE);
 }
+
+void
+pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs,
+    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt)
+{
+
+        mtx_lock(&cmtx);
+        *csrc_pte = L2_S_PROTO | a_phys |
+            L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
+        PTE_SYNC(csrc_pte);
+        *cdst_pte = L2_S_PROTO | b_phys |
+            L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
+        PTE_SYNC(cdst_pte);
+        cpu_tlb_flushD_SE(csrcp);
+        cpu_tlb_flushD_SE(cdstp);
+        cpu_cpwait();
+        bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt);
+        mtx_unlock(&cmtx);
+        cpu_dcache_inv_range(csrcp + a_offs, cnt);
+        cpu_dcache_wbinv_range(cdstp + b_offs, cnt);
+        cpu_l2cache_inv_range(csrcp + a_offs, cnt);
+        cpu_l2cache_wbinv_range(cdstp + b_offs, cnt);
+}
 #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */

 #if ARM_MMU_XSCALE == 1
@@ -4344,6 +4380,28 @@ pmap_copy_page_xscale(vm_paddr_t src, vm_paddr_t dst)
         mtx_unlock(&cmtx);
         xscale_cache_clean_minidata();
 }
+
+void
+pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs,
+    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt)
+{
+
+        mtx_lock(&cmtx);
+        *csrc_pte = L2_S_PROTO | a_phys |
+            L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
+            L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
+        PTE_SYNC(csrc_pte);
+        *cdst_pte = L2_S_PROTO | b_phys |
+            L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
+            L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
+        PTE_SYNC(cdst_pte);
+        cpu_tlb_flushD_SE(csrcp);
+        cpu_tlb_flushD_SE(cdstp);
+        cpu_cpwait();
+        bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt);
+        mtx_unlock(&cmtx);
+        xscale_cache_clean_minidata();
+}
 #endif /* ARM_MMU_XSCALE == 1 */

 void
@@ -4370,8 +4428,41 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
 #endif
 }

+void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+    vm_offset_t b_offset, int xfersize)
+{
+        vm_page_t a_pg, b_pg;
+        vm_offset_t a_pg_offset, b_pg_offset;
+        int cnt;
+#ifdef ARM_USE_SMALL_ALLOC
+        vm_offset_t a_va, b_va;
+#endif

-
+        cpu_dcache_wbinv_all();
+        cpu_l2cache_wbinv_all();
+        while (xfersize > 0) {
+                a_pg = ma[a_offset >> PAGE_SHIFT];
+                a_pg_offset = a_offset & PAGE_MASK;
+                cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+                b_pg = mb[b_offset >> PAGE_SHIFT];
+                b_pg_offset = b_offset & PAGE_MASK;
+                cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+#ifdef ARM_USE_SMALL_ALLOC
+                a_va = arm_ptovirt(VM_PAGE_TO_PHYS(a_pg)) + a_pg_offset;
+                b_va = arm_ptovirt(VM_PAGE_TO_PHYS(b_pg)) + b_pg_offset;
+                bcopy((char *)a_va, (char *)b_va, cnt);
+                cpu_dcache_wbinv_range(b_va, cnt);
+                cpu_l2cache_wbinv_range(b_va, cnt);
+#else
+                pmap_copy_page_offs_func(VM_PAGE_TO_PHYS(a_pg), a_pg_offset,
+                    VM_PAGE_TO_PHYS(b_pg), b_pg_offset, cnt);
+#endif
+                xfersize -= cnt;
+                a_offset += cnt;
+                b_offset += cnt;
+        }
+}

 /*
  * this routine returns true if a physical page resides
diff --git a/sys/arm/include/pmap.h b/sys/arm/include/pmap.h
index 523499f..7c8d073 100644
--- a/sys/arm/include/pmap.h
+++ b/sys/arm/include/pmap.h
@@ -533,6 +533,8 @@ extern pt_entry_t pte_l1_c_proto;
 extern pt_entry_t pte_l2_s_proto;

 extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
+extern void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys,
+    vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
 extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);

 #if (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7 + ARM_MMU_SA1) != 0 || defined(CPU_XSCALE_81342)
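For comparison with the transient-mapping approach in the ARM code above, the commit message's note about direct-map architectures can be illustrated with a rough sketch of a direct-map variant. This sketch is not part of this diff; it assumes an amd64-style PHYS_TO_DMAP() direct map and elides any platform-specific cache handling:

```c
/*
 * Sketch only (not from this commit): with a direct map, every physical
 * page is permanently mapped, so pmap_copy_pages() can copy through
 * PHYS_TO_DMAP() without installing temporary PTEs or flushing TLBs.
 */
void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{
        void *a_cp, *b_cp;
        vm_offset_t a_pg_offset, b_pg_offset;
        int cnt;

        while (xfersize > 0) {
                /* Clamp the chunk to the end of the current source page. */
                a_pg_offset = a_offset & PAGE_MASK;
                cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
                /* ... and to the end of the current destination page. */
                b_pg_offset = b_offset & PAGE_MASK;
                cnt = min(cnt, PAGE_SIZE - b_pg_offset);
                a_cp = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
                    ma[a_offset >> PAGE_SHIFT])) + a_pg_offset;
                b_cp = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(
                    mb[b_offset >> PAGE_SHIFT])) + b_pg_offset;
                bcopy(a_cp, b_cp, cnt);
                a_offset += cnt;
                b_offset += cnt;
                xfersize -= cnt;
        }
}
```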