From b8c4d76738c091ce972e522ec36c8dce6642f6fb Mon Sep 17 00:00:00 2001
From: jah
Date: Tue, 4 Aug 2015 19:46:13 +0000
Subject: Add two new pmap functions:
 vm_offset_t pmap_quick_enter_page(vm_page_t m)
 void pmap_quick_remove_page(vm_offset_t kva)

These will create and destroy a temporary, CPU-local KVA mapping of a
specified page.

Guarantees:
--Will not sleep and will not fail.
--Safe to call under a non-sleepable lock or from an ithread

Restrictions:
--Not guaranteed to be safe to call from an interrupt filter or under a
  spin mutex on all platforms
--Current implementation does not guarantee more than one page of mapping
  space across all platforms.  MI code should not make nested calls to
  pmap_quick_enter_page.
--MI code should not perform locking while holding onto a mapping created
  by pmap_quick_enter_page

The idea is to use this in busdma, for bounce buffer copies as well as
virtually-indexed cache maintenance on mips and arm.

NOTE: the non-i386, non-amd64 implementations of these functions still
need review and testing.

Reviewed by:	kib
Approved by:	kib (mentor)
Differential Revision:	http://reviews.freebsd.org/D3013
---
 sys/sparc64/include/pcpu.h |  3 +-
 sys/sparc64/sparc64/pmap.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 70 insertions(+), 1 deletion(-)

(limited to 'sys/sparc64')

diff --git a/sys/sparc64/include/pcpu.h b/sys/sparc64/include/pcpu.h
index dbcc59c..df43810 100644
--- a/sys/sparc64/include/pcpu.h
+++ b/sys/sparc64/include/pcpu.h
@@ -51,6 +51,7 @@ struct pmap;
 	struct intr_request *pc_irfree;				\
 	struct pmap *pc_pmap;					\
 	vm_offset_t pc_addr;					\
+	vm_offset_t pc_qmap_addr;				\
 	u_long pc_tickref;					\
 	u_long pc_tickadj;					\
 	u_long pc_tickincrement;				\
@@ -61,7 +62,7 @@ struct pmap;
 	u_int	pc_tlb_ctx;					\
 	u_int	pc_tlb_ctx_max;					\
 	u_int	pc_tlb_ctx_min;					\
-	char	__pad[405]
+	char	__pad[397]
 
 #ifdef _KERNEL
 
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index f10678e..e0dca86 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -143,6 +143,7 @@ static int pmap_protect_tte(struct pmap *pm1, struct pmap *pm2, struct tte *tp,
     vm_offset_t va);
 static int pmap_unwire_tte(pmap_t pm, pmap_t pm2, struct tte *tp,
     vm_offset_t va);
+static void pmap_init_qpages(void);
 
 /*
  * Map the given physical page at the specified virtual address in the
@@ -680,6 +681,25 @@ pmap_bootstrap(u_int cpu_impl)
 	tlb_flush_nonlocked();
 }
 
+static void
+pmap_init_qpages(void)
+{
+	struct pcpu *pc;
+	int i;
+
+	if (dcache_color_ignore != 0)
+		return;
+
+	CPU_FOREACH(i) {
+		pc = pcpu_find(i);
+		pc->pc_qmap_addr = kva_alloc(PAGE_SIZE * DCACHE_COLORS);
+		if (pc->pc_qmap_addr == 0)
+			panic("pmap_init_qpages: unable to allocate KVA");
+	}
+}
+
+SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_qpages, NULL);
+
 /*
  * Map the 4MB kernel TSB pages.
  */
@@ -1934,6 +1954,54 @@ pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
 	}
 }
 
+vm_offset_t
+pmap_quick_enter_page(vm_page_t m)
+{
+	vm_paddr_t pa;
+	vm_offset_t qaddr;
+	struct tte *tp;
+
+	pa = VM_PAGE_TO_PHYS(m);
+	if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa))
+		return (TLB_PHYS_TO_DIRECT(pa));
+
+	critical_enter();
+	qaddr = PCPU_GET(qmap_addr);
+	qaddr += (PAGE_SIZE * ((DCACHE_COLORS + DCACHE_COLOR(pa) -
+	    DCACHE_COLOR(qaddr)) % DCACHE_COLORS));
+	tp = tsb_kvtotte(qaddr);
+
+	KASSERT(tp->tte_data == 0, ("pmap_quick_enter_page: PTE busy"));
+
+	tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
+	tp->tte_vpn = TV_VPN(qaddr, TS_8K);
+
+	return (qaddr);
+}
+
+void
+pmap_quick_remove_page(vm_offset_t addr)
+{
+	vm_offset_t qaddr;
+	struct tte *tp;
+
+	if (addr >= VM_MIN_DIRECT_ADDRESS)
+		return;
+
+	tp = tsb_kvtotte(addr);
+	qaddr = PCPU_GET(qmap_addr);
+
+	KASSERT((addr >= qaddr) && (addr < (qaddr + (PAGE_SIZE * DCACHE_COLORS))),
+	    ("pmap_quick_remove_page: invalid address"));
+	KASSERT(tp->tte_data != 0, ("pmap_quick_remove_page: PTE not in use"));
+
+	stxa(TLB_DEMAP_VA(addr) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE, ASI_DMMU_DEMAP, 0);
+	stxa(TLB_DEMAP_VA(addr) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE, ASI_IMMU_DEMAP, 0);
+	flush(KERNBASE);
+	TTE_ZERO(tp);
+	critical_exit();
+}
+
 int unmapped_buf_allowed;
 
 void
--
cgit v1.1
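
A note on the color arithmetic: the slot selection in pmap_quick_enter_page()
above picks, within the per-CPU window of DCACHE_COLORS pages allocated by
pmap_init_qpages(), the page whose virtual color equals the physical color of
the page being mapped, so the temporary alias stays coherent with the direct
map on sparc64's virtually indexed data cache. The standalone sketch below
only illustrates that modular arithmetic; the constants and the DCACHE_COLOR()
definition here are simplified stand-ins, not the kernel's actual macros.

#include <stdio.h>

/* Simplified stand-ins for the sparc64 MD constants and macros. */
#define PAGE_SHIFT      13
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define DCACHE_COLORS   4
#define DCACHE_COLOR(a) (((a) >> PAGE_SHIFT) % DCACHE_COLORS)

/*
 * Pick the page inside a DCACHE_COLORS-page KVA window whose virtual
 * color matches the physical color of 'pa' (same expression as in
 * pmap_quick_enter_page() above).
 */
static unsigned long
qmap_slot(unsigned long qbase, unsigned long pa)
{
        return (qbase + PAGE_SIZE * ((DCACHE_COLORS + DCACHE_COLOR(pa) -
            DCACHE_COLOR(qbase)) % DCACHE_COLORS));
}

int
main(void)
{
        unsigned long qbase = 5UL << PAGE_SHIFT;        /* window starts at color 1 */
        unsigned long pa = 3UL << PAGE_SHIFT;           /* physical color 3 */
        unsigned long va = qmap_slot(qbase, pa);

        /* Prints: slot 2, color(va) 3 == color(pa) 3 */
        printf("slot %lu, color(va) %lu == color(pa) %lu\n",
            (va - qbase) / PAGE_SIZE, DCACHE_COLOR(va), DCACHE_COLOR(pa));
        return (0);
}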
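
As for the MI consumers mentioned in the commit message (busdma bounce-buffer
copies and cache maintenance), the expected pattern is to bracket a short,
non-sleeping copy with the new pair while holding no locks and making no
nested quick mappings. A hedged sketch under those assumptions follows;
bounce_copy_to_page() and its arguments are hypothetical, and only
pmap_quick_enter_page()/pmap_quick_remove_page() come from this change.

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/pmap.h>

/*
 * Hypothetical MI helper: copy 'len' bytes from a bounce buffer into page
 * 'm' at offset 'off'.  Per the commit's contract this may be called from
 * an ithread or under a non-sleepable lock, but it must not sleep, must not
 * nest another pmap_quick_enter_page() call, and must not acquire locks
 * while the temporary mapping is live.
 */
static void
bounce_copy_to_page(vm_page_t m, vm_offset_t off, const void *src, size_t len)
{
        vm_offset_t kva;

        KASSERT(off + len <= PAGE_SIZE, ("bounce_copy_to_page: copy crosses page"));

        kva = pmap_quick_enter_page(m);         /* will not sleep, will not fail */
        memcpy((void *)(kva + off), src, len);
        pmap_quick_remove_page(kva);            /* drop the mapping before anything else */
}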